repo_name
stringlengths
6
77
path
stringlengths
8
215
license
stringclasses
15 values
content
stringlengths
335
154k
ecabreragranado/OpticaFisicaII
Experimento de Young/Biprisma de Fresnel_Ejercicio.ipynb
gpl-3.0
import warnings warnings.filterwarnings('ignore') import numpy as np import matplotlib.pyplot as plt %matplotlib inline plt.style.use('bmh') import ipywidgets as widgets from IPython.display import display import io import base64 from IPython.display import clear_output #Datos fijos ###################33 D = 3 Lambda = 6.32e-7 # longitud de onda de la radiación de 500 nm k = 2.0*np.pi/Lambda n = 1.33 # agua alpha = 0.7*np.pi/180 y0 = 10 yrepres = 14 # in mm ###########################3 I1 = 1 # Consideramos irradiancias normalizadas a un cierto valor. I2 = 1 fig,ax = plt.subplots(1,1) ax.plot(0,0,'o',lw=2) ax.set_xlim(-0.1,D+0.1) ax.set_ylim(-yrepres*1.5,yrepres*1.5) ax.set_xlabel("x (m)") ax.set_ylabel("y (mm)") ax.set_title('Esquema del montaje experimental') line1, = ax.plot(np.linspace(-1,D,50),np.zeros(50),'k') buf = io.BytesIO() plt.savefig(buf, format='png') buf.seek(0) figwidg = widgets.HTML("""<img src='data:image/png;base64,{}'/>""".format(base64.b64encode(buf.getvalue()).decode('ascii'))) plt.close(fig) fig2,ax2 = plt.subplots(1,1) ax2.set_xlabel("x (mm)") ax2.set_ylabel("y (mm)") #ax2.set_xlim(0,x.max()*1e3) #ax2.set_ylim(-yrepres,yrepres) ax2.set_title('Pantalla') ax2.pcolormesh(np.zeros((500,500))) buf2 = io.BytesIO() plt.savefig(buf2, format='png') buf2.seek(0) figwidg2 = widgets.HTML("""<img src='data:image/png;base64,{}'/>""".format(base64.b64encode(buf2.getvalue()).decode('ascii'))) #figbox = widgets.HBox([figwidg,figwidg2]) plt.close(fig2) clear_output() fig3,ax3 = plt.subplots(1,1) ax3.set_ylabel("y (mm)") ax3.set_title('Zoom (3 mm)') ax3.pcolormesh(np.zeros((50,50))) buf3 = io.BytesIO() plt.savefig(buf3, format='png') buf3.seek(0) figwidg3 = widgets.HTML("""<img src='data:image/png;base64,{}'/>""".format(base64.b64encode(buf3.getvalue()).decode('ascii'))) figbox = widgets.HBox([figwidg,figwidg2,figwidg3]) plt.close(fig3) clear_output() ######### Interfwidg = widgets.FloatText(0,description='Int. 
(mm)',color='#C13535') def changeBiprism(x0=0.1): a = 2*x0*np.tan((n-1)*alpha) #separacion entre fuentes virtuales interfranja = Lambda*D/a xlimit = (D-x0)*np.tan((n-1)*alpha) # distancia desde el eje que ocupa el patron de interferencias Pasoespacial = interfranja/20 xpantalla = np.linspace(-xlimit*1.2,xlimit*1.2,round(xlimit/Pasoespacial)) X,Y = np.meshgrid(xpantalla,xpantalla) delta = (k*a*X/D) Itotal = I1 + I2 + (2.0*np.sqrt(I1*I2)*np.cos(delta))*(np.abs(X)<xlimit) figwidg.observe(repsetup(x0),names='new') figwidg2.observe(repfringes(xpantalla,Itotal),names='new') figwidg3.observe(repzoom(xpantalla,Itotal),names='new') Interfwidg.observe(updateInterf(interfranja),names='new') return def updateInterf(interfranja): Interfwidg.value=round(interfranja*1e3,2) return def repsetup(x0): fig,ax = plt.subplots(1,1,figsize=(7,5)) ax.plot(0,0,'o',lw=2) ax.set_xlim(-0.1,D+0.1) ax.set_ylim(-yrepres*1.5,yrepres*1.5) ax.set_xlabel("x (m)") ax.set_ylabel("y (mm)") ax.set_title('Esquema del montaje experimental') ax.plot(np.linspace(-1,D,50),np.zeros(50),'k') ax.vlines(x0,-y0,y0,'r',lw=2) ax.vlines(D,-y0*3,y0*3,lw=4) ax.plot(np.linspace(x0,x0+.02,D),(-y0/.02)*np.linspace(x0,x0+.02,D) + y0*(1+x0/.02),'r',lw=2) ax.plot(np.linspace(x0,x0+.02,D),(y0/.02)*np.linspace(x0,x0+.02,D) - y0*(1+x0/.02),'r',lw=2) buf = io.BytesIO() plt.savefig(buf, format='png') buf.seek(0) clear_output() figwidg.value ="""<img src='data:image/png;base64,{}'/>""".format(base64.b64encode(buf.getvalue()).decode('ascii')) clear_output() plt.close(fig) return def repfringes(x,Itotal): fig2,ax2 = plt.subplots(1,1,figsize=(3,5)) ax2.set_ylabel("y (mm)") ax2.set_xlim(0,x.max()*1e3) ax2.set_ylim(-yrepres,yrepres) ax2.set_title('Pantalla') ax2.pcolormesh(x*1e3,x*1e3,Itotal.T,cmap = 'gray',vmin=0,vmax=4) buf2 = io.BytesIO() plt.savefig(buf2, format='png') buf2.seek(0) clear_output() figwidg2.value ="""<img src='data:image/png;base64,{}'/>""".format(base64.b64encode(buf2.getvalue()).decode('ascii')) clear_output() 
plt.close(fig2) return def repzoom(x,Itotal): fig3,ax3 = plt.subplots(1,1,figsize=(3,5)) ax3.set_ylabel("y (mm)") ax3.set_xlim(0,x.max()*1e3) ax3.set_ylim(-3,3) ax3.set_title('Zoom (3 mm)') ax3.pcolormesh(x*1e3,x*1e3,Itotal.T,cmap = 'gray',vmin=0,vmax=4) buf3 = io.BytesIO() plt.savefig(buf3, format='png') buf3.seek(0) clear_output() figwidg3.value ="""<img src='data:image/png;base64,{}'/>""".format(base64.b64encode(buf3.getvalue()).decode('ascii')) clear_output() plt.close(fig3) return Positionwidg = widgets.FloatSlider(value=0.5,min=0.02,max=1.0,step=0.1,description='Dist fuente-biprism',orientation='horizontal') changepos = widgets.interactive(changeBiprism,x0=Positionwidg) resultswidg = widgets.HBox([figbox,Interfwidg]) display(changepos,resultswidg) """ Explanation: 1. Objetivo de la práctica Los objetivos de esta práctica son los siguientes: Observación e interpretación del diagrama interferencial producido por un biprisma de Fresnel. Determinación de la longitud de onda de la fuente láser empleada. 2. Fundamento teórico Consideremos el sistema de dos rendijas separadas una distancia $a$ de la figura siguiente, que son iluminadas por la luz procedente de una fuente casi puntual $S$ de radiación monocromática de longitud de onda $\lambda$. A una cierta distancia $D$, tal que $D>>a$, del plano que contiene a la doble rendija, se coloca una pantalla sobre la que se realizan observaciones. El campo eléctrico en el punto $P_h$ de la pantalla vendrá dado por la superposición coherente de las ondas procedentes de ambas rendijas. $$E_p(x) = E_1+E_2 = \frac{E_0}{r_1} e^{i(\omega t -kr_1)} + \frac{E_0}{r_2} e^{i(\omega t -kr_2)}$$ La distribución de irradiancia que se observa en un punto $P_h$ de la pantalla vendrá dada por la siguiente expresión. Visualmente en la pantalla se observará algo similar a lo mostrado en la figura siguiente. 
$$I_P(x) \propto |E_0|^2\left[\frac{1}{r_1^2}+\frac{1}{r_2^2}+\frac{1}{r_1r_2}\cos k\left(r_2-r_1\right)\right]$$ El tercer término en la ecuación anterior es el que modula la irradiancia en la pantalla. Así en un punto $P_h$ de la pantalla habrá un máximo de irradiancia si se verifica la siguiente condición. $$ k·\frac{a·x}{D} = 1 $$ esto es, $$\frac{kax}{D}=2m\pi, \,\,\,\,\,\,\,\, \mathrm{donde} \,\,\,\,\,\,\,\, m=0,\,\pm 1,\,\pm 2...,$$ Los máximos ocurren en los puntos: $$x =m \frac{\lambda D}{a}$$ y son una sucesión de franjas paralelas al eje $Z$ y equidistantes. La distancia que hay entre dos máximos o mínimos de interferencia, $Int$, se denomina interfranja y su valor viene dado por la expresión siguiente. $$Int = \frac{\lambda D}{a}$$ 3. Biprisma de Fresnel Hay diferentes dispositivos que son equivalentes al sistema de dos rendijas originariamente empleado por Thomas Young. Aqui vamos a considerar el dispositivo llamado biprisma de Fresnel, el cual nos permitirá variar fácilmente la separación entre las rendijas o fuentes secundarias. En la figura siguiente se muestra cómo el biprisma produce dos fuentes secundarias de radiación equivalentes a las dos rendijas del dispositivo de Young que se ha descrito anteriormente. La luz que emerge por la parte superior del biprisma lo hace tal como si procediera de $S_1$, mientras que la parte del haz refractada por la parte inferior se propaga como si procediera de $S_2$. Por lo tanto, a la derecha del biprisma tenemos la superposición de dos ondas esféricas procedentes de $S_1$ y $S_2$, respectivamente. El plano donde se encuentran estas fuentes virtuales es equivalente al plano de las rendijas en el experimento de Young. 4. Práctica de medida de la longitud de onda de emisión de un Láser de He-Ne con ayuda de un biprisma de Fresnel. 
El dispositivo experimental con el que vamos a trabajar consiste en un láser de He-Ne, un objetivo de microscopio, un biprisma y una pantalla de observación situada al final del banco óptico. La luz procedente de láser se hace incidir sobre el objetivo de microscopio, $OM$, cuya función consiste en focalizar el haz de luz y obtener así una fuente casi puntual, $S$, como se muestra en la figura. 5. Ejercicios previos a la sesión de prácticas A continuación se presenta una simulación de la primera parte de la práctica a llevar a cabo en el laboratorio. Nuestro objetivo es hallar la longitud de onda $\lambda$ de la radiacion que ilumina el Biprisma de Fresnel utilizando la expresion ya mencionada anteriormente, $$Int = \frac{\lambda D}{a} \Rightarrow \lambda = \frac{Int a}{D}$$ Para ello, primero mediremos la interfranja del patron de interferencias generado cuando iluminamos con una fuente puntual el Biprisma de Fresnel para una distancia de trabajo $D$. A continuacion, mediremos el valor de la separacion entre las fuentes virtuales $a$ y utilizaremos la anterior expresion para obtener $\lambda$. En primer lugar, se presenta a la izquierda una un esquema del montaje experimental: fuente puntual, biprisma de Fresnel (en rojo) y pantalla donde se visualizan las franjas de interferencia producidas por el biprisma. La distancia entre la fuente y el biprisma puede ser modificada moviendo el desplazador que se muestra justo encima de este esquema. A la derecha, se muestra el patron de interferencias (completo y una imagen ampliada de 6 mm de longitud en la zona central del patron), y el valor de la interfranja para la distancia fuente-biprisma fijada. Moviendo el desplazador, se puede observar como cambia el patron y el valor de la interfranja asociado. 
End of explanation """ # Escribe el valor de la interfranja obtenido despues del signo igual a continuacion Interfranja = """ Explanation: Mueve el desplazador hasta la distancia recomendada en el guion de la Practica 1 del laboratorio de la asignatura y anota el valor de la interfranja mostrado para el patron de interferencias resultante en la siguiente celda End of explanation """ from matplotlib.patches import Circle #Focal de la lente fp = 0.2 # Separacion entre fuentes virtuales x0 = Positionwidg.value a = 2*x0*np.tan((n-1)*alpha) #separacion entre fuentes virtuales xlimit = (D-x0)*np.tan((n-1)*alpha) # distancia desde el eje que ocupa el patron de interferencias fig,ax = plt.subplots(1,1) ax.plot(0,0,'o',lw=2) ax.set_xlim(-0.1,D+0.1) ax.set_ylim(-yrepres*1.5,yrepres*1.5) ax.set_xlabel("x (m)") ax.set_ylabel("y (mm)") ax.set_title('Esquema del montaje experimental') line1, = ax.plot(np.linspace(-1,D,50),np.zeros(50),'k') buf = io.BytesIO() plt.savefig(buf, format='png') buf.seek(0) figwidg = widgets.HTML("""<img src='data:image/png;base64,{}'/>""".format(base64.b64encode(buf.getvalue()).decode('ascii'))) plt.close(fig) fig2,ax2 = plt.subplots(1,1) ax2.set_xlabel("x (mm)") ax2.set_ylabel("y (mm)") #ax2.set_xlim(0,x.max()*1e3) #ax2.set_ylim(-yrepres,yrepres) ax2.set_title('Pantalla') ax2.pcolormesh(np.zeros((500,500))) buf2 = io.BytesIO() plt.savefig(buf2, format='png') buf2.seek(0) figwidg2 = widgets.HTML("""<img src='data:image/png;base64,{}'/>""".format(base64.b64encode(buf2.getvalue()).decode('ascii'))) figboxlens = widgets.HBox([figwidg,figwidg2]) plt.close(fig2) clear_output() ######### aprimawidg = widgets.FloatText(0,description='a\'. 
(mm)',color='#C13535') def changelens(xlens=D-0.1): s = -xlens sp = D - xlens beta = sp/s aprima = beta*a figwidg.observe(repsetuplens(xlens),names='new') spref = (D + np.sqrt(D**2 - 4*fp*D))/2 #Posicion correcta sref = spref-D figwidg2.observe(repimage(xlens,sref,aprima),names='new') aprimawidg.observe(updateaprima(aprima),names='new') return def updateaprima(aprima): aprimawidg.value=round(-aprima*1e3,2) # en mm return def repsetuplens(xlens): fig,ax = plt.subplots(1,1,figsize=(7,5)) ax.plot(0,0,'o',lw=2) ax.set_xlim(-0.1,D+0.1) ax.set_ylim(-yrepres*1.5,yrepres*1.5) ax.set_xlabel("x (m)") ax.set_ylabel("y (mm)") ax.set_title('Esquema del montaje experimental') ax.plot(np.linspace(-1,D,50),np.zeros(50),'k') ax.vlines(x0,-y0,y0,'r',lw=2) ax.vlines(D,-y0*3,y0*3,lw=4) ax.plot(np.linspace(x0,x0+.02,D),(-y0/.02)*np.linspace(x0,x0+.02,D) + y0*(1+x0/.02),'r',lw=2) ax.plot(np.linspace(x0,x0+.02,D),(y0/.02)*np.linspace(x0,x0+.02,D) - y0*(1+x0/.02),'r',lw=2) ax.annotate("", xy=(np.abs(xlens), y0*1.5), xytext=(np.abs(xlens),-y0*1.5), arrowprops=dict(arrowstyle="<->",color='k')) # representacion lente buf = io.BytesIO() plt.savefig(buf, format='png') buf.seek(0) clear_output() figwidg.value ="""<img src='data:image/png;base64,{}'/>""".format(base64.b64encode(buf.getvalue()).decode('ascii')) clear_output() plt.close(fig) return def repimage(xlens,sref,ap): fig2,ax2 = plt.subplots(1,1,figsize=(4,4)) rad = 3*(xlens+ sref)**2 + 0.6 # Nota: sref < 0 circle1 = Circle((0,-ap*1e3/2), radius=rad, color='r',alpha = 1 - np.abs(xlens+sref)) circle2 = Circle((0,ap*1e3/2), radius=rad, color='r',alpha = 1- np.abs(xlens+sref)) ax2.add_artist(circle1) ax2.add_artist(circle2) ax2.set_ylabel("y (mm)") ax2.set_xlim(-yrepres,yrepres) ax2.set_ylim(-yrepres,yrepres) ax2.set_title('Pantalla') buf2 = io.BytesIO() plt.savefig(buf2, format='png') buf2.seek(0) clear_output() figwidg2.value ="""<img src='data:image/png;base64,{}'/>""".format(base64.b64encode(buf2.getvalue()).decode('ascii')) 
clear_output() plt.close(fig2) return PosLenswidg = widgets.FloatSlider(value=1,min=x0+0.05,max=1.0,step=0.03,description='Dist Fuente-Lente',orientation='horizontal') changeposlens = widgets.interactive(changelens,xlens=PosLenswidg) finalwidg = widgets.HBox([figboxlens,aprimawidg]) display(changeposlens,finalwidg) """ Explanation: Para medir la separacion entre las dos fuentes virtuales creadas por el biprisma, no podemos hacerlo directamente en el laboratorio. Sin embargo, si colocamos una lente convergente despues del biprisma, esta formara una imagen real de esas fuentes virtuales, la cual si podemos medir en el laboratorio. La figura siguiente muestra el montaje que se llevara a cabo en el laboratorio. En la siguiente simulacion, se plantea esta situacion. La figura a la izquierda reproduce el montaje experimental, pudiendo utilizar el desplazador para mover la posicion de la lente. La figura de la derecha muestra lo que se veria en la pantalla. Cuando la lente se encuentre en su posicion correcta, se formara en ella la imagen de las dos fuentes virtuales generadas por el biprisma. En la simulacion, apareceran los dos puntos mas definidos y mas pequeños. Finalmente, tambien se muestra el valor de la separacion entre esas imagenes virtuales $a'$. Se propone por tanto mover la lente hasta que las imagenes se vean lo mas nitidas posibles, lo que nos permitira obtener los parametros que nos faltan para calcular la longitud de onda $\lambda$. End of explanation """ a_prima = """ Explanation: Anota el valor de la separacion entre las imagenes de las fuentes virtuales que aparecen en la anterior grafica. End of explanation """ aumento = a = """ Explanation: Calcula, utilizando el valor del aumento dado por la lente convergente el valor de la separacion entre las fuentes virtuales generadas por el biprisma. Utilizar los valores de la distancia objeto-lente ($s$) y lente-imagen ($sprima$) para el calculo del aumento. 
Anotar en la celda siguiente el valor del aumento obtenido y el valor de la separacion $a$ obtenida. End of explanation """ long_de_onda = """ Explanation: Finalmente, obtener el valor de la longitud de onda $\lambda$ utilizando los valores obtenidos en los anteriores apartados. Anotar el valor obtenido en la siguiente celda End of explanation """
artzers/MachineLearning
Deconv/Deconvolution.ipynb
mit
import itertools ta=[1,2,3] tb=[4,5,6] #tc=[(i,j) for i,j in zip(ta,tb)] #print tc #import itertools #for i in itertools.product('ABCD', repeat = 2): # print i, for i in itertools.product(range(1,4),range(4,7)):#dikaer product print(i,) print(' ') a=np.arange(10) print(a) a[ta]*=2 print(a) from scipy.sparse import csc_matrix from scipy.sparse import linalg as sppl import numpy.linalg test = csc_matrix((4,3)) test[0:2,0:2]=np.random.randn(test[0:2,0:2].shape[0],test[0:2,0:2].shape[1]) test[2:4,2:3]=np.random.randn(test[2:4,2:3].shape[0],test[2:4,2:3].shape[1]) print(test) #testa = test.toarray() #print testa tt = csc_matrix(np.random.randn(3,4)) #tRes = np.mat(csc_matrix.dot(test,tt)) tRes = csc_matrix.dot(test,tt) print(tRes) A=csc_matrix(((img.shape[0]-3)*(img.shape[1]-3),img.shape[0]*img.shape[1])) print(A.shape) ind = -1 for j in range(0,np.int32(img.shape[1])-3): for i in range(0,np.int32(img.shape[0])-3): ind +=1 #index=[] ravel = [] for e in itertools.product(range(j,j+3),range(i,i+3)): #index.append(e) ravel.append(e[1]+e[0]*img.shape[0]) A[ind,[ravel]]=1./9. 
#print ravel,A[ind,:] from scipy import sparse reg = 0.001 X = A.T.dot(A) X=X+reg*sparse.eye(X.shape[0]) print(X.shape) Xinv = sp.sparse.linalg.inv(X) print(Xinv.shape) X=csc_matrix.dot(csc_matrix(Xinv), A.T) padBlur = csc_matrix(blurImg[0:np.int32(img.shape[0])-3,0:np.int32(img.shape[1])-3].ravel()).T print(X.shape,padBlur.shape) X=csc_matrix.dot(X,padBlur) print(X.shape) res=X.todense() res = res.reshape(blurImg.shape) fig = plt.figure() ax = fig.add_subplot(131) ax.imshow(img,cmap='gray') ax = fig.add_subplot(132) ax.imshow(blurImg,cmap='gray') ax = fig.add_subplot(133) ax.imshow(res,cmap='gray') plt.show() from scipy.sparse import csc_matrix,lil_matrix from scipy.sparse import linalg as sppl import numpy.linalg from scipy import sparse import cv2 import sys,time from __future__ import division from scipy import signal#warning dia = 5 blurKernel = np.ones((dia,dia))/(dia**2) print(blurKernel) img2 = cv2.imread('camera.jpg') img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) #img2 = cv2.resize(img2,(0,0),fx=0.2,fy=0.2) print(img2.shape) blurImg2 = signal.convolve2d(img2, blurKernel, 'same','symm') plt.imshow(blurImg2,cmap='gray') plt.show() A2=lil_matrix(((img2.shape[0]-dia)*(img2.shape[1]-dia),img2.shape[0]*img2.shape[1])) print(A2.shape) ind = -1 for j in range(0,np.int32(img2.shape[1])-dia): sys.stdout.write('\r now : %d' % j +' of %d'%(np.int32(img2.shape[1])-dia)) sys.stdout.flush() time.sleep(0.2) for i in range(0,np.int32(img2.shape[0])-dia): ind +=1 #index=[] ravel = [] for e in itertools.product(range(j,j+dia),range(i,i+dia)): #index.append(e) ravel.append(e[1]+e[0]*img2.shape[0]) A2[ind,[ravel]]=1./(dia**2) #print ravel,A[ind,:] A2 = csc_matrix(A2) print print('A2 complete') """ Explanation: $$argmin\frac{1}{2}(AX-B)^2+\lambda X \rightarrow X=(A^TA+\lambda I)^{-1}A^TB$$ $$A=\left[ \begin{matrix} 1/9 & 0 & 0 \ ... & 1/9 & ... \ 0 & ... 
& 0 \end{matrix} \right] $$ End of explanation """ lr=0.7 reg = 0.01 rad = np.int32((dia-1) / 2) print(rad,np.int32(img2.shape[0]-rad)) padBlur2 = csc_matrix(blurImg2[rad:np.int32(img2.shape[0])-rad-1,rad:np.int32(img2.shape[1])-rad-1].ravel()).T print(A2.shape,padBlur2.shape,blurImg2.shape) Xi = np.mat(np.random.randn(img2.shape[0],img2.shape[1]).ravel()).T print(Xi.shape) epoch = 200 oldloss = 100000000 newloss = 0 for i in range(0,epoch): delta = A2.T.dot(A2) delta=delta+reg*sparse.eye(delta.shape[0]) delta=csc_matrix.dot(csc_matrix(delta), csc_matrix(Xi)) delta-=A2.T.dot(padBlur2) delta *= lr Xi -= delta if (i > 0 and np.mod(i,25) == 0) or i==epoch - 1: newloss = np.sum(np.abs(Xi.reshape(img2.shape)-img2)) # ratio = (oldloss - newloss)/newloss # if ratio < 0.: # lr *= 0.5 # elif ratio < 0.2: # lr *= 1.3 # elif ratio < 0.3: # lr *= 1.5 # oldloss = newloss print(i,lr,newloss) print('optimization complete') res=Xi#.todense() print(Xi.shape,img2.shape) res = res.reshape(img2.shape) # fig = plt.figure() # plt.imshow(img2,cmap='gray') # fig = plt.figure() # plt.imshow(blurImg2,cmap='gray') # fig = plt.figure() # plt.imshow(res,cmap='gray') fig = plt.figure() ax = fig.add_subplot(131) ax.imshow(img2,cmap='gray') ax = fig.add_subplot(132) ax.imshow(blurImg2,cmap='gray') ax = fig.add_subplot(133) ax.imshow(res,cmap='gray') plt.show() oImg = np.random.random(blurImg.shape)+10 for i in range(200): oImg = oImg - 1.0*( 0.01*oImg+signal.convolve2d( -blurImg + signal.convolve2d( oImg, blurKernel, 'same','symm'), blurKernel, 'same','symm')) plt.imshow(blurImg,cmap='gray') fig = plt.figure() plt.imshow(oImg,cmap='gray') fig = plt.figure() plt.imshow(img,cmap='gray') #fig = plt.figure() #ax = fig.add_subplot(111) #ax.imshow(img,cmap='gray') #ax = fig.add_subplot(112) #ax.imshow(blurImg,cmap='gray') #ax = fig.add_subplot(113) #ax.imshow(oImg,cmap='gray') #fig.show() """ Explanation: $X_{k+1}=X_{k}-\mu[(A^TA+\lambda I)X_k-A^TB]$ End of explanation """
azubiolo/itstep
it_step/ml_from_scratch/8_svm_lab/svm.ipynb
mit
k_classes = 2 X = [[1., 1.5, 0.2], [1., 0.3, 1.2], [1, 1.6, 0.4], [1., 1.3, 0.25], [1., 0.5, 1.12]] Y = [1, 2, 1, 1, 2] """ Explanation: Support Vector Machines Course recap This lab consists in implementing the Support Vector Machines (SVM) algorithm. Given a training set $ D = \left{ \left(x^{(i)}, y^{(i)}\right), x^{(i)} \in \mathcal{X}, y^{(i)} \in \mathcal{Y}, i \in {1, \dots, n } \right}$, where $\mathcal{Y} = { 1, \dots, k}$ . Recall (from lecture 7), SVM aims at minimizing the following cost function $J$: $$ \begin{split} J(\theta_1, \theta_2, \dots, \theta_k) &= \sum_{i = 1}^n L_i \ &= \sum_{i = 1}^n \sum_{j \neq y_i} \max(0, \theta_j^Tx^{(i)} - \theta_{y^{(i)}}^T x^{(i)} + \Delta) \end{split} $$ Defining the training set Let us define variables X and Y that will contain the features $\mathcal{X}$ and labels $\mathcal{Y}$ of the training set. Again, we will be having an intercept $x_0^{(i)}$. End of explanation """ %matplotlib inline import matplotlib.pyplot as plt plt.figure() X1 = [x[1] for x in X] X2 = [x[2] for x in X] plt.scatter(X1, X2, c=Y) # plot x1, x2, color is defined by the label y plt.show() """ Explanation: Let us take a look at the data in 2D (we ignore the intercept which is constantly equal to 1). End of explanation """ def score(x, theta): d = len(x) thetaTx = 0 for idx in range(d): thetaTx += x[idx] * theta[idx] return thetaTx """ Explanation: The data was generated so that we have two quite distinct classes. This is usually not the case in reality, and for this reason we will see what happens when outliers are implemented (see homework below). 
Prediction function Exercise: Define a function score that takes as parameter the feature vector $x$ as well as a model $\theta$ and outputs the score: $$ h(x) = \theta^T x = \sum_{j = 0}^d \theta_j x_j$$ End of explanation """ def cost_function(x, y, thetas, delta): thetayTx = predict(x, thetas[y]) loss = 0 d = len(x) for j in range(d): if j is not y: print("x " + str(x)) print("thetas " + str(thetas)) thetajTx = predict(x, thetas[idx]) loss += max(0, thetajTx - thetayTx + delta) return loss """ Explanation: Defining the cost function Cost function on a single sample Exercise: Define a function cost_function that takes as parameter a sample (the actual label $y$, the feature vector $x$), the $\theta$s for each classes as well as $\Delta$ and returns the value of the cost function for this sample. Hint: Recall from lecture 7 that it is given by: $$ L_i = \sum_{j \neq y_i} \max(0, \theta_j^Tx - \theta_{y}^T x + \Delta) $$ End of explanation """ def cost_function_total(X, Y, thetas, delta): cost = 0 # initialize the cost with 0 n = len(Y) for i in range(n): # iterate over the training set x = X[i] # get the ith feature vector y = Y[i] # get the ith label cost += cost_function(x, y, thetas, delta) # add the cost of the current sample to the total cost return cost def initialize_thetas(X, k_classes): d = len(X[1]) theta = [0] * d return [theta] * k_classes thetas_0 = initialize_thetas(X, 2) """ Explanation: Now we are able to compute the loss for a single training sample, we can get the total cost. 
Exercise: Define a function cost_function_total that will compute the total cost function given by $$ J = L_i = \sum_{i = 1}^n \sum_{j \neq y_i} \max(0, \theta_j^Tx^{(i)} - \theta_{y^{(i)}}^T x^{(i)} + \Delta) $$ End of explanation """ def predict(x, thetas): k_classes = len(thetas) prediction = 0 highest_score = score(x, thetas[prediction]) # initialize with the first class for idx_class in range(k_classes): class_score = score(x, thetas[idx_class]) if class_score > highest_score: prediction = idx_class return prediction + 1 predict(X[0], thetas_0) """ Explanation: Recall that the prediction on a feature vector $x$ is given by the value of $j$ that maximizes the score $\theta_j^T x$. Exercise: Define a function predict which takes a feature vector x as well as the $\theta_j$s and outputs the predicted class $\hat{y}$ which is the value of $j$ maximizing the score. Hint: We have defined a score function for this purpose. End of explanation """ def gradients(x, y, thetas, delta): d = len(x) k_classes = len(thetas) predicted_class = predict(x, thetas) grads = [[0] * d] * k_classes # initialize a list of k_class gradients with zeros everywhere for idx_class in range(k_classes): # iterate over all the classes to compute the gradient for each class # there are 2 formulas: one for the true class (given by 'y') and another one for the other classes if idx_class + 1 == y: # if idx_class is equal to the actual class p = 0 for j in range(k_classes): if j + 1 != y: # are counting over the classes different than the actual class if score(x, thetas[j]) - score(x, thetas[y - 1]) + delta > 0: p += 1 for idx in range(d): grads[idx_class][idx] = - p * x[idx] else: # if idx_class is not the actual class if score(x, thetas[idx_class]) - score(x, thetas[y - 1]) + delta > 0: for idx in range(d): grads[idx_class][idx] = x[idx] # we do not need an else statement here because the gradient would be equal to 0 in this case, # and the gradient has been initialized with zeros return grads # 
to delete print(gradients(X[0], Y[0], thetas_0, 4.0)) """ Explanation: Gradient We have just defined everything we need to make the prediction and compute the loss for the SVM problem. As usual, we want to minimize this loss. The gradient descent works well in this case and in order to apply it, we need to compute the gradient. Recall that we need to compute a gradient per class as we have $k$ vectors $\theta_j$. The gradient for a sample $x, y$ is given by the following formulas: - if $j \neq y$: $$ \nabla_{\theta_j} L = \begin{cases} x \quad \text{if} \quad \theta_j^Tx - \theta_{y}^Tx + \Delta > 0 \ 0 \quad \text{otherwise.} \end{cases} $$ - if $j = y$: $$ \nabla_{\theta_y} L = px $$ where $p$ is the number of times the desired margin is not satisfied, that is, the number of $j \neq y$ such that $\theta_j^Tx - \theta_{y}^Tx + \Delta > 0$ End of explanation """ # For the sake of clarity, we first define a function that sums vectors elementwise def sum_vectors(x1, x2): d = len(x1) sum_vector = x1 for idx in range(d): sum_vector[idx] += x2[idx] return sum_vector def gradient_total(X, Y, thetas, delta): n = len(Y) # number of training samples d = len(X[1]) k_classes = len(thetas) grads_sum = [[0] * d] * k_classes for i in range(n): x = X[i] y = Y[i] grads = gradients(x, y, thetas, delta) # get the gradient for the current sample for j in range(k_classes): grads_sum[j] = sum_vectors(grads[j], grads_sum[j]) # add it to the total gradients return grads_sum # to delete gradient_total(X, Y, thetas_0, 4.0) """ Explanation: The last quantity needed in order to apply the gradient descent is the total gradient (if we want to apply a batch gradient descent, the gradient for a single sample is enough in the stochastic gradient descent case is enough). To compute it, we just need to sum the gradients for all the samples within the training set. 
Exercise: Implement a function gradient_total that takes as inputs a set of feature vectors $X$, a set of labels $Y$, values for the $\theta_j$s as well as the hyperparamter $\Delta$ and outputs the gradient of $J$. End of explanation """ # For the sake of readability, we define a function axpb (that stands for a x plus b) that outputs a * x + b # where a is a scalar and x and b are vectors def axpb(a, x, b): # x and b should have the same size, a is a scalar d = len(x) sum_vector = b for idx in range(d): sum_vector[idx] += a * x[idx] return sum_vector def gradient_descent(X, Y, delta, learning_rate): k_classes = len(set(Y)) thetas = initialize_thetas(X, k_classes) for i_iter in range(5): grads = gradient_total(X, Y, thetas, delta) for j in range(k_classes): thetas[j] = axpb(-learning_rate, grads[j], thetas[j]) print("X " + str(X)) cost = cost_function_total(X, Y, thetas, delta) print("iteration " + str(i_iter) + ", cost = " + str(cost)) return thetas """ Explanation: Now that we have the gradient, we can apply the gradient descent algorithm. Exercise: Implement a function gradient_descent that takes as parameter the training set $(X, Y)$, the hyperparameter $\Delta$ as well as a learning rate and applied the gradient descent algorithm to the SVM case. Hint: Feel free to get back to the lectures to recall the gradient descent update. End of explanation """
akallio1/science-days-2017
tieteen-paivat-2017.ipynb
mit
# Alustetaan koneoppimisen ympäristö (ohjelmakirjastot) import warnings warnings.filterwarnings('ignore') %matplotlib inline from time import time import numpy as np from sklearn import random_projection, decomposition, manifold import matplotlib.pyplot as plt import seaborn as sns from keras.datasets import mnist from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import accuracy_score from keras.models import Sequential from keras.layers import Dense, Activation, Dropout from keras.utils import np_utils # Aluksi tehdään aputyökalu kuvien piirtämistä varten def plot_embedding(X, title=None, t0=None): x_min, x_max = np.min(X, 0), np.max(X, 0) X = (X - x_min) / (x_max - x_min) plt.figure(figsize=(9,6)) plt.axis('off') for i in range(X.shape[0]): plt.text(X[i, 0], X[i, 1], str(y[i]), color=plt.cm.Set1(y[i] / 10.), fontdict={'weight': 'bold', 'size': 9}) if title is not None: if t0 is not None: plt.title("%s (%.2fs)" % (title, (time()-t0))) else: plt.title(title) # Ladataan data (MNIST-tietokanta) (X_train, y_train), (X_test, y_test) = mnist.load_data() X = X_train[:1024] y = y_train[:1024] # Raportoidaan työ tehdyksi print('\nData ladattu onnistuneesti (MNIST).') print('Datan palasten koko: train:',len(X_train),'test:',len(X_test)) """ Explanation: Data-analyysityöpaja Tieteen päivillä Mitä data-analyysi on? Data-analyysi tarkoitaa sitä, että datan pohjalta päätellään jotain uutta. Esimerkiksi mittausdatan perusteella voidaan todeta, että uusi lääkeaine näyttää laskevan verenpainetta. No mitä se data on? Nykypäivänä data voi olla mitä tahansa, mikä on saatavissa digitaalisessa muodossa. Perinteisesti data on ollut tieteellisiä havaintoja, joita on tunnollisesti kirjattu ylös, vaikkapa jonkinlaiseksi taulukoksi. Näin on edellisen verenpaine-esimerkin tapauksessa. Nykyään kuitenkin tehdään jo paljon analyysiä esimerkiksi reaaliaikaisesta videokuvasta. 
Hyvä esimerkki tästä on vaikkapa robottilennokki, joka lentää pitkin voimalinjoja ja videokameran kuvan avulla analysoi, että milloin lumikuorma on vaarallisen suuri. Mihin data-analyysia tarvitaan? Jos visionäärejä on uskominen, niin kohta ihan kaikkeen. Tieteessä datan analysointi on ollut keskeistä viimeistään 1900-luvun alusta alkaen. Tämä perinteinen tieteen ja asiantuntijatyön analytiikka on kuitenkin nyt saamassa rinnalleen uuden käyttäjäkunnan, kun arkisemmat data-analyysitarpeet ovat suoraan sanoen räjähtäneet. Facebookin ja Googlen kaltaiset internetajan yritykset vetävät uuden data-analytiikan nopeaa kehitystä. Yritysmaailmassa niin kutsuttu Big Data on tällä hetkellä hyvin kuuma aihe. Joka tapauksessa on selvää, että tulevaisuudessa data-analyysiä tehdään paljon enemmän ja paljon laajemmin. Eli ei pelkästään tutkimuslaitoksissa, vaan myös tavallisissa yrityksissä, virastoissa ja järjestöissä. Jos opettelee ainakin perusasiat, niin saa melkoisen hyödyn tulevaisuutta ajatellen. Alku Ensiksi alustamme koneoppimisen työkalumme ja lataamme datan, jota työpajassa käsitellään. Alla oleva koodinpätkä tekee nämä kaksi asiaa. Koodi ajetaan klikkaamalla harmaaseen laatikkoon, jolloin se tulee valituksi. Valitse ylävalikosta Cell -> Run ja koodi käynnistyy. Sen merkkinä ilmestyy In-riville tähti. Kun homma on valmis, niin alle ilmestyvät tulokset. Tässä tapauksessa pitäisi tulla tieto ladatun datan koosta sekä esimerkiksi muutamia merkkejä, joita data sisältää. Jatkossa koodin voi ajaa myös näppärämmin painamalla Ctrl ja Enter. End of explanation """ n_img_per_row = 32 # 32*32=1024 img = np.zeros((28 * n_img_per_row, 28 * n_img_per_row)) for i in range(n_img_per_row): ix = 28 * i for j in range(n_img_per_row): iy = 28 * j img[ix:ix + 28, iy:iy + 28] = X[i * n_img_per_row + j,:,:] plt.figure(figsize=(9, 9)) plt.imshow(img) plt.title('MNIST-data (1024 ensimmäistä merkkiä)') ax=plt.axis('off') """ Explanation: Seuraavaksi katsotaan, että mistä datassa on kyse. 
Datamme on tietokanta ihmisten käsin kirjoittamista numeromerkeistä, eli sen pohjalta voi vaikkapa tutkia ihmisten tapoja piirtää numeroja tai rakentaa tekoälyn, joka ymmärtää käsin kirjoitettua tekstiä. Datassa on liian paljon piirrettyjä merkkejä läpikäytäväksi, joten visualisoidaan niistä vain alkupäästä 1024 ensimmäistä. End of explanation """ # Piirretään alkupään merkkejä pltsize=1 plt.figure(figsize=(10*pltsize, pltsize)) for i in range(10): plt.subplot(1,10,i+1) plt.axis('off') plt.imshow(X_train[i,:,:]) plt.title(str(y_train[i])) """ Explanation: Oppimisen kannalta pelkät merkit eivät vielä kerro paljon, vaan tarvitsemme myös tiedon siitä, että mitä oikeaa merkkiä mikäkin käsin piirretty merkki vastaa. Onneksi myös tämä tieto löytyy datasta valmiiksi. Alla oleva koodi piirtää 10 ensimmäistä merkkiä ja niiden luokittelun. End of explanation """ # Tehdään PCA-analyysi pca = decomposition.PCA(n_components=2) X_pca = pca.fit_transform(X.reshape(-1,28*28)) # Ja piirretään analyysin tulos plot_embedding(X_pca, "PCA-kuvaus") """ Explanation: Oikeassa maailmassa tulee usein vastaan tilanteita, joissa meillä on kiinnostavaa dataa, mutta ei valmiiksi tietoa sen sisällöstä. Nyt tietokannan rakentaja on valmiiksi luokitellut ja tarkastanut merkit, mutta mitä jos näin ei olisi? Suuren tietokannan läpikäynti käsin on valtava urakka. Jos suuri määrä käsin kirjoitettuja merkkejä pitää tunnistaa ihmistyönä, niin miten tällaisen projektin voisi hoitaa? Miten koneella voitaisiin helpottaa työmäärää? Onko käsin kirjoitettujen merkkien kuvadatalla mitään arvoa, jos niitä vastaavaa luokittelua ei ole tiedossa? Mitä käytännön sovelluksia voit keksiä pelkkien kuvien käytölle? Pohdi näitä kysymyksiä hetki. Ne ovat perustavanlaatuisia haasteita data-analytiikan projekteissa ja myös hyvää pohjustusta seuraaville vaiheille. Ensimmäinen analyysi Seuraavaksi pääsemme käyttämään varsinaista data-analytiikan menetelmää. 
Katsomme aluksi pelkästään kuvadataa ja emme käytä luokittelutietoa hyväksi. Mitä pelkillä kuvilla voi tehdä? Niitä voi esimerkiksi vertailla keskenään. Jokainen kuva on 28x28 kasa pikseleitä. Jos on esimerkiksi kaksi numeroa 0 esittävää kuvaa, niin niissä on todennäköisesti tummat pikselit varsin samoissa paikoissa molemmissa. Valitettavasti ihmisen on mahdotonta päässään tuollaista vertailua käsitellä, koska 28x28 = 784 eli vertailtavia numeroita on liikaa muistettavaksi tai hahmotettavaksi. Data-analyysin menetelmien avulla on kuitenkin mahdollista hakea toistuvia piirteitä mm. niinkutsutun pääkomponenttianalyysin avulla. Menetelmästä käytetään yleensä sen englanninkielistä nimeä Primary Component Analysis (PCA). Pääkomponenttianalyysin idea on, että jokainen kuva esitetään pääkomponenttien summana. Esimerkiksi jos pääkomponentit ovat risti ja ympyrä, niin silloin voidaan sujuvasti esittää ristejä, palloja, ristejä ympyrän sisällä ja toki myös tyhjiä kuvia. Pääkomponenttien määrän voi päättää itse ja usein niitä halutaan kaksi, jotta data voidaan piirtää kahdessa ulottuvuudessa eli vaaka- ja pystyakselille (X ja Y). Tehdään siis pääkomponenttianalyysi merkkidatalle. Luettavuuden vuoksi merkit piirretään sen luokan numerona, johon ne kuuluvat. Ovatko samat numerot järjestäytyneet ryhmiksi? Näetkö kuvassa muuta rakennetta tai logiikkaa? Aiemmin mietimme, että miten käsin kirjoitettujen merkkien tunnistamista voisi helpottaa automaation avulla. Voisiko tätä menetelmää hyödyntää? Saat tehtyä analyysin ja piirrettyä tuloksen alla olevalla koodilla. Käy tuloskuvaa ajatuksella läpi ja koita vastata kysymyksiin. End of explanation """ # Tehdään satunnainen kuvaus rp = random_projection.SparseRandomProjection(n_components=2, random_state=42) X_projected = rp.fit_transform(X.reshape(-1,28*28)) # Piirretään kuvaus plot_embedding(X_projected, "Satunnainen kuvaus") """ Explanation: Huomasit varmasti, että samat numerot sijoitettiin lähelle toisiaan. 
Näin kuuluu mennäkin, koska niissä on hyvin samanlaiset pikselit. Mutta myös toisiaan muistuttavat numerot, kuten 0 ja 8, tulivat lähelle. Pääset itse kokeilemaan koneoppimisen toimintaa lähemmin tässä webbisivustossa: https://transcranial.github.io/keras-js/#/mnist-cnn Sivustolla on koneoppimisohjelma, joka tunnistaa numeroita. Se on rakennettu saman datan päälle, vaikkakin hyvin kehittyneitä menetelmiä käyttäen. Kuinka hyvin sivuston ohjelma erottaa toisistaan esimerkiksi 0:n ja 8:n? Vähän tilastollista ajattelua Data-analyysissa on aina pidettävä mielessä kriittinen lähestyminen ja tulosten merkitsevyyden pohtiminen. Yllä olevassa numeropilvessä on selvästi nähtävissä järjestystä, mutta toisaalta ihminen on taipuvainen näkemään järjestystä sielläkin, missä sitä ei ole - kuten vaikkapa lottoarvonnan tuloksissa. Tilastollisen merkitsevyyden testaaminen voi pitkälle vietynä olla monimutkaistakin. Mutta emme lähde tässä laskemaan p-arvoja tai tekemään muuta tilastotiedettä, koska yksinkertainenkin ratkaisu on jo hyvä. Jos tarvitsee varmistua siitä, että näkemämme järjestys ei ole pelkästään mielikuvituksen tuottamaa ja oikeasti alla on pelkästään satunnaista sotkua, niin voimme katsoa vastaavaa visualisointia, mutta rakentaa sen puhtaasti satunnaisen datan päälle. 
End of explanation """ # Tehdään luokittelija clf_dt = DecisionTreeClassifier() clf_dt.fit(X_train.reshape(-1,28*28), y_train) pred_dt = clf_dt.predict(X_test.reshape(-1,28*28)) # Piirretään tulokseksi alkupäästä luokituksia pltsize=1 plt.figure(figsize=(10*pltsize, pltsize)) for i in range(10): plt.subplot(1,10,i+1) plt.axis('off') plt.imshow(X_test[i,:,:]) plt.title(str(pred_dt[i]) + ' (' + str(y_test[i]) + ')') # Raportoidaan tulokset print('Luokiteltu', len(pred_dt), 'kuvaa, luokituksista oikein menneiden osuus on:', accuracy_score(y_test, pred_dt)*100, '%') print('Alla kuvat ja analysoidut luokat, oikeat luokat ovat suluissa') """ Explanation: Voidaanko sanoa, että tämä satunnainen sotku on selvästi eri tulos kuin aiemmin tekemämme pääkomponenttianalyysin tuottama? Merkkien tunnistaminen Seuraavaksi pääsemme itse asiaan. Olemme nyt pöyhineet dataa ja päässeet varmuuteen, että se on järkevää ja analysoitavissa. Rakennamme koneoppimismenetelmiä käyttäen nk. luokittelijan, joka kykenee oppimaan kuvien piirteet ja tunnistamaan niitä sen jälkeen. Nyt on syytä jälleen olla tarkkana: jos syötämme menetelmälle dataa, niin se varmasti osaa oppia ulkoa kyseisen datan kaikki yksityiskohdat. Mutta haluamme, että menetelmä "näkee metsän puilta", eli oppii tunnistamaan merkkien yleisiä hahmoja. Teemme siis koneoppimisen perustempun, eli jaamme datan kahteen osaan. Harjoitusdatalla opetetaan menetelmä, kun taas sen toimintaa testataan testidatalla. Koneoppijan tulee siis selvitä sellaisistakin kuvista, joita se ei ole aikaisemmin nähnyt. Tällä tavalla varmistetaan, että ei pelkästään opita ulkoa harjoitusdataa. Alla oleva koodi tekee luokittelun ja tulostaa esimerkiksi 10 ensimmäistä luokiteltua merkkiä. Luokitteluun käytetään klassista koneoppimisen menetelmää, nk. päätöspuuta. Kuinka hyvin menetelmä pärjää? 
End of explanation """ # Lisätään kuviin häiriötä noiselevel = 0.5 # TEHTÄVÄ: muuta tätä arvoa välillä 0.0 - 1.0 ja katso miten tulokset vaihtuvat X_test_noisy = np.zeros((len(X_test), 28, 28)) for i in range(len(X_test)): X_test_noisy[i,:,:] = (1-noiselevel)*X_test[i,:,:] + noiselevel*np.random.rand(28,28)*255 clf_dt_noisy = DecisionTreeClassifier() clf_dt_noisy.fit(X_train.reshape(-1,28*28), y_train) pred_dt_noisy = clf_dt_noisy.predict(X_test_noisy.reshape(-1,28*28)) # Piirretään tulokseksi alkupäästä luokituksia pltsize=1 plt.figure(figsize=(10*pltsize, pltsize)) for i in range(10): plt.subplot(1,10,i+1) plt.axis('off') plt.imshow(X_test_noisy[i,:,:]) plt.title(str(pred_dt_noisy[i]) + ' (' + str(y_test[i]) + ')') # Raportoidaan tulokset print('Luokiteltu', len(pred_dt_noisy), 'kuvaa, luokituksista oikein menneiden osuus on:', accuracy_score(y_test, pred_dt_noisy)*100, '%') print('Alla kuvat ja analysoidut luokat, oikeat luokat ovat suluissa') """ Explanation: Kyseinen koneoppimismenetelmä tuntuu toimivan ihan kohtuullisesti, vaikka ei pärjääkään ihmiselle tässä tehtävässä. Koneoppija joutuu koville Selvästikin päätöspuu osaa luokitella kuvia kohtuudella. Oikeassa elämässä esimerkiksi kuvadata ei ole aina priimalaatua. Mitä jos teemme tehtävästä asteen vaikeamman lisäämällä kuvaan häiriötä (kohinaa)? Lopputulos vastaa suunnilleen sitä, miltä näyttäisi vaikkapa huonolla kameralla vähässä valossa otettu kuva käsinkirjoitetusta tekstistä. 
End of explanation """ # Alustetaan neuroverkko (malli) model = Sequential() # Luodaan neuroverkkoon sisäänmenokerros model.add(Dense(20, input_dim=28*28)) model.add(Activation('relu')) # Luodaan neuroverkkoon uusi kerros (piilokerros) # TEHTÄVÄ: voit poistaa kommenttimerkit kolmen seuraavan rivin edestä ja myös säätää parametreja #model.add(Dense(50, input_dim=28*28)) #model.add(Activation('relu')) #model.add(Dropout(0.2)) # Luodaan neuroverkkoon uusi kerros (toinen piilokerros) # TEHTÄVÄ: voit poistaa kommenttimerkit kolmen seuraavan rivin edestä ja myös säätää parametreja #model.add(Dense(50)) #model.add(Activation('relu')) #model.add(Dropout(0.2)) # Luodaan neuroverkkoon tuloskerros model.add(Dense(10, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) # Muotoillaan data neuroverkon ymmärtämään muotoon (nk. one-hot encoding) nb_classes = 10 Y_train = np_utils.to_categorical(y_train, nb_classes) Y_test = np_utils.to_categorical(y_test, nb_classes) # Ajetaan neuroverkon opetus nb_epoch = 4 print('Neuroverkon opetus käynnissä, odota...\n') history = model.fit(X_train.reshape((-1,28*28)), Y_train, nb_epoch=nb_epoch, batch_size=32, verbose=2) scores = model.evaluate(X_test.reshape((-1,28*28)), Y_test, verbose=0) print('\nOpetus valmis!') """ Explanation: Kun sekoitetaan 50-50 alkuperäistä kuvaa ja kohinaa, niin koneoppija ei enää pärjää. Ihminen kyllä yhä kykenee numerot tunnistamaan. Missä menee koneoppijan kyky sietää kohinaa, paranisiko tilanne jos kohinan määrä tiputetaan kymmeneen prosenttiin, eli noiselevel=0.1? Palaa koodin ja kokeile erilaisia kohinan osuuksia sekä niiden vaikutusta. Ja lopuksi neuroverkkoja Koneoppimisesta ei voi nykyään puhua mainitsemassa neuroverkkoja. Ne ovat tällä hetkellä pinnalla oleva, vaikkakin pitkän historian omaava, koneoppimisen suuntaus, jossa käytetyt menetelmät ovat saaneet inspiraationsa ihmisen hermojärjestelmästä. 
Dataa syötetään sisään keinotekoiseen digitaaliseen neuroverkkoon ja verkon yhteyksiä säädetään niin, että se oppii tuottamaan datasta haluttuja signaaleja ulos. Toistamme äskeisen harjoituksen neuroverkkojen avulla. Ne ovat laskennallisesti raskaita, joten tällaisen kevyenkin mallin tekeminen vaatii aikansa. Voit myös vaihtoehtoisesti mennä kokeilemaan neuroverkkoja interaktiiviselle webbisivustolle: http://playground.tensorflow.org/ Siellä voi säätää verkon asetuksia ja muotoa, sekä katsoa, että miten verkon kyky oppia dataa (joukko sinisiä ja oransseja palloja) muuttuu. Alaspäin skrollaamalla löytyy sivulta lisätietoa verkon käytöstä ja toiminnasta. End of explanation """ # Luokitellaan dataa neuroverkolla predictions = model.predict(X_test.reshape((-1,28*28))) rounded = np.argmax(predictions, axis=1) # Piirretään tulokseksi alkupäästä luokituksia pltsize=1 plt.figure(figsize=(10*pltsize, pltsize)) for i in range(10): plt.subplot(1,10,i+1) plt.axis('off') plt.imshow(X_test[i,:,:]) plt.title(str(rounded[i]) + ' (' + str(y_test[i]) + ')') # Raportoidaan tulokset print('Luokiteltu', len(rounded), 'kuvaa, luokituksista oikein menneiden osuus on:', accuracy_score(y_test, rounded)*100, '%') print('Alla kuvat ja analysoidut luokat, oikeat luokat ovat suluissa') """ Explanation: Nyt on neuroverkko luotu. Seuraavaksi käytetään sitä dataan ja katsotaan kuinka se pärjää? End of explanation """
imatge-upc/activitynet-2016-cvprw
notebooks/01 Checking Downloaded Videos.ipynb
mit
import os import json DOWNLOAD_DIR = '/imatge/amontes/work/datasets/ActivityNet/v1.3/videos' videos = os.listdir(DOWNLOAD_DIR) videos_ids = [] for video in videos: videos_ids.append(video.split('.mp4')[0]) """ Explanation: Checking Downloaded Videos Due all the videos are located at YouTube, not all the videos have been downloaded (due to geografic restrictions, removed by the owner), so I must clean and store all the information of the successfully downloaded videos. First step is to chech which videos have been downloaded. End of explanation """ with open('../dataset/originals/activity_net.v1-3.min.json', 'r') as f: dataset = json.load(f) print('Number of videos of the original dataset: {} videos.'.format(len(dataset['database'].keys()))) for key in dataset['database'].keys(): if key not in videos_ids: del dataset['database'][key] print('Number of videos successfully downloaded: {} videos'.format(len(dataset['database'].keys()))) with open('../dataset/tmp/dataset_downloaded.json', 'w') as f: json.dump(dataset, f) """ Explanation: Now lets load the original dataset and remove all the videos which have not been downloaded. End of explanation """ with open('../dataset/tmp/dataset_downloaded_nb_frames.json', 'r') as f: dataset = json.load(f) # Removing the videos which has been impossible to read with OpenCV (a minor number) for key in dataset['database'].keys(): if dataset['database'][key]['num_frames'] is None: del dataset['database'][key] print('Number of videos successfully downloaded: {}'.format(len(dataset['database'].keys()))) """ Explanation: Now, a information very important to extract for each video are its number of frames. 
This would be very helpful for future computations, so lets run the script at python/tools/: sh python get_nb_frames.py ../../dataset/tmp/dataset_downloaded.json ../../dataset/tmp/dataset_downloaded_nb_frames.json End of explanation """ taxonomy = dataset['taxonomy'] all_node_ids = [x["nodeId"] for x in taxonomy] leaf_node_ids = [] for x in all_node_ids: is_parent = False for query_node in taxonomy: if query_node["parentId"]==x: is_parent = True if not is_parent: leaf_node_ids.append(x) leaf_nodes = [x for x in taxonomy if x["nodeId"] in leaf_node_ids] with open('../dataset/labels.txt', 'w') as f: # Write down the none activity f.write('{}\t{}\n'.format(0, 'none')) for i in range(len(leaf_nodes)): f.write('{}\t{}\n'.format(i+1, leaf_nodes[i]['nodeName'])) with open('../dataset/videos.json', 'w') as f: json.dump(dataset['database'], f) """ Explanation: Now with all the dataset I am going to work with, I'll store separetly the videos information and labels available. Because the labels are represented as a tree of activities, I'll get only the leaf nodes because this are the same labels the videos are tagged with. End of explanation """
iannesbitt/ml_bootcamp
Deep Learning/Tensorflow Basics.ipynb
mit
# "Tensorflow Basics" notebook (TensorFlow pre-1.0, Session-based API).
import tensorflow as tf

""" Explanation: <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a> Tensorflow Basics Remember to reference the video for full explanations, this is just a notebook for code reference. You can import the library: End of explanation """

# TensorFlow stores constants as tensor objects; the bare expressions
# below are notebook cells that echo the repr of each object.
hello = tf.constant('Hello World')
type(hello)
x = tf.constant(100)
type(x)

""" Explanation: Simple Constants Let's show how to create a simple constant with Tensorflow, which TF stores as a tensor object: End of explanation """

# A Session evaluates tensors; nothing is computed until sess.run().
sess = tf.Session()
sess.run(hello)
type(sess.run(hello))
sess.run(x)
type(sess.run(x))

""" Explanation: Running Sessions Now you can create a TensorFlow Session, which is a class for running TensorFlow operations. A Session object encapsulates the environment in which Operation objects are executed, and Tensor objects are evaluated. For example: End of explanation """

x = tf.constant(2)
y = tf.constant(3)

# Several operations evaluated within a single session context.
with tf.Session() as sess:
    print('Operations with Constants')
    print('Addition',sess.run(x+y))
    print('Subtraction',sess.run(x-y))
    print('Multiplication',sess.run(x*y))
    print('Division',sess.run(x/y))

""" Explanation: Operations You can line up multiple Tensorflow operations to be run during a session: End of explanation """

# Placeholders defer the actual values until run time; the values must
# be supplied through feed_dict when the graph is executed.
x = tf.placeholder(tf.int32)
y = tf.placeholder(tf.int32)

x
type(x)

""" Explanation: Placeholder You may not always have the constants right away, and you may be waiting for a constant to appear after a cycle of operations. tf.placeholder is a tool for this. It inserts a placeholder for a tensor that will be always fed. Important: This tensor will produce an error if evaluated. Its value must be fed using the feed_dict optional argument to Session.run(), Tensor.eval(), or Operation.run(). For example, for a placeholder of a matrix of floating point numbers: x = tf.placeholder(tf.float32, shape=(1024, 1024)) Here is an example for integer placeholders: End of explanation """

# NOTE(review): tf.sub and tf.mul are the pre-1.0 spellings; they were
# renamed tf.subtract and tf.multiply in TensorFlow 1.0, so this cell
# only runs on the old API that this notebook targets.
add = tf.add(x,y)
sub = tf.sub(x,y)
mul = tf.mul(x,y)

""" Explanation: Defining Operations End of explanation """

# Feed concrete values for the placeholders at run time.
d = {x:20,y:30}

with tf.Session() as sess:
    print('Operations with Constants')
    print('Addition',sess.run(add,feed_dict=d))
    print('Subtraction',sess.run(sub,feed_dict=d))
    print('Multiplication',sess.run(mul,feed_dict=d))

""" Explanation: Running operations with variable input: End of explanation """

import numpy as np

# Make sure to use floats here, int64 will cause an error.
a = np.array([[5.0,5.0]])
b = np.array([[2.0],[2.0]])

a
a.shape
b
b.shape

mat1 = tf.constant(a)
mat2 = tf.constant(b)

""" Explanation: Now let's see an example of a more complex operation, using Matrix Multiplication. First we need to create the matrices: End of explanation """

# (1x2) x (2x1) matrix product -> a 1x1 result.
matrix_multi = tf.matmul(mat1,mat2)

""" Explanation: The matrix multiplication operation: End of explanation """

with tf.Session() as sess:
    result = sess.run(matrix_multi)
    print(result)

""" Explanation: Now run the session to perform the Operation: End of explanation """
LSSTC-DSFP/LSSTC-DSFP-Sessions
Sessions/Session01/Day4/IntroToMachineLearning.ipynb
mit
import numpy as np import matplotlib.pyplot as plt %matplotlib inline """ Explanation: Introduction to Machine Learning: Examples of Unsupervised and Supervised Machine-Learning Algorithms Version 0.1 Broadly speaking, machine-learning methods constitute a diverse collection of data-driven algorithms designed to classify/characterize/analyze sources in multi-dimensional spaces. The topics and studies that fall under the umbrella of machine learning is growing, and there is no good catch-all definition. The number (and variation) of algorithms is vast, and beyond the scope of these exercises. While we will discuss a few specific algorithms today, more importantly, we will explore the scope of the two general methods: unsupervised learning and supervised learning and introduce the powerful (and dangerous?) Python package scikit-learn. By AA Miller (Jet Propulsion Laboratory, California Institute of Technology.) (c) 2016 California Institute of Technology. Government sponsorship acknowledged. End of explanation """ # execute dummy code here from sklearn import datasets from sklearn.ensemble import RandomForestClassifier iris = datasets.load_iris() RFclf = RandomForestClassifier().fit(iris.data, iris.target) """ Explanation: Problem 1) Introduction to scikit-learn At the most basic level, scikit-learn makes machine learning extremely easy within Python. By way of example, here is a short piece of code that builds a complex, non-linear model to classify sources in the Iris data set that we learned about yesterday: from sklearn import datasets from sklearn.ensemble import RandomForestClassifier iris = datasets.load_iris() RFclf = RandomForestClassifier().fit(iris.data, iris.target) Those 4 lines of code have constructed a model that is superior to any system of hard cuts that we could have encoded while looking at the multidimensional space. This can be fast as well: execute the dummy code in the cell below to see how "easy" machine-learning is with scikit-learn. 
End of explanation """ print(np.shape( # complete print( # complete """ Explanation: Generally speaking, the procedure for scikit-learn is uniform across all machine-learning algorithms. Models are accessed via the various modules (ensemble, SVM, neighbors, etc), with user-defined tuning parameters. The features (or data) for the models are stored in a 2D array, X, with rows representing individual sources and columns representing the corresponding feature values. [In a minority of cases, X, represents a similarity or distance matrix where each entry represents the distance to every other source in the data set.] In cases where there is a known classification or scalar value (typically supervised methods), this information is stored in a 1D array y. Unsupervised models are fit by calling .fit(X) and supervised models are fit by calling .fit(X, y). In both cases, predictions for new observations, Xnew, can be obtained by calling .predict(Xnew). Those are the basics and beyond that, the details are algorithm specific, but the documentation for essentially everything within scikit-learn is excellent, so read the docs. To further develop our intuition, we will now explore the Iris dataset a little further. Problem 1a What is the pythonic type of iris? You likely haven't encountered a scikit-learn Bunch before. It's functionality is essentially the same as a dictionary. Problem 1b What are the keys of iris? Most importantly, iris contains data and target values. These are all you need for scikit-learn, though the feature and target names and description are useful. Problem 1c What is the shape and content of the iris data? End of explanation """ print(np.shape( # complete print( # complete """ Explanation: Problem 1d What is the shape and content of the iris target? 
End of explanation """ plt.scatter( # complete plt.xlabel('sepal length') plt.ylabel('sepal width') """ Explanation: Finally, as a baseline for the exercises that follow, we will now make a simple 2D plot showing the separation of the 3 classes in the iris dataset. This plot will serve as the reference for examining the quality of the clustering algorithms. Problem 1e Make a scatter plot showing sepal length vs. sepal width for the iris data set. Color the points according to their respective classes. Hint - determine which columns of data correspond to sepal length and sepal width. End of explanation """ from sklearn.cluster import KMeans Kcluster = KMeans( # complete # complete plt.figure() plt.scatter( # complete plt.xlabel('sepal length') plt.ylabel('sepal width') Kcluster = KMeans( # complete # complete plt.figure() plt.scatter( # complete plt.xlabel('sepal length') plt.ylabel('sepal width') """ Explanation: Problem 2) Unsupervised Machine Learning Unsupervised machine learning, sometimes referred to as clustering or data mining, aims to group or classify sources in the multidimensional feature space. The "unsupervised" comes from the fact that there are no target labels provided to the algorithm, so the machine is asked to cluster the data "on its own." The lack of labels means there is no (simple) method for validating the accuracy of the solution provided by the machine (though sometimes simple examination can show the results are terrible). For this reason [note - this is my (AAM) opinion and there many be many others who disagree], unsupervised methods are not particularly useful for astronomy. Supposing one did find some useful clustering structure, an adversarial researcher could always claim that the current feature space does not accurately capture the physics of the system and as such the clustering result is not interesting or, worse, erroneous. 
The one potentially powerful exception to this broad statement is outlier detection, which can be a branch of both unsupervised and supervised learning. Finding weirdo objects is an astronomical pastime, and there are unsupervised methods that may help in that regard in the LSST era. To begin today we will examine one of the most famous, and simple, clustering algorithms: $k$-means. $k$-means clustering looks to identify $k$ convex clusters, where $k$ is a user defined number. And here-in lies the rub: if we truly knew the number of clusters in advance, we likely wouldn't need to perform any clustering in the first place. This is the major downside to $k$-means. Operationally, pseudocode for the algorithm can be summarized as the following: initiate search by identifying k points (i.e. the cluster centers) loop assign each point in the data set to the closest cluster center calculate new cluster centers based on mean position of all cluster points if diff(new center - old center) &lt; threshold: stop (i.e. clusters are defined) The threshold is defined by the user, though in some cases the total number of iterations is also An advantage of $k$-means is that the solution will always converge, though the solution may only be a local minimum. Disadvantages include the assumption of convexity, i.e. difficult to capture complex geometry, and the curse of dimensionality (though you can combat that with dimensionality reduction after yesterday). In scikit-learn the KMeans algorithm is implemented as part of the sklearn.cluster module. Problem 2a Fit two different $k$-means models to the iris data, one with 2 clusters and one with 3 clusters. Plot the resulting clusters in the sepal length-sepal width plane (same plot as above). How do the results compare to the true classifications? Hint - the .labels_ attribute of the KMeans object will return the clusters measured by the algorithm. 
End of explanation """ rs = 14 Kcluster1 = KMeans(# complete plt.figure() plt.scatter(# complete plt.xlabel('sepal length') plt.ylabel('sepal width') """ Explanation: With 3 clusters the algorithm does a good job of separating the three classes. However, without the a priori knowledge that there are 3 different types of iris, the 2 cluster solution would appear to be superior. Problem 2b How do the results change if the 3 cluster model is called with n_init = 1 and init = 'random' options? Use rs for the random state [this allows me to cheat in service of making a point]. *Note - the respective defaults for these two parameters are 10 and k-means++, respectively. Read the docs to see why these choices are, likely, better than those in 2b. End of explanation """ print("feature\t\t\tmean\tstd\tmin\tmax") for featnum, feat in enumerate(iris.feature_names): print("{:s}\t{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}".format( # complete """ Explanation: A random aside that is not particularly relevant here $k$-means evaluates the Euclidean distance between individual sources and cluster centers, thus, the magnitude of the individual features has a strong effect on the final clustering outcome. Problem 2c Calculate the mean, standard deviation, min, and max of each feature in the iris data set. Based on these summaries, which feature is most important for clustering? End of explanation """ from sklearn.preprocessing import StandardScaler scaler = StandardScaler().fit( # complete Kcluster = KMeans( # complete plt.figure() plt.scatter( # complete plt.xlabel('sepal length') plt.ylabel('sepal width') """ Explanation: Petal length has the largest range and standard deviation, thus, it will have the most "weight" when determining the $k$ clusters. The truth is that the iris data set is fairly small and straightfoward. Nevertheless, we will now examine the clustering results after re-scaling the features. 
[Some algorithms, cough Support Vector Machines cough, are notoriously sensitive to the feature scaling, so it is important to know about this step.] Imagine you are classifying stellar light curves: the data set will include contact binaries with periods of $\sim 0.1 \; \mathrm{d}$ and Mira variables with periods of $\gg 100 \; \mathrm{d}$. Without re-scaling, this feature that covers 4 orders of magnitude may dominate all others in the final model projections. The two most common forms of re-scaling are to rescale to a guassian with mean $= 0$ and variance $= 1$, or to rescale the min and max of the feature to $[0, 1]$. The best normalization is problem dependent. The sklearn.preprocessing module makes it easy to re-scale the feature set. It is essential that the same scaling used for the training set be used for all other data run through the model. The testing, validation, and field observations cannot be re-scaled independently. This would result in meaningless final classifications/predictions. Problem 2d Re-scale the features to normal distributions, and perform $k$-means clustering on the iris data. How do the results compare to those obtained earlier? Hint - you may find 'StandardScaler()' within the sklearn.preprocessing module useful. End of explanation """ from sklearn.cluster import DBSCAN dbs = DBSCAN( # complete dbs.fit( # complete dbs_outliers = # complete plt.figure() plt.scatter( # complete plt.scatter( # complete plt.xlabel('sepal length') plt.ylabel('sepal width') """ Explanation: These results are almost identical to those obtained without scaling. This is due to the simplicity of the iris data set. How do I test the accuracy of my clusters? Essentially - you don't. There are some methods that are available, but they essentially compare clusters to labeled samples, and if the samples are labeled it is likely that supervised learning is more useful anyway. 
If you are curious, scikit-learn does provide some built-in functions for analyzing clustering, but again, it is difficult to evaluate the validity of any newly discovered clusters. What if I don't know how many clusters are present in the data? An excellent question, as you will almost never know this a priori. Many algorithms, like $k$-means, do require the number of clusters to be specified, but some other methods do not. As an example DBSCAN. In brief, DBSCAN requires two parameters: minPts, the minimum number of points necessary for a cluster, and $\epsilon$, a distance measure. Clusters are grown by identifying core points, objects that have at least minPts located within a distance $\epsilon$. Reachable points are those within a distance $\epsilon$ of at least one core point but less than minPts core points. Identically, these points define the outskirts of the clusters. Finally, there are also outliers which are points that are $> \epsilon$ away from any core points. Thus, DBSCAN naturally identifies clusters, does not assume clusters are convex, and even provides a notion of outliers. The downsides to the algorithm are that the results are highly dependent on the two tuning parameters, and that clusters of highly different densities can be difficult to recover (because $\epsilon$ and minPts is specified for all clusters. In scitkit-learn the DBSCAN algorithm is part of the sklearn.cluster module. $\epsilon$ and minPts are set by eps and min_samples, respectively. Problem 2e Cluster the iris data using DBSCAN. Play around with the tuning parameters to see how they affect the final clustering results. How does the use of DBSCAN compare to $k$-means? Can you obtain 3 clusters with DBSCAN? If not, given the knowledge that the iris dataset has 3 classes - does this invalidate DBSCAN as a viable algorithm? Note - DBSCAN labels outliers as $-1$, and thus, plt.scatter(), will plot all these points as the same color. 
End of explanation """ from astroquery.sdss import SDSS # enables direct queries to the SDSS database GALquery = """SELECT TOP 10000 p.dered_u - p.dered_g as ug, p.dered_g - p.dered_r as gr, p.dered_g - p.dered_i as gi, p.dered_g - p.dered_z as gz, p.petroRad_i, p.petroR50_i, p.deVAB_i FROM PhotoObjAll AS p JOIN specObjAll s ON s.bestobjid = p.objid WHERE p.mode = 1 AND s.sciencePrimary = 1 AND p.clean = 1 AND p.type = 3 """ SDSSgals = SDSS.query_sql(GALquery) SDSSgals """ Explanation: I was unable to obtain 3 clusters with DBSCAN. While these results are, on the surface, worse than what we got with $k$-means, my suspicion is that the 4 features do not adequately separate the 3 classes. [See - a nayseyer can always make that argument.] This is not a problem for DBSCAN as an algorithm, but rather, evidence that no single algorithm works well in all cases. Challenge Problem) Cluster SDSS Galaxy Data The following query will select 10k likely galaxies from the SDSS database and return the results of that query into an astropy.Table object. (For now, if you are not familiar with the SDSS DB schema, don't worry about this query, just know that it returns a bunch of photometric features.) End of explanation """ # complete """ Explanation: I have used my own domain knowledge to specificly choose features that may be useful when clustering galaxies. If you know a bit about SDSS and can think of other features that may be useful feel free to add them to the query. One nice feature of astropy tables is that they can readily be turned into pandas DataFrames, which can in turn easily be turned into a sklearn X array with NumPy. For example: X = np.array(SDSSgals.to_pandas()) And you are ready to go. Challenge Problem Using the SDSS dataset above, identify interesting clusters within the data [this is intentionally very open ended, if you uncover anything especially exciting you'll have a chance to share it with the group]. 
Feel free to use the algorithms discussed above, or any other packages available via sklearn. Can you make sense of the clusters in the context of galaxy evolution? Hint - don't fret if you know nothing about galaxy evolution (neither do I!). Just take a critical look at the clusters that are identified End of explanation """ from sklearn.neighbors import KNeighborsClassifier KNNclf = KNeighborsClassifier( # complete preds = # complete plt.figure() plt.scatter( # complete KNNclf = KNeighborsClassifier(# complete preds = # complete plt.figure() plt.scatter( # complete """ Explanation: Note - the above solution seems to separate out elliptical galaxies from blue star forming galaxies, however, the results are highly, highly dependent upon the tuning parameters. Problem 3) Supervised Machine Learning Supervised machine learning, on the other hand, aims to predict a target class or produce a regression result based on the location of labelled sources (i.e. the training set) in the multidimensional feature space. The "supervised" comes from the fact that we are specifying the allowed outputs from the model. As there are labels available for the training set, it is possible to estimate the accuracy of the model (though there are generally important caveats about generalization, which we will explore in further detail later). The details and machinations of supervised learning will be explored further during the following break-out session. Here, we will simply introduce some of the basics as a point of comparison to unsupervised machine learning. We will begin with a simple, but nevertheless, elegant algorithm for classification and regression: $k$-nearest-neighbors ($k$NN). In brief, the classification or regression output is determined by examining the $k$ nearest neighbors in the training set, where $k$ is a user defined number. 
Typically, though not always, distances between sources are Euclidean, and the final classification is assigned to whichever class has a plurality within the $k$ nearest neighbors (in the case of regression, the average of the $k$ neighbors is the output from the model). We will experiment with the steps necessary to optimize $k$, and other tuning parameters, in the detailed break-out problem. In scikit-learn the KNeighborsClassifer algorithm is implemented as part of the sklearn.neighbors module. Problem 3a Fit two different $k$NN models to the iris data, one with 3 neighbors and one with 10 neighbors. Plot the resulting class predictions in the sepal length-sepal width plane (same plot as above). How do the results compare to the true classifications? Is there any reason to be suspect of this procedure? Hint - after you have constructed the model, it is possible to obtain model predictions using the .predict() method, which requires a feature array, same features and order as the training set, as input. Hint that isn't essential, but is worth thinking about - should the features be re-scaled in any way? End of explanation """ from sklearn.cross_validation import cross_val_predict CVpreds = cross_val_predict( # complete plt.figure() plt.scatter( # complete print("The accuracy of the kNN = 5 model is ~{:.4}".format( # complete CVpreds50 = cross_val_predict( # complete print("The accuracy of the kNN = 50 model is ~{:.4}".format( # complete """ Explanation: These results are almost identical to the training classifications. However, we have cheated! In this case we are evaluating the accuracy of the model (98% in this case) using the same data that defines the model. Thus, what we have really evaluated here is the training error. The relevant parameter, however, is the generalization error: how accurate are the model predictions on new data? Without going into too much detail, we will test this using cross validation (CV), which will be explored in more detail later. 
In brief, CV provides predictions on the training set using a subset of the data to generate a model that predicts the class of the remaining sources. Using cross_val_predict, we can get a better sense of the model accuracy. Predictions from cross_val_predict are produced in the following manner: from sklearn.cross_validation import cross_val_predict CVpreds = cross_val_predict(sklearn.model(), X, y) where sklearn.model() is the desired model, X is the feature array, and y is the label array. Problem 3b Produce cross-validation predictions for the iris dataset and a $k$NN with 5 neighbors. Plot the resulting classifications, as above, and estimate the accuracy of the model as applied to new data. How does this accuracy compare to a $k$NN with 50 neighbors? End of explanation """ # complete """ Explanation: While it is useful to understand the overall accuracy of the model, it is even more useful to understand the nature of the misclassifications that occur. Problem 3c Calculate the accuracy for each class in the iris set, as determined via CV for the $k$NN = 50 model. End of explanation """ from sklearn.metrics import confusion_matrix cm = confusion_matrix( # complete print(cm) """ Explanation: We just found that the classifier does a much better job classifying setosa and versicolor than it does for virginica. The main reason for this is some viginica flowers lie far outside the main virginica locus, and within predominantly versicolor "neighborhoods". In addition to knowing the accuracy for the individual classes, it is also useful to know class predictions for the misclassified sources, or in other words where there is "confusion" for the classifier. The best way to summarize this information is with a confusion matrix. In a confusion matrix, one axis shows the true class and the other shows the predicted class. For a perfect classifier all of the power will be along the diagonal, while confusion is represented by off-diagonal signal. 
Like almost everything else we have encountered during this exercise, scikit-learn makes it easy to compute a confusion matrix. This can be accomplished with the following: from sklearn.metrics import confusion_matrix cm = confusion_matrix(y_test, y_prep) Problem 3d Calculate the confusion matrix for the iris training set and the $k$NN = 50 model. End of explanation """ normalized_cm = cm.astype('float')/cm.sum(axis = 1)[:,np.newaxis] normalized_cm """ Explanation: From this representation, we see right away that most of the virginica that are being misclassifed are being scattered into the versicolor class. However, this representation could still be improved: it'd be helpful to normalize each value relative to the total number of sources in each class, and better still, it'd be good to have a visual representation of the confusion matrix. This visual representation will be readily digestible. Now let's normalize the confusion matrix. Problem 3e Calculate the normalized confusion matrix. Be careful, you have to sum along one axis, and then divide along the other. Anti-hint: This operation is actually straightforward using some array manipulation that we have not covered up to this point. Thus, we have performed the necessary operations for you below. If you have extra time, you should try to develop an alternate way to arrive at the same normalization. End of explanation """ plt.imshow( # complete """ Explanation: The normalization makes it easier to compare the classes, since each class has a different number of sources. Now we can procede with a visual representation of the confusion matrix. This is best done using imshow() within pyplot. You will also need to plot a colorbar, and labeling the axes will also be helpful. Problem C3 Plot the confusion matrix. Be sure to label each of the axeses. Hint - you might find the sklearn confusion matrix tutorial helpful for making a nice plot. End of explanation """
rebeccabilbro/viz
animation/lorenz_ipywidgets.ipynb
mit
%matplotlib inline from ipywidgets import interact, interactive from IPython.display import clear_output, display, HTML import numpy as np from scipy import integrate from matplotlib import pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib.colors import cnames from matplotlib import animation """ Explanation: Exploring the Lorenz System of Differential Equations In this Notebook we explore the Lorenz system of differential equations: $$ \begin{aligned} \dot{x} & = \sigma(y-x) \ \dot{y} & = \rho x - y - xz \ \dot{z} & = -\beta z + xy \end{aligned} $$ This is one of the classic systems in non-linear differential equations. It exhibits a range of different behaviors as the parameters ($\sigma$, $\beta$, $\rho$) are varied. Imports First, we import the needed things from IPython, NumPy, Matplotlib and SciPy. End of explanation """ def solve_lorenz(N=10, angle=0.0, max_time=4.0, sigma=10.0, beta=8./3, rho=28.0): fig = plt.figure() ax = fig.add_axes([0, 0, 1, 1], projection='3d') ax.axis('off') # prepare the axes limits ax.set_xlim((-25, 25)) ax.set_ylim((-35, 35)) ax.set_zlim((5, 55)) def lorenz_deriv(x_y_z, t0, sigma=sigma, beta=beta, rho=rho): """Compute the time-derivative of a Lorenz system.""" x, y, z = x_y_z return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z] # Choose random starting points, uniformly distributed from -15 to 15 np.random.seed(1) x0 = -15 + 30 * np.random.random((N, 3)) # Solve for the trajectories t = np.linspace(0, max_time, int(250*max_time)) x_t = np.asarray([integrate.odeint(lorenz_deriv, x0i, t) for x0i in x0]) # choose a different color for each trajectory colors = plt.cm.jet(np.linspace(0, 1, N)) for i in range(N): x, y, z = x_t[i,:,:].T lines = ax.plot(x, y, z, '-', c=colors[i]) plt.setp(lines, linewidth=2) ax.view_init(30, angle) plt.show() return t, x_t """ Explanation: Computing the trajectories and plotting the result We define a function that can integrate the differential equations numerically and then 
plot the solutions. This function has arguments that control the parameters of the differential equation ($\sigma$, $\beta$, $\rho$), the numerical integration (N, max_time) and the visualization (angle). End of explanation """ t, x_t = solve_lorenz(angle=0, N=10) """ Explanation: Let's call the function once to view the solutions. For this set of parameters, we see the trajectories swirling around two points, called attractors. End of explanation """ w = interactive(solve_lorenz, angle=(0.,360.), N=(0,50), sigma=(0.0,50.0), rho=(0.0,50.0)) display(w) """ Explanation: Using IPython's interactive function, we can explore how the trajectories behave as we change the various parameters. End of explanation """ t, x_t = w.result w.kwargs """ Explanation: The object returned by interactive is a Widget object and it has attributes that contain the current result and arguments: End of explanation """ xyz_avg = x_t.mean(axis=1) xyz_avg.shape """ Explanation: After interacting with the system, we can take the result and perform further computations. In this case, we compute the average positions in $x$, $y$ and $z$. End of explanation """ plt.hist(xyz_avg[:,0]) plt.title('Average $x(t)$') plt.hist(xyz_avg[:,1]) plt.title('Average $y(t)$') """ Explanation: Creating histograms of the average positions (across different trajectories) show that on average the trajectories swirl about the attractors. End of explanation """
igabr/Metis_Projects_Chicago_2017
04-Project-Fletcher/Phases/Phase_4/Phase_4_Notebook.ipynb
mit
gabr_tweets = unpickle_object("gabr_ibrahim_tweets_LDA_Complete.pkl") gabr_tweets[0]['gabr_ibrahim'].keys() #just to refresh our mind of the keys in the sub-dictionary """ Explanation: So far, we have two databases: 2nd degree connection database where all handles have valid LDA Analysis. A database with my tweets and associated LDA analysis. The LDA method was quite powerful for potential followers, distilling down their entire corpus to a few key terms. Let's now do some TF-IDF and KMeans clustering to see if we find similar results to LDA. In fact, later in the notebook, I will take the intersection of the LDA Analysis results and TF-IDF results. This intersection will represent words/topics that were picked up by BOTH models for a particular handle's tweets. This will give us the most robust results! End of explanation """ temp_gabr_df = pd.DataFrame.from_dict(gabr_tweets[0], orient="index") temp_gabr_df = filtration(temp_gabr_df, "content") gabr_tweets_filtered_1 = dataframe_to_dict(temp_gabr_df) clean_tweet_list = [] totalvocab_tokenized = [] totalvocab_stemmed = [] for tweet in gabr_tweets_filtered_1[0]['gabr_ibrahim']['content']: clean_tweet = "" to_process = nlp(tweet) for token in to_process: if token.is_space: continue elif token.is_punct: continue elif token.is_stop: continue elif token.is_digit: continue elif len(token) == 1: continue elif len(token) == 2: continue else: clean_tweet += str(token.lemma_) + ' ' totalvocab_tokenized.append(str(token.lemma_)) totalvocab_stemmed.append(str(token.lemma_)) clean_tweet_list.append(clean_tweet) #just going to add this to the dictionary so we can do the second round of filtration gabr_tweets_filtered_1[0]['gabr_ibrahim']['temp_tfidf'] = clean_tweet_list temp_gabr_df = pd.DataFrame.from_dict(gabr_tweets_filtered_1[0], orient='index') temp_gabr_df = filtration(temp_gabr_df, 'temp_tfidf') gabr_tweets_filtered_2 = dataframe_to_dict(temp_gabr_df) clean_tweet_list = 
gabr_tweets_filtered_2[0]['gabr_ibrahim']["temp_tfidf"] del gabr_tweets_filtered_2[0]["gabr_ibrahim"]["temp_tfidf"] # we will add back TF-IDF analysis later! vocab_frame = pd.DataFrame({'words': totalvocab_tokenized}, index = totalvocab_stemmed) print('There are ' + str(vocab_frame.shape[0]) + ' items in vocab_frame') #define vectorizer parameters tfidf_vectorizer = TfidfVectorizer(max_features=200000, stop_words='english', ngram_range=(0,2)) tfidf_matrix = tfidf_vectorizer.fit_transform(clean_tweet_list) #fit the vectorizer to synopses print(tfidf_matrix.shape) terms = tfidf_vectorizer.get_feature_names() num_clusters = 20 km = KMeans(n_clusters=num_clusters, n_jobs=-1, random_state=200) km.fit(tfidf_matrix) clusters = km.labels_.tolist() order_centroids = km.cluster_centers_.argsort()[:, ::-1] cluster_dict = dict() for i in range(num_clusters): for ind in order_centroids[i, :20]: #replace 6 with n words per cluster word = str(vocab_frame.ix[terms[ind].split(' ')].values.tolist()[0][0]) if i not in cluster_dict: cluster_dict[i] = [word] else: cluster_dict[i].append(word) cluster_dict.keys() #here we see all 20 clusters. cluster_dict[0] #words in cluster 1 cluster_dict[1] #words in cluster 2 cluster_dict[2] #words in cluster 3 #Now lets make our tfidf Counter! 
cluster_values = [] for k, v in cluster_dict.items(): cluster_values.extend(v) counter_gabr_tfidf = Counter(cluster_values) counter_gabr_tfidf gabr_tweets_filtered_2[0]['gabr_ibrahim']["tfid_counter"] = counter_gabr_tfidf gabr_tfidf_counter = gabr_tweets_filtered_2[0]['gabr_ibrahim']["tfid_counter"] gabr_lda_counter = gabr_tweets_filtered_2[0]['gabr_ibrahim']["LDA"] gabr_tfidf_set = set() gabr_lda_set = set() for key, value in gabr_tfidf_counter.items(): gabr_tfidf_set.add(key) for key, value in gabr_lda_counter.items(): gabr_lda_set.add(key) intersection = gabr_tfidf_set.intersection(gabr_lda_set) gabr_tweets_filtered_2[0]['gabr_ibrahim']["lda_tfid_intersection"] = intersection pickle_object(gabr_tweets_filtered_2, "FINAL_GABR_DATABASE_LDA_TFIDF_VERIFIED") """ Explanation: I will now create a TF-IDF model for my tweets. Using K-Means Clustering with TF-IDF, I will cluster my tweet's into 20 centroids. From each of these centroids, I will extract 20 words. These words will be placed in a counter dictionary. TF-IDF KMeans - segemented by individual tweet! I will make use of spacy again in order to ensure we are giving the 'purest' for of our tweets to the tf-idf vectorizer. You will see two lists below relating to vocabulary. I will use these lists later to create a usefull dictionary that will help identify particular words within a centroid by index! End of explanation """
darkomen/TFG
medidas/18082015/Análisis de datos Ensayo 1.ipynb
cc0-1.0
#Importamos las librerías utilizadas import numpy as np import pandas as pd import seaborn as sns #Mostramos las versiones usadas de cada librerías print ("Numpy v{}".format(np.__version__)) print ("Pandas v{}".format(pd.__version__)) print ("Seaborn v{}".format(sns.__version__)) #Abrimos el fichero csv con los datos de la muestra datos = pd.read_csv('ensayo1.CSV') %pylab inline #Almacenamos en una lista las columnas del fichero con las que vamos a trabajar columns = ['Diametro X','Diametro Y', 'RPM TRAC'] #Mostramos un resumen de los datos obtenidoss datos[columns].describe() #datos.describe().loc['mean',['Diametro X [mm]', 'Diametro Y [mm]']] """ Explanation: Análisis de los datos obtenidos Uso de ipython para el análsis y muestra de los datos obtenidos durante la producción.Se implementa un regulador experto. Los datos analizados son del día 18 de Agosto del 2015 Los datos del experimento: * Hora de inicio: 16:08 * Hora final : 16:35 * Filamento extruido: * $T: 150ºC$ * $V_{min} tractora: 1.5 mm/s$ * $V_{max} tractora: 3.4 mm/s$ * Los incrementos de velocidades en las reglas del sistema experto son distintas: * En los caso 3 y 5 se mantiene un incremento de +2. * En los casos 4 y 6 se reduce el incremento a -1. End of explanation """ datos.ix[:, "Diametro X":"Diametro Y"].plot(figsize=(16,10),ylim=(0.5,3)).hlines([1.85,1.65],0,3500,colors='r') #datos['RPM TRAC'].plot(secondary_y='RPM TRAC') datos.ix[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes') """ Explanation: Representamos ambos diámetro y la velocidad de la tractora en la misma gráfica End of explanation """ plt.scatter(x=datos['Diametro X'], y=datos['Diametro Y'], marker='.') """ Explanation: Con esta segunda aproximación se ha conseguido estabilizar los datos. Se va a tratar de bajar ese porcentaje. Como cuarta aproximación, vamos a modificar las velocidades de tracción. El rango de velocidades propuesto es de 1.5 a 5.3, manteniendo los incrementos del sistema experto como en el actual ensayo. 
Comparativa de Diametro X frente a Diametro Y para ver el ratio del filamento End of explanation """ datos_filtrados = datos[(datos['Diametro X'] >= 0.9) & (datos['Diametro Y'] >= 0.9)] #datos_filtrados.ix[:, "Diametro X":"Diametro Y"].boxplot(return_type='axes') """ Explanation: Filtrado de datos Las muestras tomadas $d_x >= 0.9$ or $d_y >= 0.9$ las asumimos como error del sensor, por ello las filtramos de las muestras tomadas. End of explanation """ plt.scatter(x=datos_filtrados['Diametro X'], y=datos_filtrados['Diametro Y'], marker='.') """ Explanation: Representación de X/Y End of explanation """ ratio = datos_filtrados['Diametro X']/datos_filtrados['Diametro Y'] ratio.describe() rolling_mean = pd.rolling_mean(ratio, 50) rolling_std = pd.rolling_std(ratio, 50) rolling_mean.plot(figsize=(12,6)) # plt.fill_between(ratio, y1=rolling_mean+rolling_std, y2=rolling_mean-rolling_std, alpha=0.5) ratio.plot(figsize=(12,6), alpha=0.6, ylim=(0.5,1.5)) """ Explanation: Analizamos datos del ratio End of explanation """ Th_u = 1.85 Th_d = 1.65 data_violations = datos[(datos['Diametro X'] > Th_u) | (datos['Diametro X'] < Th_d) | (datos['Diametro Y'] > Th_u) | (datos['Diametro Y'] < Th_d)] data_violations.describe() data_violations.plot(subplots=True, figsize=(12,12)) """ Explanation: Límites de calidad Calculamos el número de veces que traspasamos unos límites de calidad. $Th^+ = 1.85$ and $Th^- = 1.65$ End of explanation """
jbwhit/WSP-312-Tips-and-Tricks
notebooks/02-diff.ipynb
mit
# uncomment the bottom line in this cell, change the final line of # the loaded script to `mpld3.display()` (instead of show). # %load http://mpld3.github.io/_downloads/linked_brush.py """ Explanation: Interactive Notebook Possibilities http://mpld3.github.io/examples/linked_brush.html End of explanation """ import numpy as np import matplotlib import matplotlib.pyplot as plt from sklearn.datasets import load_iris import mpld3 from mpld3 import plugins, utils data = load_iris() X = data.data y = data.target # dither the data for clearer plotting X += 0.1 * np.random.random(X.shape) fig, ax = plt.subplots(4, 4, sharex="col", sharey="row", figsize=(8, 8)) fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95, hspace=0.1, wspace=0.1) for i in range(4): for j in range(4): points = ax[3 - i, j].scatter(X[:, j], X[:, i], c=y, s=40, alpha=0.6) # remove tick labels for axi in ax.flat: for axis in [axi.xaxis, axi.yaxis]: axis.set_major_formatter(plt.NullFormatter()) # Here we connect the linked brush plugin plugins.connect(fig, plugins.LinkedBrush(points)) # mpld3.show() mpld3.display() %matplotlib inline import mpld3 import matplotlib.pyplot as plt import numpy as np import seaborn as sns sns.set_context('poster') # sns.set_style('whitegrid') sns.set_style('darkgrid') plt.rcParams['figure.figsize'] = 12, 8 # plotsize def sinplot(flip=1, ax=None): """Demo plot from seaborn.""" x = np.linspace(0, 14, 500) for i in range(1, 7): ax.plot(x, np.sin(-1.60 + x + i * .5) * (7 - i) * flip, label=str(i)) # mpld3.enable_notebook() fig, ax = plt.subplots(figsize=(12, 8)) sinplot(ax=ax) ax.set_ylabel("y-label") ax.set_xlabel("x-label") fig.tight_layout() mpld3.disable_notebook() """ Explanation: Linked Brushing Example This example uses the standard Iris dataset and plots it with a linked brushing tool for dynamically exploring the data. The paintbrush button at the bottom left can be used to enable and disable the behavior. End of explanation """
dnc1994/MachineLearning-UW
ml-classification/blank/module-4-linear-classifier-regularization-assignment-blank.ipynb
mit
from __future__ import division import graphlab """ Explanation: Logistic Regression with L2 regularization The goal of this second notebook is to implement your own logistic regression classifier with L2 regularization. You will do the following: Extract features from Amazon product reviews. Convert an SFrame into a NumPy array. Write a function to compute the derivative of log likelihood function with an L2 penalty with respect to a single coefficient. Implement gradient ascent with an L2 penalty. Empirically explore how the L2 penalty can ameliorate overfitting. Fire up GraphLab Create Make sure you have the latest version of GraphLab Create. Upgrade by pip install graphlab-create --upgrade See this page for detailed instructions on upgrading. End of explanation """ products = graphlab.SFrame('amazon_baby_subset.gl/') """ Explanation: Load and process review dataset For this assignment, we will use the same subset of the Amazon product review dataset that we used in Module 3 assignment. The subset was chosen to contain similar numbers of positive and negative reviews, as the original dataset consisted of mostly positive reviews. End of explanation """ # The same feature processing (same as the previous assignments) # --------------------------------------------------------------- import json with open('important_words.json', 'r') as f: # Reads the list of most frequent words important_words = json.load(f) important_words = [str(s) for s in important_words] def remove_punctuation(text): import string return text.translate(None, string.punctuation) # Remove punctuation. products['review_clean'] = products['review'].apply(remove_punctuation) # Split out the words into individual columns for word in important_words: products[word] = products['review_clean'].apply(lambda s : s.split().count(word)) """ Explanation: Just like we did previously, we will work with a hand-curated list of important words extracted from the review data. 
We will also perform 2 simple data transformations: Remove punctuation using Python's built-in string functionality. Compute word counts (only for the important_words) Refer to Module 3 assignment for more details. End of explanation """ products """ Explanation: Now, let us take a look at what the dataset looks like (Note: This may take a few minutes). End of explanation """ train_data, validation_data = products.random_split(.8, seed=2) print 'Training set : %d data points' % len(train_data) print 'Validation set : %d data points' % len(validation_data) """ Explanation: Train-Validation split We split the data into a train-validation split with 80% of the data in the training set and 20% of the data in the validation set. We use seed=2 so that everyone gets the same result. Note: In previous assignments, we have called this a train-test split. However, the portion of data that we don't train on will be used to help select model parameters. Thus, this portion of data should be called a validation set. Recall that examining performance of various potential models (i.e. models with different parameters) should be on a validation set, while evaluation of selected model should always be on a test set. End of explanation """ import numpy as np def get_numpy_data(data_sframe, features, label): data_sframe['intercept'] = 1 features = ['intercept'] + features features_sframe = data_sframe[features] feature_matrix = features_sframe.to_numpy() label_sarray = data_sframe[label] label_array = label_sarray.to_numpy() return(feature_matrix, label_array) """ Explanation: Convert SFrame to NumPy array Just like in the second assignment of the previous module, we provide you with a function that extracts columns from an SFrame and converts them into a NumPy array. Two arrays are returned: one representing features and another representing class labels. Note: The feature matrix includes an additional column 'intercept' filled with 1's to take account of the intercept term. 
End of explanation """ feature_matrix_train, sentiment_train = get_numpy_data(train_data, important_words, 'sentiment') feature_matrix_valid, sentiment_valid = get_numpy_data(validation_data, important_words, 'sentiment') """ Explanation: We convert both the training and validation sets into NumPy arrays. Warning: This may take a few minutes. End of explanation """ ''' produces probablistic estimate for P(y_i = +1 | x_i, w). estimate ranges between 0 and 1. ''' def predict_probability(feature_matrix, coefficients): # Take dot product of feature_matrix and coefficients ## YOUR CODE HERE ... # Compute P(y_i = +1 | x_i, w) using the link function ## YOUR CODE HERE predictions = ... return predictions """ Explanation: Building on logistic regression with no L2 penalty assignment Let us now build on Module 3 assignment. Recall from lecture that the link function for logistic regression can be defined as: $$ P(y_i = +1 | \mathbf{x}_i,\mathbf{w}) = \frac{1}{1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))}, $$ where the feature vector $h(\mathbf{x}_i)$ is given by the word counts of important_words in the review $\mathbf{x}_i$. We will use the same code as in this past assignment to make probability predictions since this part is not affected by the L2 penalty. (Only the way in which the coefficients are learned is affected by the addition of a regularization term.) End of explanation """ def feature_derivative_with_L2(errors, feature, coefficient, l2_penalty, feature_is_constant): # Compute the dot product of errors and feature ## YOUR CODE HERE derivative = ... # add L2 penalty term for any feature that isn't the intercept. if not feature_is_constant: ## YOUR CODE HERE ... return derivative """ Explanation: Adding L2 penalty Let us now work on extending logistic regression with L2 regularization. As discussed in the lectures, the L2 regularization is particularly useful in preventing overfitting. In this assignment, we will explore L2 regularization in detail. 
Recall from lecture and the previous assignment that for logistic regression without an L2 penalty, the derivative of the log likelihood function is: $$ \frac{\partial\ell}{\partial w_j} = \sum_{i=1}^N h_j(\mathbf{x}_i)\left(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w})\right) $$ Adding L2 penalty to the derivative It takes only a small modification to add a L2 penalty. All terms indicated in red refer to terms that were added due to an L2 penalty. Recall from the lecture that the link function is still the sigmoid: $$ P(y_i = +1 | \mathbf{x}_i,\mathbf{w}) = \frac{1}{1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))}, $$ We add the L2 penalty term to the per-coefficient derivative of log likelihood: $$ \frac{\partial\ell}{\partial w_j} = \sum_{i=1}^N h_j(\mathbf{x}_i)\left(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w})\right) \color{red}{-2\lambda w_j } $$ The per-coefficient derivative for logistic regression with an L2 penalty is as follows: $$ \frac{\partial\ell}{\partial w_j} = \sum_{i=1}^N h_j(\mathbf{x}i)\left(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w})\right) \color{red}{-2\lambda w_j } $$ and for the intercept term, we have $$ \frac{\partial\ell}{\partial w_0} = \sum{i=1}^N h_0(\mathbf{x}_i)\left(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w})\right) $$ Note: As we did in the Regression course, we do not apply the L2 penalty on the intercept. A large intercept does not necessarily indicate overfitting because the intercept is not associated with any particular feature. Write a function that computes the derivative of log likelihood with respect to a single coefficient $w_j$. Unlike its counterpart in the last assignment, the function accepts five arguments: * errors vector containing $(\mathbf{1}[y_i = +1] - P(y_i = +1 | \mathbf{x}_i, \mathbf{w}))$ for all $i$ * feature vector containing $h_j(\mathbf{x}_i)$ for all $i$ * coefficient containing the current value of coefficient $w_j$. 
* l2_penalty representing the L2 penalty constant $\lambda$ * feature_is_constant telling whether the $j$-th feature is constant or not. End of explanation """ def compute_log_likelihood_with_L2(feature_matrix, sentiment, coefficients, l2_penalty): indicator = (sentiment==+1) scores = np.dot(feature_matrix, coefficients) lp = np.sum((indicator-1)*scores - np.log(1. + np.exp(-scores))) - l2_penalty*np.sum(coefficients[1:]**2) return lp """ Explanation: Quiz question: In the code above, was the intercept term regularized? To verify the correctness of the gradient ascent algorithm, we provide a function for computing log likelihood (which we recall from the last assignment was a topic detailed in an advanced optional video, and used here for its numerical stability). $$\ell\ell(\mathbf{w}) = \sum_{i=1}^N \Big( (\mathbf{1}[y_i = +1] - 1)\mathbf{w}^T h(\mathbf{x}_i) - \ln\left(1 + \exp(-\mathbf{w}^T h(\mathbf{x}_i))\right) \Big) \color{red}{-\lambda\|\mathbf{w}\|_2^2} $$ End of explanation """ def logistic_regression_with_L2(feature_matrix, sentiment, initial_coefficients, step_size, l2_penalty, max_iter): coefficients = np.array(initial_coefficients) # make sure it's a numpy array for itr in xrange(max_iter): # Predict P(y_i = +1|x_i,w) using your predict_probability() function ## YOUR CODE HERE predictions = ... # Compute indicator value for (y_i = +1) indicator = (sentiment==+1) # Compute the errors as indicator - predictions errors = indicator - predictions for j in xrange(len(coefficients)): # loop over each coefficient is_intercept = (j == 0) # Recall that feature_matrix[:,j] is the feature column associated with coefficients[j]. # Compute the derivative for coefficients[j]. Save it in a variable called derivative ## YOUR CODE HERE derivative = ... # add the step size times the derivative to the current coefficient ## YOUR CODE HERE ... 
# Checking whether log likelihood is increasing if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \ or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0: lp = compute_log_likelihood_with_L2(feature_matrix, sentiment, coefficients, l2_penalty) print 'iteration %*d: log likelihood of observed labels = %.8f' % \ (int(np.ceil(np.log10(max_iter))), itr, lp) return coefficients """ Explanation: Quiz question: Does the term with L2 regularization increase or decrease $\ell\ell(\mathbf{w})$? The logistic regression function looks almost like the one in the last assignment, with a minor modification to account for the L2 penalty. Fill in the code below to complete this modification. End of explanation """ # run with L2 = 0 coefficients_0_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train, initial_coefficients=np.zeros(194), step_size=5e-6, l2_penalty=0, max_iter=501) # run with L2 = 4 coefficients_4_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train, initial_coefficients=np.zeros(194), step_size=5e-6, l2_penalty=4, max_iter=501) # run with L2 = 10 coefficients_10_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train, initial_coefficients=np.zeros(194), step_size=5e-6, l2_penalty=10, max_iter=501) # run with L2 = 1e2 coefficients_1e2_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train, initial_coefficients=np.zeros(194), step_size=5e-6, l2_penalty=1e2, max_iter=501) # run with L2 = 1e3 coefficients_1e3_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train, initial_coefficients=np.zeros(194), step_size=5e-6, l2_penalty=1e3, max_iter=501) # run with L2 = 1e5 coefficients_1e5_penalty = logistic_regression_with_L2(feature_matrix_train, sentiment_train, initial_coefficients=np.zeros(194), step_size=5e-6, l2_penalty=1e5, max_iter=501) """ Explanation: Explore effects of L2 regularization Now that we have written up all the 
# One row per feature: the intercept followed by each of the important words.
# Each call to add_coefficients_to_table() appends one trained model's
# coefficients as a new column of this table.
table = graphlab.SFrame({'word': ['(intercept)'] + important_words})

def add_coefficients_to_table(coefficients, column_name):
    """Attach `coefficients` as a new column called `column_name` and return
    the table.

    NOTE(review): this mutates the module-level `table` SFrame in place
    rather than copying it, so successive calls accumulate columns.
    """
    table[column_name] = coefficients
    return table
def make_coefficient_plot(table, positive_words, negative_words, l2_penalty_list):
    """Plot each selected word's coefficient as a function of the L2 penalty.

    table           : SFrame with a 'word' column plus one coefficient column
                      per L2 penalty (built by add_coefficients_to_table)
    positive_words  : words drawn with shades from the 'Reds' colormap
    negative_words  : words drawn with shades from the 'Blues' colormap
    l2_penalty_list : x-axis values (the L2 penalties), shown on a log scale

    Fixes: Python 2's xrange() -> range(), and a raw string for the axis
    label so '\\l' is not treated as an (invalid) escape sequence.
    """
    cmap_positive = plt.get_cmap('Reds')
    cmap_negative = plt.get_cmap('Blues')

    xx = l2_penalty_list
    # Dashed zero line for visual reference.
    plt.plot(xx, [0.] * len(xx), '--', lw=1, color='k')

    table_positive_words = table.filter_by(column_name='word', values=positive_words)
    table_negative_words = table.filter_by(column_name='word', values=negative_words)
    del table_positive_words['word']
    del table_negative_words['word']

    for i in range(len(positive_words)):
        # Spread colors over the upper part of the colormap so lines stay visible.
        color = cmap_positive(0.8 * ((i + 1) / (len(positive_words) * 1.2) + 0.15))
        plt.plot(xx, table_positive_words[i:i+1].to_numpy().flatten(),
                 '-', label=positive_words[i], linewidth=4.0, color=color)

    for i in range(len(negative_words)):
        color = cmap_negative(0.8 * ((i + 1) / (len(negative_words) * 1.2) + 0.15))
        plt.plot(xx, table_negative_words[i:i+1].to_numpy().flatten(),
                 '-', label=negative_words[i], linewidth=4.0, color=color)

    plt.legend(loc='best', ncol=3, prop={'size': 16}, columnspacing=0.5)
    plt.axis([1, 1e5, -1, 2])
    plt.title('Coefficient path')
    # Raw string: avoids Python 3.12+ SyntaxWarning for the '\l' escape.
    plt.xlabel(r'L2 penalty ($\lambda$)')
    plt.ylabel('Coefficient value')
    plt.xscale('log')
    plt.rcParams.update({'font.size': 18})
    plt.tight_layout()
def get_classification_accuracy(feature_matrix, sentiment, coefficients):
    """Fraction of examples classified correctly.

    An example is predicted +1 when its score h(x)^T w is strictly positive
    and -1 otherwise (the lecture's decision rule).

    Fixes:
      * np.where replaces np.vectorize over a lambda (vectorized, clearer);
      * float() prevents integer floor division (`num_correct / len(...)`
        silently yields 0 under Python 2).
    """
    scores = np.dot(feature_matrix, coefficients)
    predictions = np.where(scores > 0, 1., -1.)
    num_correct = (predictions == sentiment).sum()
    accuracy = float(num_correct) / len(feature_matrix)
    return accuracy
# Accuracy of every trained model on the training and validation sets,
# keyed by L2 penalty strength.  A single loop replaces the six
# copy-pasted call pairs, and print() replaces Python 2 print statements.
train_accuracy = {}
validation_accuracy = {}

_coefficients_by_l2 = {
    0:   coefficients_0_penalty,
    4:   coefficients_4_penalty,
    10:  coefficients_10_penalty,
    1e2: coefficients_1e2_penalty,
    1e3: coefficients_1e3_penalty,
    1e5: coefficients_1e5_penalty,
}
for _l2, _coefficients in _coefficients_by_l2.items():
    train_accuracy[_l2] = get_classification_accuracy(
        feature_matrix_train, sentiment_train, _coefficients)
    validation_accuracy[_l2] = get_classification_accuracy(
        feature_matrix_valid, sentiment_valid, _coefficients)

# Build a simple report
for key in sorted(validation_accuracy.keys()):
    print("L2 penalty = %g" % key)
    print("train accuracy = %s, validation_accuracy = %s"
          % (train_accuracy[key], validation_accuracy[key]))
    print("--------------------------------------------------------------------------------")
We first calculate the accuracy values and then build a simple report summarizing the performance for the various models. End of explanation """
chloeyangu/BigDataAnalytics
The Airbnb Scoop/Source Code/2. Data Preparation Part 1 (Listings).ipynb
mit
# --- Convert numeric / currency columns --------------------------------------
# DataFrame.convert_objects() was removed from pandas.  A soft per-column
# numeric conversion reproduces its behavior: columns whose values all parse
# as numbers are converted, everything else is left unchanged.
for _col in listings_df.columns:
    try:
        listings_df[_col] = pd.to_numeric(listings_df[_col])
    except (ValueError, TypeError):
        pass

def _currency_to_float(series):
    """Strip a leading currency symbol and thousands separators from a string
    Series (e.g. '$1,234.00') and return floats; '' and NaN become 0.0."""
    return (series.str[1:]
                  .replace(',', '', regex=True)
                  .replace('', '0', regex=True)
                  .astype(float)
                  .fillna(0.0))

# One loop replaces four copy-pasted cleaning stanzas.  (Empty strings are
# now mapped to 0 for 'price' too, which previously would have raised on
# astype(float).)
for _col in ['price', 'extra_people', 'weekly_price', 'monthly_price']:
    listings_df[_col] = _currency_to_float(listings_df[_col])

# security_deposit's remaining cleaning steps continue in the next cell.
listings_df['security_deposit'] = listings_df['security_deposit'].str[1:]
def distance(lat1, long1, lat2, long2):
    """Great-circle distance in km between two points given in degrees,
    computed with the haversine formula."""
    from math import sin, cos, sqrt, atan2, radians

    # approximate radius of earth in km
    earth_radius_km = 6373.0

    # Convert both endpoints to radians.
    phi1, lam1 = radians(lat1), radians(long1)
    phi2, lam2 = radians(lat2), radians(long2)

    dphi = phi2 - phi1
    dlam = lam2 - lam1

    # Haversine: a is the squared half-chord length, c the angular distance.
    a = sin(dphi / 2) ** 2 + cos(phi1) * cos(phi2) * sin(dlam / 2) ** 2
    c = 2 * atan2(sqrt(a), sqrt(1 - a))

    return earth_radius_km * c
# For every listing, find the closest attraction by brute force
# (O(len(listings) * len(attractions)) distance evaluations) and record
# its name, rating, coordinates, and distance as new columns.
for n in range(0, len(listings_df2)):
    # Seed the search with attraction 0 as the current best candidate.
    nearest_attr = attractions['attraction'][0]
    nearest_attr_rating = attractions['rating'][0]
    nearest_attr_lat = attractions['lat'][0]
    nearest_attr_long = attractions['long'][0]

    list_lat = listings_df2['latitude'][n]
    list_long = listings_df2['longitude'][n]

    #Distance from first attraction to listing
    dist_nearest = distance(list_lat, list_long, nearest_attr_lat, nearest_attr_long)

    # Scan the remaining attractions, keeping the closest one seen so far.
    for i in range(1, len(attractions)):
        attr_lat = attractions['lat'][i]
        attr_long = attractions['long'][i]
        dist = distance(list_lat, list_long, attr_lat, attr_long)
        if dist < dist_nearest:
            nearest_attr = attractions['attraction'][i]
            nearest_attr_rating = attractions['rating'][i]
            nearest_attr_lat = attractions['lat'][i]
            nearest_attr_long = attractions['long'][i]
            dist_nearest = dist

    # Write the winner back onto the listings frame (scalar .loc assignment
    # creates the columns on first use).
    listings_df2.loc[n, 'nearest_attr'] = nearest_attr
    listings_df2.loc[n, 'nearest_attr_rating'] = nearest_attr_rating
    listings_df2.loc[n, 'nearest_attr_lat'] = nearest_attr_lat
    listings_df2.loc[n, 'nearest_attr_long'] = nearest_attr_long
    listings_df2.loc[n, 'nearest_attr_dist'] = dist_nearest
ComputationalModeling/spring-2017-danielak
past-semesters/fall_2016/day-by-day/day06-modeling-radioactivity-day1/radioactivity_modeling.ipynb
agpl-3.0
# put your code here! add additional cells if necessary. """ Explanation: Why is my banana glowing? (modeling a system that evolves in time) Student names Work in pairs, and put the names of both people in your group here! (If you're in a group of 3, just move your chairs so you can work together.) Learning Goals (Why are we asking you to do this?) Lots of systems (physical, biological, economic, social, etc...) evolve in time, and you have to figure out how to change things at the next time based on what happened before. Today, we're doing that using radioactivity and compare our models to some data. This type of modeling is going to make us think about: How to make a system move forward in time using loops How to work with numpy arrays to do interesting and useful things How to think about a model in relation to experimental data, and to decide if your model isn't quite good enough (ooh! foreshadowing!) Radioactivity <img src="https://whatisnuclear.com/articles/radioactive.png" alt="ooh, shiny!" style="width: 250px;"/> The word "radioactivity" conjures up many images - perhaps a nuclear reactor (or a nuclear reactor accident, such as Fukushima...), or perhaps MSU's very own National Superconducting Cylotron Laboratory, or NSCL. What the word "radioactivity" refers to is particles that are emitted from the nuclei of some atoms, which are unstable due to the fundamental properties of the nucleus. Depending on the nucleus, the particles emitted can be highly energetic photons, electrons, or 'alpha particles' (helium nuclei). Not all nuclei are radioactive; many elements, like normal hydrogen ($^1\mathrm{H}$) or carbon ($^{12}\mathrm{C}$), are extremely stable, and their nuclei do not spontaneously emit particles. 
However, isotopes of these elements are radioactive - $^3\mathrm{H}$, or tritium, is a hydrogen atom with two extra neutrons in the nucleus, and which has a "half-life" of 12.32 years, and $^{14}\mathrm{C}$, the isotope of carbon used to estimate the ages of old artifacts by archeologists using a technique called carbon dating, has a "half-life" of 5,730 years. The "half-life" is the time that it takes for half of a sample of that element to decay. In other words, if you start out with a number of atons of $^{14}\mathrm{C}$ equal to $N_0$, after some amount of time t you would have: $N(t) = N_0 2^{-\frac{t}{t_{1/2}}}$ left, where $t_{1/2}$ in this equation is the half-life of $^{14}\mathrm{C}$, 5,730 years. Radioactive isotopes have a tremendous range of half-lives, with values measured from around $10^{-23}$ seconds to more than $10^{22}$ years! The study of these isotopes - which ones can and can't decay, how long they take to decay, and what they produce when they do decay - can tell you a tremendous amount about the basic properties of matter (take a tour of the NSCL some time and you'll hear all about it!) Also, it's worth noting that radioactivity is naturally occurring, and plenty of things that you interact with in everyday life are a little bit radioactive. A relatively common isotope of potassium is radioactive, so anything that contains potassium - like bananas! - creates detectable amounts of radioactivity. There are also radioactive isotopes in granite (which is used to make floors, counter tops, and lots of other things), cigarettes, the clay in kitty litter, and a lot of types of old pottery and glassware. A mysterious stranger... and an even more mysterious substance You happen to be wandering down a dark alley just north of campus after the bars close on a Friday. A mysterious man gives you a chunk of some material of unknown properties. 
He tells you that it is "kinda radioactive, maybe...", but that it's very late and he just woke up in this alley. You can't help but notice that it's warm to the touch, and it has a lovely green glow. By the time you look up, he has disappeared in a puff of smoke. You clearly can't give the sample back to him, but don't think it's safe to leave it in the alley. For the moment, let's assume that it has a half-life $t_{s}$ (which you'll determine later) and you have 1 kg of it. First, work out some things on your group's whiteboard: How much of a sample of material should be remaining after T seconds? How many decays should there be per second at that time? Hint 1: $\frac{d}{dx} 2^x = 2^x \mathrm{ln}(2)$, and don't forget the chain rule! Hint 2: What's the relationship between $\frac{dN}{dt}$ and the measured number of decays per second? Check with an instructor before you move on! Next: make two plots, one with the amount of time that should be remaining as a function of time, and a second with the expected decays per second as a function of time! End of explanation """ # this block of code reads in the data files. Don't worry too much about how they # work right now -- we'll talk about that in a couple of weeks weeks! 
import numpy as np

# Each data file holds two rows: row 0 = measurement times (seconds since
# the start of data-taking), row 1 = the measured values at those times.
# Loading once and unpacking the rows avoids parsing each file twice
# (the original called np.loadtxt on the same file for [0] and again for [1]).

# count_rates[i] = number of decays counted since the previous reading,
# taken at time count_times[i] (seconds).
count_times, count_rates = np.loadtxt("count_rates.txt", dtype=int)

# sample_amounts[i] = number of atoms of the mysterious material remaining
# at time sample_times[i] (seconds).
sample_times, sample_amounts = np.loadtxt("sample_amount.txt", dtype=int)
add additional cells if necessary. """ Explanation: Next, using just the count rates and the measured initial amount of the substance (i.e., just the array count_rates and the first value in sample_amounts), estimate what the amount of sample should be as a function of time. Make a plot of this. (Note: you should work your model out on the whiteboard first!) End of explanation """ # put your code here! add additional cells if necessary. """ Explanation: Based on the plot in the previous cell, what do you think is going on? put your answer here! How might you modify your model to emulate this behavior? In other words, how might you modify the equation for decay rate to get something that matches the observed decay rate? put your answer here! More complex data manipulation and visualization We're now going to try to do something to the decay rate data that you worked with above, to try to get a more accurate understanding of the decay rate and whatever is adding confusion to your modeling. What you're going to do is: "Smooth" the decay rate data over multiple adjacent samples in time to get rid of some of the noise. Try writing a piece of code to loop over the array of data and average the sample you're interested in along with the N samples on either side (i.e., from element i-N to i+N, for an arbitrary number of cells). Store this smoothed data in a new array (perhaps using np.zeros_like() to create the new array?). Plot your smoothed data on top of the noisy data to ensure that it agrees. Create a new array with an analytic equation that describes for the decay rate as a function of time, taking into account what you're seeing in point (2), and try to find the values of the various constants in the equation. Plot the new array on top of the raw data and smoothed values. Does this expression, and the constants that you decided on, give reasonable results? 
End of explanation """ from IPython.display import HTML HTML( """ <iframe src="https://goo.gl/forms/F1MvFMDpIWPScchr2?embedded=true" width="80%" height="1200px" frameborder="0" marginheight="0" marginwidth="0"> Loading... </iframe> """ ) """ Explanation: Feedback on this assignment Please fill out the form that appears when you run the code below. You must completely fill this out in order for your group to receive credit for the assignment! End of explanation """
sofmonk/aima-python
csp.ipynb
mit
from csp import * """ Explanation: Constraint Satisfaction Problems (CSPs) This IPy notebook acts as supporting material for topics covered in Chapter 6 Constraint Satisfaction Problems of the book Artificial Intelligence: A Modern Approach. We make use of the implementations in csp.py module. Even though this notebook includes a brief summary of the main topics familiarity with the material present in the book is expected. We will look at some visualizations and solve some of the CSP problems described in the book. Let us import everything from the csp module to get started. End of explanation """ %psource CSP """ Explanation: Review CSPs are a special kind of search problems. Here we don't treat the space as a black box but the state has a particular form and we use that to our advantage to tweak our algorithms to be more suited to the problems. A CSP State is defined by a set of variables which can take values from corresponding domains. These variables can take only certain values in their domains to satisfy the constraints. A set of assignments which satisfies all constraints passes the goal test. Let us start by exploring the CSP class which we will use to model our CSPs. You can keep the popup open and read the main page to get a better idea of the code. End of explanation """ s = UniversalDict(['R','G','B']) s[5] """ Explanation: The _ init _ method parameters specify the CSP. Variable can be passed as a list of strings or integers. Domains are passed as dict where key specify the variables and value specify the domains. The variables are passed as an empty list. Variables are extracted from the keys of the domain dictionary. Neighbor is a dict of variables that essentially describes the constraint graph. Here each variable key has a list its value which are the variables that are constraint along with it. The constraint parameter should be a function f(A, a, B, b) that returns true if neighbors A, B satisfy the constraint when they have values A=a, B=b. 
We have additional parameters like nassings which is incremented each time an assignment is made when calling the assign method. You can read more about the methods and parameters in the class doc string. We will talk more about them as we encounter their use. Let us jump to an example. Graph Coloring We use the graph coloring problem as our running example for demonstrating the different algorithms in the csp module. The idea of map coloring problem is that the adjacent nodes (those connected by edges) should not have the same color throughout the graph. The graph can be colored using a fixed number of colors. Here each node is a variable and the values are the colors that can be assigned to them. Given that the domain will be the same for all our nodes we use a custom dict defined by the UniversalDict class. The UniversalDict Class takes in a parameter which it returns as value for all the keys of the dict. It is very similar to defaultdict in Python except that it does not support item assignment. End of explanation """ %psource different_values_constraint """ Explanation: For our CSP we also need to define a constraint function f(A, a, B, b). In this what we need is that the neighbors must not have the same color. This is defined in the function different_values_constraint of the module. End of explanation """ %pdoc parse_neighbors """ Explanation: The CSP class takes neighbors in the form of a Dict. The module specifies a simple helper function named parse_neighbors which allows to take input in the form of strings and return a Dict of the form compatible with the CSP Class. End of explanation """ %psource MapColoringCSP australia, usa, france """ Explanation: The MapColoringCSP function creates and returns a CSP with the above constraint function and states. The variables our the keys of the neighbors dict and the constraint is the one specified by the different_values_constratint function. 
australia, usa and france are three CSPs that have been created using MapColoringCSP. australia corresponds to Figure 6.1 in the book. End of explanation """ %psource queen_constraint """ Explanation: NQueens The N-queens puzzle is the problem of placing N chess queens on a N×N chessboard so that no two queens threaten each other. Here N is a natural number. Like the graph coloring, problem NQueens is also implemented in the csp module. The NQueensCSP class inherits from the CSP class. It makes some modifications in the methods to suit the particular problem. The queens are assumed to be placed one per column, from left to right. That means position (x, y) represents (var, val) in the CSP. The constraint that needs to be passed on the CSP is defined in the queen_constraint function. The constraint is satisfied (true) if A, B are really the same variable, or if they are not in the same row, down diagonal, or up diagonal. End of explanation """ %psource NQueensCSP """ Explanation: The NQueensCSP method implements methods that support solving the problem via min_conflicts which is one of the techniques for solving CSPs. Because min_conflicts hill climbs the number of conflicts to solve the CSP assign and unassign are modified to record conflicts. More details about the structures rows, downs, ups which help in recording conflicts are explained in the docstring. End of explanation """ eight_queens = NQueensCSP(8) """ Explanation: The _ init _ method takes only one parameter n the size of the problem. To create an instance we just pass the required n into the constructor. 
End of explanation """ import copy class InstruCSP(CSP): def __init__(self, variables, domains, neighbors, constraints): super().__init__(variables, domains, neighbors, constraints) self.assignment_history = [] def assign(self, var, val, assignment): super().assign(var,val, assignment) self.assignment_history.append(copy.deepcopy(assignment)) def unassign(self, var, assignment): super().unassign(var,assignment) self.assignment_history.append(copy.deepcopy(assignment)) """ Explanation: Helper Functions We will now implement a few helper functions that will help us visualize the Coloring Problem. We will make some modifications to the existing Classes and Functions for additional book keeping. To begin we modify the assign and unassign methods in the CSP to add a copy of the assignment to the assignment_history. We call this new class InstruCSP. This will allow us to see how the assignment evolves over time. End of explanation """ def make_instru(csp): return InstruCSP(csp.variables, csp.domains, csp.neighbors, csp.constraints) """ Explanation: Next, we define make_instru which takes an instance of CSP and returns a InstruCSP instance. 
End of explanation """ neighbors = { 0: [6, 11, 15, 18, 4, 11, 6, 15, 18, 4], 1: [12, 12, 14, 14], 2: [17, 6, 11, 6, 11, 10, 17, 14, 10, 14], 3: [20, 8, 19, 12, 20, 19, 8, 12], 4: [11, 0, 18, 5, 18, 5, 11, 0], 5: [4, 4], 6: [8, 15, 0, 11, 2, 14, 8, 11, 15, 2, 0, 14], 7: [13, 16, 13, 16], 8: [19, 15, 6, 14, 12, 3, 6, 15, 19, 12, 3, 14], 9: [20, 15, 19, 16, 15, 19, 20, 16], 10: [17, 11, 2, 11, 17, 2], 11: [6, 0, 4, 10, 2, 6, 2, 0, 10, 4], 12: [8, 3, 8, 14, 1, 3, 1, 14], 13: [7, 15, 18, 15, 16, 7, 18, 16], 14: [8, 6, 2, 12, 1, 8, 6, 2, 1, 12], 15: [8, 6, 16, 13, 18, 0, 6, 8, 19, 9, 0, 19, 13, 18, 9, 16], 16: [7, 15, 13, 9, 7, 13, 15, 9], 17: [10, 2, 2, 10], 18: [15, 0, 13, 4, 0, 15, 13, 4], 19: [20, 8, 15, 9, 15, 8, 3, 20, 3, 9], 20: [3, 19, 9, 19, 3, 9] } """ Explanation: We will now use a graph defined as a dictonary for plotting purposes in our Graph Coloring Problem. The keys are the nodes and their corresponding values are the nodes they are connected to. End of explanation """ coloring_problem = MapColoringCSP('RGBY', neighbors) coloring_problem1 = make_instru(coloring_problem) """ Explanation: Now we are ready to create an InstruCSP instance for our problem. We are doing this for an instance of MapColoringProblem class which inherits from the CSP Class. This means that our make_instru function will work perfectly for it. End of explanation """ result = backtracking_search(coloring_problem1) result # A dictonary of assignments. """ Explanation: Backtracking Search For solving a CSP the main issue with Naive search algorithms is that they can continue expanding obviously wrong paths. In backtracking search, we check constraints as we go. Backtracking is just the above idea combined with the fact that we are dealing with one variable at a time. Backtracking Search is implemented in the repository as the function backtracking_search. This is the same as Figure 6.5 in the book. 
The function takes as input a CSP and few other optional parameters which can be used to further speed it up. The function returns the correct assignment if it satisfies the goal. We will discuss these later. Let us solve our coloring_problem1 with backtracking_search. End of explanation """ coloring_problem1.nassigns """ Explanation: Let us also check the number of assignments made. End of explanation """ len(coloring_problem1.assignment_history) """ Explanation: Now let us check the total number of assignments and unassignments which is the length ofour assignment history. End of explanation """ %psource mrv %psource num_legal_values %psource CSP.nconflicts """ Explanation: Now let us explore the optional keyword arguments that the backtracking_search function takes. These optional arguments help speed up the assignment further. Along with these, we will also point out to methods in the CSP class that help make this work. The first of these is select_unassigned_variable. It takes in a function that helps in deciding the order in which variables will be selected for assignment. We use a heuristic called Most Restricted Variable which is implemented by the function mrv. The idea behind mrv is to choose the variable with the fewest legal values left in its domain. The intuition behind selecting the mrv or the most constrained variable is that it allows us to encounter failure quickly before going too deep into a tree if we have selected a wrong step before. The mrv implementation makes use of another function num_legal_values to sort out the variables by a number of legal values left in its domain. This function, in turn, calls the nconflicts method of the CSP to return such values. End of explanation """ %psource lcv """ Explanation: Another ordering related parameter order_domain_values governs the value ordering. Here we select the Least Constraining Value which is implemented by the function lcv. 
The idea is to select the value which rules out the fewest values in the remaining variables. The intuition behind selecting the lcv is that it leaves a lot of freedom to assign values later. The idea behind selecting the mrc and lcv makes sense because we need to do all variables but for values, we might better try the ones that are likely. So for vars, we face the hard ones first. End of explanation """ solve_simple = copy.deepcopy(usa) solve_parameters = copy.deepcopy(usa) backtracking_search(solve_simple) backtracking_search(solve_parameters, order_domain_values=lcv, select_unassigned_variable=mrv, inference=mac ) solve_simple.nassigns solve_parameters.nassigns """ Explanation: Finally, the third parameter inference can make use of one of the two techniques called Arc Consistency or Forward Checking. The details of these methods can be found in the Section 6.3.2 of the book. In short the idea of inference is to detect the possible failure before it occurs and to look ahead to not make mistakes. mac and forward_checking implement these two techniques. The CSP methods support_pruning, suppose, prune, choices, infer_assignment and restore help in using these techniques. You can know more about these by looking up the source code. Now let us compare the performance with these parameters enabled vs the default parameters. We will use the Graph Coloring problem instance usa for comparison. We will call the instances solve_simple and solve_parameters and solve them using backtracking and compare the number of assignments. End of explanation """ %matplotlib inline import networkx as nx import matplotlib.pyplot as plt import matplotlib import time """ Explanation: Graph Coloring Visualization Next, we define some functions to create the visualisation from the assignment_history of coloring_problem1. The reader need not concern himself with the code that immediately follows as it is the usage of Matplotib with IPython Widgets. 
If you are interested in reading more about these visit ipywidgets.readthedocs.io. We will be using the networkx library to generate graphs. These graphs can be treated as the graph that needs to be colored or as a constraint graph for this problem. If interested you can read a dead simple tutorial here. We start by importing the necessary libraries and initializing matplotlib inline. End of explanation """ def make_update_step_function(graph, instru_csp): def draw_graph(graph): # create networkx graph G=nx.Graph(graph) # draw graph pos = nx.spring_layout(G,k=0.15) return (G, pos) G, pos = draw_graph(graph) def update_step(iteration): # here iteration is the index of the assignment_history we want to visualize. current = instru_csp.assignment_history[iteration] # We convert the particular assignment to a default dict so that the color for nodes which # have not been assigned defaults to black. current = defaultdict(lambda: 'Black', current) # Now we use colors in the list and default to black otherwise. colors = [current[node] for node in G.node.keys()] # Finally drawing the nodes. nx.draw(G, pos, node_color=colors, node_size=500) labels = {label:label for label in G.node} # Labels shifted by offset so as to not overlap nodes. label_pos = {key:[value[0], value[1]+0.03] for key, value in pos.items()} nx.draw_networkx_labels(G, label_pos, labels, font_size=20) # show graph plt.show() return update_step # <-- this is a function def make_visualize(slider): ''' Takes an input a slider and returns callback function for timer and animation ''' def visualize_callback(Visualize, time_step): if Visualize is True: for i in range(slider.min, slider.max + 1): slider.value = i time.sleep(float(time_step)) return visualize_callback """ Explanation: The ipython widgets we will be using require the plots in the form of a step function such that there is a graph corresponding to each value. We define the make_update_step_function which return such a function. 
It takes in as inputs the neighbors/graph along with an instance of the InstruCSP. This will be more clear with the example below. If this sounds confusing do not worry this is not the part of the core material and our only goal is to help you visualize how the process works. End of explanation """ step_func = make_update_step_function(neighbors, coloring_problem1) """ Explanation: Finally let us plot our problem. We first use the function above to obtain a step function. End of explanation """ matplotlib.rcParams['figure.figsize'] = (18.0, 18.0) """ Explanation: Next we set the canvas size. End of explanation """ import ipywidgets as widgets from IPython.display import display iteration_slider = widgets.IntSlider(min=0, max=len(coloring_problem1.assignment_history)-1, step=1, value=0) w=widgets.interactive(step_func,iteration=iteration_slider) display(w) visualize_callback = make_visualize(iteration_slider) visualize_button = widgets.ToggleButton(desctiption = "Visualize", value = False) time_select = widgets.ToggleButtons(description='Extra Delay:',options=['0', '0.1', '0.2', '0.5', '0.7', '1.0']) a = widgets.interactive(visualize_callback, Visualize = visualize_button, time_step=time_select) display(a) """ Explanation: Finally our plot using ipywidget slider and matplotib. You can move the slider to experiment and see the coloring change. It is also possible to move the slider using arrow keys or to jump to the value by directly editing the number with a double click. The Visualize Button will automatically animate the slider for you. The Extra Delay Box allows you to set time delay in seconds upto one second for each time step. End of explanation """ def label_queen_conflicts(assignment,grid): ''' Mark grid with queens that are under conflict. 
''' for col, row in assignment.items(): # check each queen for conflict row_conflicts = {temp_col:temp_row for temp_col,temp_row in assignment.items() if temp_row == row and temp_col != col} up_conflicts = {temp_col:temp_row for temp_col,temp_row in assignment.items() if temp_row+temp_col == row+col and temp_col != col} down_conflicts = {temp_col:temp_row for temp_col,temp_row in assignment.items() if temp_row-temp_col == row-col and temp_col != col} # Now marking the grid. for col, row in row_conflicts.items(): grid[col][row] = 3 for col, row in up_conflicts.items(): grid[col][row] = 3 for col, row in down_conflicts.items(): grid[col][row] = 3 return grid def make_plot_board_step_function(instru_csp): '''ipywidgets interactive function supports single parameter as input. This function creates and return such a function by taking in input other parameters. ''' n = len(instru_csp.variables) def plot_board_step(iteration): ''' Add Queens to the Board.''' data = instru_csp.assignment_history[iteration] grid = [[(col+row+1)%2 for col in range(n)] for row in range(n)] grid = label_queen_conflicts(data, grid) # Update grid with conflict labels. # color map of fixed colors cmap = matplotlib.colors.ListedColormap(['white','lightsteelblue','red']) bounds=[0,1,2,3] # 0 for white 1 for black 2 onwards for conflict labels (red). norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) fig = plt.imshow(grid, interpolation='nearest', cmap = cmap,norm=norm) plt.axis('off') fig.axes.get_xaxis().set_visible(False) fig.axes.get_yaxis().set_visible(False) # Place the Queens Unicode Symbol for col, row in data.items(): fig.axes.text(row, col, u"\u265B", va='center', ha='center', family='Dejavu Sans', fontsize=32) plt.show() return plot_board_step """ Explanation: NQueens Visualization Just like the Graph Coloring Problem we will start with defining a few helper functions to help us visualize the assignments as they evolve over time. 
The make_plot_board_step_function behaves similar to the make_update_step_function introduced earlier. It initializes a chess board in the form of a 2D grid with alternating 0s and 1s. This is used by plot_board_step function which draws the board using matplotlib and adds queens to it. This function also calls the label_queen_conflicts which modifies the grid placing 3 in positions in a position where there is a conflict. End of explanation """ twelve_queens_csp = NQueensCSP(12) backtracking_instru_queen = make_instru(twelve_queens_csp) result = backtracking_search(backtracking_instru_queen) backtrack_queen_step = make_plot_board_step_function(backtracking_instru_queen) # Step Function for Widgets """ Explanation: Now let us visualize a solution obtained via backtracking. We use of the previosuly defined make_instru function for keeping a history of steps. End of explanation """ matplotlib.rcParams['figure.figsize'] = (8.0, 8.0) matplotlib.rcParams['font.family'].append(u'Dejavu Sans') iteration_slider = widgets.IntSlider(min=0, max=len(backtracking_instru_queen.assignment_history)-1, step=0, value=0) w=widgets.interactive(backtrack_queen_step,iteration=iteration_slider) display(w) visualize_callback = make_visualize(iteration_slider) visualize_button = widgets.ToggleButton(desctiption = "Visualize", value = False) time_select = widgets.ToggleButtons(description='Extra Delay:',options=['0', '0.1', '0.2', '0.5', '0.7', '1.0']) a = widgets.interactive(visualize_callback, Visualize = visualize_button, time_step=time_select) display(a) """ Explanation: Now finally we set some matplotlib parameters to adjust how our plot will look. The font is necessary because the Black Queen Unicode character is not a part of all fonts. You can move the slider to experiment and observe the how queens are assigned. 
It is also possible to move the slider using arrow keys or to jump to the value by directly editing the number with a double click.The Visualize Button will automatically animate the slider for you. The Extra Delay Box allows you to set time delay in seconds upto one second for each time step. End of explanation """ conflicts_instru_queen = make_instru(twelve_queens_csp) result = min_conflicts(conflicts_instru_queen) conflicts_step = make_plot_board_step_function(conflicts_instru_queen) """ Explanation: Now let us finally repeat the above steps for min_conflicts solution. End of explanation """ iteration_slider = widgets.IntSlider(min=0, max=len(conflicts_instru_queen.assignment_history)-1, step=0, value=0) w=widgets.interactive(conflicts_step,iteration=iteration_slider) display(w) visualize_callback = make_visualize(iteration_slider) visualize_button = widgets.ToggleButton(desctiption = "Visualize", value = False) time_select = widgets.ToggleButtons(description='Extra Delay:',options=['0', '0.1', '0.2', '0.5', '0.7', '1.0']) a = widgets.interactive(visualize_callback, Visualize = visualize_button, time_step=time_select) display(a) """ Explanation: The visualization has same features as the above. But here it also highlights the conflicts by labeling the conflicted queens with a red background. End of explanation """
sintefmath/Splipy
doc/Tutorial/Basic manipulation.ipynb
gpl-3.0
import splipy as sp import numpy as np import matplotlib.pyplot as plt import splipy.curve_factory as curve_factory """ Explanation: Basic Manipulation Splipy implements all affine transformations like translate (move), rotate, scale etc. These should be available as operators where this makes sense. To start, we need to import the libraries we are going to use first End of explanation """ crv = curve_factory.n_gon(6) # create a sample curve t0 = crv.start() # parametric starting point t1 = crv.end() # parametric end point t = np.linspace(t0, t1, 361) # uniform grid of 361 evaluation points on the parametric domain x = crv(t) plt.plot(x[:,0], x[:,1]) # plot curve crv.rotate(10.0/360*2*np.pi) # rotate by 10 degrees (input is in radians) x = crv(t) plt.plot(x[:,0], x[:,1], 'r-') # plot curve (in red) plt.axis('equal') plt.show() """ Explanation: Rotate End of explanation """ crv = curve_factory.n_gon(6) # create a sample curve t0 = crv.start() # parametric starting point t1 = crv.end() # parametric end point t = np.linspace(t0, t1, 361) # uniform grid of 361 evaluation points on the parametric domain x = crv(t) plt.plot(x[:,0], x[:,1]) # plot curve dx = [0.1, 0.1] # translation amount crv.translate(dx) # move the object by 'dx' x = crv(t) plt.plot(x[:,0], x[:,1], 'r-') # plot curve (in red) plt.axis('equal') plt.show() """ Explanation: Translate End of explanation """ crv.translate([1, 2]) # moves object 1 in x-direction, 2 in y-direction crv += [1,2] # does the exact same thing crv = crv + [1,2] # same thing crv_2 = crv + [1,2] # creates a new object crv_2 which is the translated version of crv crv += (1,2) # translation vector only needs to be array-like (any indexable input will work) """ Explanation: Note that translate can also be applied as an operator End of explanation """ crv = curve_factory.n_gon(6) # create a sample curve t0 = crv.start() # parametric starting point t1 = crv.end() # parametric end point t = np.linspace(t0, t1, 361) # uniform grid of 361 
evaluation points on the parametric domain x = crv(t) plt.plot(x[:,0], x[:,1]) # plot curve crv.scale(1.5) # scales the object by a factor of 150% x = crv(t) plt.plot(x[:,0], x[:,1], 'r-') # plot curve (in red) plt.axis('equal') plt.show() """ Explanation: Scaling Note that scaling is done in relation to the origin. Depending on your use, you might want to center the object around the origin before scaling. End of explanation """ crv.scale(1.5) crv *= 1.5 # does the exact same thing crv = crv * 1.5 # same thing crv_2 = crv * 1.5 # keeps crv unchanged, returns a new object crv_2 which is the scaled version of crv crv *= (2,1) # doubles the size in x-direction, while leaving the size in y-direction unchanged """ Explanation: Scaling is also available as operators End of explanation """ curve = curve_factory.n_gon(6) # for a slightly more inefficient translation operations, we may manipulate the controlpoints one-by-one for controlpoint in curve: controlpoint += [1,0] # alternative way of iterating over the controlpoints of a spline object for i in range(len(curve)): curve[i] += [1,0] print(curve) curve[0] += [1,0] # this will move the first controlpoint one unit in the x-direction curve[0,0] += 1 # exact same thing (now moved a total of two) print(curve) """ Explanation: Control-point manipulation For special case manipulation, it is possible to manipulate the controlpoints directly End of explanation """
jhconning/Dev-II
notebooks/SFM.ipynb
bsd-3-clause
ppf(Tbar=100, Kbar=100, Lbar=400) """ Explanation: The Specific Factors or Ricardo-Viner Model Background The SF model is a workhorse model in trade, growth, political economy and development. We will see variants of the model used to describe rural to urban migration, the Lewis model and other dual sector models of sectoral misallocation, models such as the Harris-Todaro model that explain migration and the urban informal sector. The specific factors model predicts that, in the absence of political redistribution mechanisms, specific factors in declining sectors will organize strong opposition to policies that might otherwise raise growth. The initial distribution of factor endowments may therefore make a big difference in terms of what types of political coalitions mobilize for and against different policies. This is the basic driving force in Moav-Galor's (2006) growth model on why some regions made public investments in human capital which sped the transition from agriculture to manufacturing and enhanced growth, whereas similar policies were delayed in other regions where political/economic resistance was stronger, for instance where landlords had stronger voice in political decisions. These are just a few of the applications. The model is relatively easy to analyze -- it can be described compactly in terms of diagrams and yet is very rich in predictions. The Specific Factors (SF) or Ricardo-Viner model is a close relative of the Hecksher-Ohlin-Samuelson (HOS) neoclassical trade model. The 2x2 HOS model assumes production in each of two sectors takes place by firms using constant returns to scale technologies with capital and labor as inputs and that both capital and labor are mobile across sectors. In the SF model only labor is inter-sectorally mobile and the fixed amounts of capital become 'specific' to the sector they are trapped within. 
In effect the SF model therefore consists of three factors of production: mobile labor and two types of capital, one specific to each sector. Let's label the two sectors are Agriculture and Manufacturing. In agriculture competitive firms bid to hire land and labor. In manufacturing competitive firms bid to hire capital and labor. Each factor of production will be competitively priced in equilibrium but only labor is priced on a national labor market. The SF model is often described as a short-run version of the HOS model. For example suppose we start with a HOS model equilibrium where labor wage and rental rate of capital have equalized across sectors (which also implies marginal products are equalized across sectors -- no productivity differences across sectors). Now suppose that the relative price of manufacturing products suddenly rises (due to a change of world price, or government trade protection or other policies that favor manufacturing). The higher relative product price should lead firms in the booming manufacturing sector to demand both more capital and labor. Correspondingly, demand for land and labor decline in the agricultural sector. In the short run however only labor can be move from agriculture to manufacturing. Agricultural workers can become factory workers but agricultural capital (say 'land' or 'tractors') cannot be easily converted to manufacturing capital (say 'weaving machines'). So labor moves from manufacturing to the agriculture, lowering the capital labor ratio in manufacturing and raising the land to labor ratio in agriculture. The model thus predicts a surge in the real return to capital in the expanding sector and a real decline in the real return to capital in agriculture (land). Hence the measured average and marginal product of capital will now diverge across sectors. 
What happens to the real wage is more ambiguous: it rises measured in terms of purchasing power over agricultural goods but falls in terms of purchasing power over manufacturing goods. Whether workers are better off or worse off following this price/policy change therefore comes down to how important agricultural and manufacturing goods are in their consumption basket. This result is labeled the neo-classical ambiguity. Over the longer-term weaving machines cannot be transformed into tractors but over time new capital accumulation will build tractors and old weaving machines can be sold overseas or as scrap metal. Hence over time more capital will arrive into the manufacturing sector and leave the agricultural sector, whcih in turn will lead to even more movement of labor to manufacturing. If this process continues capital has, in effect become mobile over time, and we end up getting closer to the predictions of the HOS model. Technology and Endowments There are two sectors Agriculture and Manufacturing. Production in each sector takes place with a linear homogenous (constant returns to scale) production functions. Agricultural production requires land which is non-mobile or specific to the sector and mobile labor. $$Q_a = F(\bar T, L_a)$$ Manufacturing production requires specific capital $K$ and mobile labor. $$Q_m = G(\bar K, L_m)$$ The quantity of land in the agricultural sector and the quantity of capital in the manufacturing sector are in fixed supply during the period of analysis. That means that firms within the agricultural (manufacturing) sector may compete with one another for the limited supply of the factor, but no new land (capital) can be supplied in the short-run period of analysis. This of course means that the price of the factor will rise (or fall) quickly in response to swings in factor demand compared to the wage of labor whose supply is more elastic. 
The market for mobile labor is competitive and the market clears at a wage where the sum of labor demands from each sector equals total labor supply. While the total labor supply in the economy is inelastic, the supply of labor to each sector will be elastic, since a rise in the wage in one sector will attract workers from the other sector. $$L_a + L_m = \bar{L}$$ Notice that we can invert the two production function to get minimum labor requirement functions $L_a(Q_a)$ and $L_m(Q_m)$ which tell us the minimum amount of labor $L_i$ required in sector $i$ to produce quantity $Q_i$. If we take these expressions and substitute them into the labor resource constraint we get an expression for the production possibility frontier (PPF) which summarizes the tradeoffs between sectors. Assumptions and parameters for visualizations Let's get concrete and assume each sector employs a CRS Cobb-Douglas production function: $$F(\bar T, L_a)=\bar T^{1-\alpha} \cdot L_a^\alpha$$ $$G(\bar K, L_m)=\bar K^{1-\beta} \cdot L_m^\beta$$ If $\alpha = \beta = \frac{1}{2}$ and $\bar T= \bar K = 100$. Then $$Q_a = \sqrt{\bar T} \sqrt{L_a} $$ $$Q_m = \sqrt{\bar K} \sqrt{L_m}$$ Substituting these into the labor resource constraint yields: $$\frac{Q_a^2}{\bar T}+\frac{Q_m^2}{\bar K} = \bar L$$ or $$Q_m = \sqrt{\bar K \bar L - \frac{\bar K Q_a^2}{\bar T} } $$ If we make the further assumption that $\bar K = \bar L = 100$ and $\bar L = 400$ then the PPF would look like this: NOTE: If you are running this as a live jupyter notebook please first go to the code section below and execute all the code cells there. Then return and run the code cells that follow sequentially. End of explanation """ sfmplot(p=1) """ Explanation: Labor market equilibrium Profit maximizing firms in each sector will hire labor up to the point where the marginal value product of labor (MVPL) equals the market wage. 
Since labor is mobile across sectors in equilibrium workers must be paid the same nominal wage $w$ in either sector: $$ P_a \cdot MPL_a(\bar T, L_a) = w = P_m \cdot MPL_m(\bar K, L_m) $$ It will be useful to express the wage in real terms. Divide each expression above by $P_m$ to get $$ p \cdot MPL_a = \frac{w}{p_m} = MPL_m $$ $$\text{ where } p = \frac{P_a}{P_m}$$ In the plots below we will place the real wage measured in terms of manufactured goods on the vertical axis of the labor demand and supply diagrams. Labor allocation across sectors In a competitive market firms take the market prices of goods $p$ and wages $w$ as given. Since in equilibrium the labor market clears we can write $L_m = \bar L - L_a$ and substitute into the equilibrium condition above to get: $$p \cdot MPL_a(L_A) = w = MPL_m(\bar L-L_A) $$ The left hand side $p \cdot MPL_a(L_A)=w$ can be solved to give us demand for labor in the agricultural sector as a function of the wage $L_a(w/p)$ and is plotted below. The right hand side $w = MPL_m(\bar L-L_a)$ gives us demand for labor in the manufacturing sector $L_m$ as a function of the real wage but this in turn also gives us the supply of labor to the agricultural sector $L_a^s = \bar L - L_m(w/p)$ since firms in the agricultural sector can only attract workers to their sector by paying those workers just a bit above what they would be paid for the jobs they have to leave in the manufacturing sector. The diagram below can therefore be interpreted as showing labor demand and supply in the agricultural sector and their intersection gives us the equilibrium real wage (measured on the vertical in terms of purchasing power over manufactured goods). End of explanation """ p_autarky(Lbar, Tbar, Kbar) """ Explanation: We can solve for the equilibrium labor allocation $L_a$ as a function of the relative price $p$, and this in turn also gives us the equilibrium real wage. 
When $\alpha=\beta = \frac{1}{2}$ the equilibrium condition $p \cdot MPL_a(L_a) = MPL_m(\bar L-L_a)$ becomes: $$p \cdot \frac{\sqrt{\bar T}}{\sqrt{L_a}} = \frac{\sqrt{\bar K}}{\sqrt{\bar L - L_a}}$$ which we solve to find: $$L_a^e = \frac{p^2 \bar T \bar L}{\bar K - p^2 \bar T}$$ For cases where $\alpha \ne \beta$ we find the optimal $L_a$ that solves this equilibrium condition numerically. Autarky prices Thus far we have a pretty complete model of how production allocations and real wages would be determined in a small open economy where producers face world price ratio $p$. we explore comparative statics in this economy in more detail below. If however the economy is inititally in autarky or closed to the world then we must also consider the role of domestic consumer preferences in the determination of domestic equilibrium product prices. With Cobb-Douglas preferences $u(x,y) = \gamma \ln(x) + (1-\gamma) \ln(y)$ consumers demands can be written as a function of relative price $p=\frac{P_a}{P_m}$. Setting $P_m=1$ to make manufacturing the numeraire good, this can be written: $$\begin{align} C_a(p) =& \gamma \cdot \frac{I(p)}{p} \ C_m(p) =& (1-\gamma) \cdot I(p) \end{align}$$ Income $I$ in the expressions is given by the value of national production or GDP at these prices. Measured in manufactured goods: $$I(p) = p \cdot F(\bar T, L_a(p)) + G(\bar K, \bar L - L_a(p)$$ By Walras' law we only need to find the relative price at which output equals demand in one of the two product markets so in the the code below we solve for equilibrium domestic prices from the condition $Q_a(p)=C_a(p)$. For parameters $\alpha=\beta=\gamma = \frac{1}{2}$, $\bar T = \bar K =100$ and $\bar L=400$ the domestic equilibrium prices are unitary: End of explanation """ eqn(p_autarky()) """ Explanation: And this in turn leads to an equilibrium autarky allocation with $L_a = L_m = 200$ and a real wage $\frac{w}{p}=0.35$. 
End of explanation """ pw = 7/4 Lao, wo = eqn(p=pw) Lao, wo, wo/pw sfmtrade(p=7/4) """ Explanation: The plot below shows the autarky production and consumption point (marked by an 'X') as well as the new production point (marked by a circle) and consumption point (marked by a square) if the country opened to trade with a world relative price $p=\frac{7}{4}$. Opening to trade leads the country to expand agricultural production by re-allocating labor from manufacturing to agriculture. We confirm the expected neoclassical ambiguity result which is that an increase in the relative price of agricultural goods will lead to an increase in the real wage measured in terms of manufactured goods $\frac{w}{p_m}$ ($=w$ since we have $P_m=1$) but a decrease in real wage measured in terms of agricultural goods or $\frac{w}{P_a} = \frac{w}{p}$ in our notation (since $\frac{w}{P_a} = \frac{w}{p_m} \cdot \frac{P_m}{P_a}$). Diagrammatically $$p \uparrow \rightarrow \frac{w}{p_m} \uparrow , \frac{w}{p_a} \downarrow$$ End of explanation """ sfmplot2(2) """ Explanation: Effects of a price change on the income distribution More explanations to be placed here... End of explanation """ import numpy as np from scipy.optimize import fsolve np.seterr(divide='ignore', invalid='ignore') import matplotlib.pyplot as plt from ipywidgets import interact, fixed import seaborn %matplotlib inline plt.style.use('seaborn-colorblind') plt.rcParams["figure.figsize"] = [7,7] plt.rcParams["axes.spines.right"] = True plt.rcParams["axes.spines.top"] = False plt.rcParams["font.size"] = 18 plt.rcParams['figure.figsize'] = (10, 6) plt.rcParams['axes.grid']=True Tbar = 100 # Fixed specific land in ag. Kbar = 100 # Fixed specific capital in manuf Lbar = 400 # Total number of mobile workers LbarMax = 400 # Lbar will be on slider, max value. 
p = 1.00 # initial rel price of ag goods, p = Pa/Pm alpha, beta = 0.5, 0.5 # labor share in ag, manuf """ Explanation: <a id='codesection'></a> Code Section Make sure you run the cells below FIRST. Then run the cells above Python simulation and plots End of explanation """ La = np.linspace(1, LbarMax-1,LbarMax) Lm = Lbar - La """ Explanation: For the plots we want to plot over $L_a$ and $L_m = \bar L -l_a$: End of explanation """ def F(La, Tbar = Tbar): return (Tbar**(1-alpha) * La**alpha) def G(Lm, Kbar =Kbar): return (Kbar**(1-beta) * Lm**beta) def MPLa(La): return alpha*Tbar**(1-alpha) * La**(alpha-1) def MPLm(Lm): return beta*Kbar**(1-beta) * Lm**(beta-1) def MPT(La, Tbar=Tbar): return (1-alpha)*Tbar**(-alpha) * La**alpha def MPK(Lm, Kbar=Kbar): return (1-beta)*Kbar**(-beta) * Lm**beta """ Explanation: The production functions in each sector: End of explanation """ def ppf(Tbar=Tbar, Kbar=Kbar, Lbar=Lbar): Qa = F(La, Tbar) * (La<Lbar) Qm = G(Lm, Kbar) plt.title('Production Possibility Frontier') plt.xlabel(r'$Q_a$') plt.ylabel(r'$Q_m$') plt.plot(Qa, Qm) plt.gca().set_aspect('equal'); ppf(Tbar=100, Kbar=100, Lbar=400) """ Explanation: We have enough to plot a production possibility Frontier (and how it varies with factor supplies): End of explanation """ LDa = p * MPLa(La) *(La<Lbar) # for Cobb-Douglas MPL can be written this way LDm = MPLm(Lbar-La) """ Explanation: Labor demand in each sector as a function of $p=\frac{P_a}{P_m}$: End of explanation """ def eqn(p, Lbar=Lbar): '''returns numerically found equilibrium labor allocation and wage''' def func(La): return p*MPLa(La) - MPLm(Lbar-La) Laeq = fsolve(func, 1)[0] return Laeq, p*MPLa(Laeq) def u(x,y): '''Utility function''' return x*y def XD(p, Lbar = Lbar): '''Cobb-Douglas demand for goods given world prices (national income computed)''' LAe, we = eqn(p) # gdp at world prices measured in manuf goods gdp = p*F(LAe, Tbar = Tbar) + G(Lbar -LAe, Kbar=Kbar) return (1/2)*gdp/p, (1/2)*gdp def indif(x, ubar): return 
ubar/x def p_autarky(Lbar=Lbar, Tbar=Tbar, Kbar=Kbar): '''Find autarky product prices. By Walras' law enough to find price that sets excess demand in just one market''' def excessdemandA(p): LAe, _ = eqn(p) QA = F(LAe, Tbar = Tbar) CA, CM = XD(p, Lbar=Lbar) return QA-CA peq = fsolve(excessdemandA, 1)[0] return peq p_autarky() def sfmtrade(p): Ca = np.linspace(0,200,200) LAe, we = eqn(p) X, Y = F(LAe, Tbar = Tbar), G(Lbar -LAe) gdp = p*X + Y plt.scatter(F(LAe, Tbar), G(Lbar-LAe, Tbar), label='Trade produce') plt.scatter(*XD(p),label='Trade consume', marker='s') plt.scatter(*XD(p_autarky()), marker='x', label='Autarky') plt.plot([0,gdp/p],[gdp, 0]) ppf(100) ub = u(*XD(p)) plt.ylim(0,gdp) plt.xlim(0,gdp) plt.plot(Ca, indif(Ca, ub)) plt.grid(False) plt.legend() plt.gca().spines['bottom'].set_position('zero') plt.gca().spines['left'].set_position('zero') sfmtrade(p=7/4) """ Explanation: The following function returns the equilibrium allocation of labor to agriculture and equilibrium nominal wage for any initial relative price of agricultural goods $p=\frac{P_a}{P_m}$ End of explanation """ La = np.arange(0, LbarMax) # this is always over the LbarMax range def sfmplot(p, Lbar=LbarMax, show=True): Lm = Lbar - La Qa = (Tbar**(1-alpha) * La**alpha) * (La<Lbar) Qm = Kbar**(1-beta) * (Lbar - La)**beta pMPLa = (p * alpha * Qa/La)*(La<Lbar) # for Cobb-Douglass MPL can be written this way MPLm = beta * Qm/(Lbar-La) LA, weq = eqn(p) ymax = 1.0 plt.ylim(0,ymax) plt.xlim(0,LbarMax) plt.title('Specific Factors Model') plt.plot(La, pMPLa, linewidth = 3, label='AG labor demand') plt.plot(La, MPLm, linewidth=3, label ='MF labor demand') plt.scatter(LA, weq, s=100, color='black') plt.axhline(weq, linestyle='dashed') plt.axvline(Lbar, linewidth = 3) plt.plot([LA, LA],[0, weq], linestyle='dashed') plt.xlabel('Labor') plt.ylabel('Real wage --' + r'$\frac{w}{p_M}$') plt.grid() if show: plt.legend(loc=(1.04,0.9)) print("(La, Lm) = ({0:3.0f}, {1:3.0f}) (w/Pm, w/Pa) =({2:3.2f}, {3:3.2f})" 
.format(LA, Lbar-LA, weq, weq/p)) print("(v/Pa, v/Pm) = ({0:3.1f}, {1:3.1f}) (r/Pa, r/Pm) = ({2:3.1f}, {3:3.1f})" .format(MPT(LA), p*MPT(LA), MPK(Lbar-LA)/p, MPK(Lbar-LA) ) ) plt.show() """ Explanation: Simple example with $\bar L$ at $\bar L^{max}$ End of explanation """ sfmplot(p=2) """ Explanation: If you are wondering about why we skip the plt.show() when show=True, it's because for figures that use the ipywidgets interact function to allow parameters to change with sliders we will need to have a plt.show() to redraw the plot in every function call (otherwise we would end up with a stack of plots rather than a changing plot). End of explanation """ def sfmplot2(p): sfmplot(1, show=False) sfmplot(p, show=False) plt.grid(False) if p == 1: plt.title('SF Model'); else: La0, w0 = eqn(1) plt.scatter(La0,w0*p, s=100, color='black') #where wage would rise to without labor movement if p>1: plt.title(r'$\frac{P_a}{P_m} \uparrow \rightarrow \frac{w}{P_m} \uparrow, \frac{w}{P_a} \downarrow $' ); elif p<1: plt.title(r'$\frac{P_a}{P_m} \downarrow \rightarrow \frac{w}{P_m} \downarrow, \frac{w}{P_a} \uparrow $' ); plt.show(); sfmplot2(2) interact(sfmplot2, p=(0.1, 2,0.1)); """ Explanation: Suppose that by opening to trade the relative price of agricultural goods $p$ doubles from $p=1$ to $p=2$. This greatly shifts the demand for agricultural workers: End of explanation """ interact(sfmplot2, p=(0,2,0.2), Lbar=fixed(400), show=fixed(True)) """ Explanation: Before and after plots The trick here is to define a new plotting function that first plots a static plot and then allows an interaction. End of explanation """
spatialfrog/pi_weather
Weather_data_collection.ipynb
gpl-3.0
import csv
import os
import sys
import time
from datetime import datetime

from sense_hat import SenseHat

# Initialise the Sense HAT and clear its LED matrix.
sense = SenseHat()
sense.clear()

"""
Explanation: Collect Data
Goal is to collect data from SenseHat.
End of explanation
"""

# --- Interactive sensor checks (original notebook cells). As bare
# expressions these only display output in a notebook; they have no
# effect when run as a script. ---
sense.get_temperature()
sense.get_humidity()
sense.get_compass()
sense.get_temperature_from_humidity()
sense.get_temperature_from_pressure()
sense.get_pressure()
sense.get_humidity()

# NOTE(review): interactive preview loop -- runs forever and was interrupted
# manually in the notebook; as a script it blocks everything below.
while True:
    pressure = round(sense.get_pressure(), 3)
    humidity = round(sense.get_humidity(), 3)
    # fixed: was a Python 2 print statement (and "Humidty" typo) in an
    # otherwise Python 3 script
    print('Pressure: {0}\t Humidity: {1}'.format(pressure, humidity))

"""
Explanation: Get data
Read data from the SenseHat in an interactive way.
End of explanation
"""

# Directory that will hold the CSV output.
output_directory = r'/home/pi/Projects/pi_weather/data/'


def get_measurements():
    '''
    Return one row of sensor measurements as a list.

    Order of returned list is:
    * datetime, temp_humidity, temp_pressure, pressure and humidity

    Notes:
    * the timestamp is an ISO-8601 string (yyyy-mm-ddTHH:MM:SS)
    * numeric values rounded to 3 decimal places
    '''
    # clear previous LED-matrix state before sampling
    sense.clear()

    # generate iso8601 yyyy-mm-ddTHH:MM:SS timestamp string
    recorded_datetime = datetime.isoformat(datetime.now())

    # capture data from sensors
    temperature_from_humidity = round(sense.get_temperature_from_humidity(), 3)
    temperature_from_pressure = round(sense.get_temperature_from_pressure(), 3)
    pressure = round(sense.get_pressure(), 3)
    humidity = round(sense.get_humidity(), 3)

    return [recorded_datetime, temperature_from_humidity,
            temperature_from_pressure, pressure, humidity]


# ===== csv file
csv_file_name = 'raw.csv'

# full path to csv file
csv_path = os.path.join(output_directory, csv_file_name)

# csv column names
csv_headers = ['datetime', 'temperature_from_humidity',
               'temperature_from_pressure', 'pressure', 'humidity']

# create the csv file with a header row only if it does not exist yet
# (was `if exists: pass / else: ...`)
if not os.path.exists(csv_path):
    with open(csv_path, 'w') as f:
        csv.writer(f).writerow(csv_headers)

# Sampling interval.
# NOTE(review): the surrounding documentation says "every 1 minute" but the
# code sleeps 5 seconds -- confirm the intended interval (60 for production?)
SAMPLE_INTERVAL_SECONDS = 5

# main collection loop: sample, echo, append to CSV, wait
while True:
    data = get_measurements()
    print(data)
    with open(csv_path, 'a') as f:
        csv.writer(f).writerow(data)
    time.sleep(SAMPLE_INTERVAL_SECONDS)

"""
Explanation: Write measurements to disk
Write measurements to disk every 1 minute. Use CSV file format.
Timestamp with ISO8601 format of YYYY-hh-mmTHH:MM:SS.
End of explanation
"""
PyLCARS/PythonUberHDL
myHDL_ComputerFundamentals/Counters/.ipynb_checkpoints/CountersInMyHDL-checkpoint.ipynb
bsd-3-clause
from myhdl import * from myhdlpeek import Peeker import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline from sympy import * init_printing() import random #https://github.com/jrjohansson/version_information %load_ext version_information %version_information myhdl, myhdlpeek, numpy, pandas, matplotlib, sympy, random #helper functions to read in the .v and .vhd generated files into python def VerilogTextReader(loc, printresult=True): with open(f'{loc}.v', 'r') as vText: VerilogText=vText.read() if printresult: print(f'***Verilog modual from {loc}.v***\n\n', VerilogText) return VerilogText def VHDLTextReader(loc, printresult=True): with open(f'{loc}.vhd', 'r') as vText: VerilogText=vText.read() if printresult: print(f'***VHDL modual from {loc}.vhd***\n\n', VerilogText) return VerilogText """ Explanation: \title{Counters in myHDL} \author{Steven K Armour} \maketitle Counters play a vital role in Digital Hardware, ranging from Clock Dividers; (see below) to event triggers by recording the number of events that have occurred or will still need to occur (all the counters here in use a clock as the counting event but this is easily changed). Presented below are some basic HDL counters (Up, Down, Hybridized Up-Down) in myHDL. 
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#Refrances" data-toc-modified-id="Refrances-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Refrances</a></span></li><li><span><a href="#Libraries-and-Helper-functions" data-toc-modified-id="Libraries-and-Helper-functions-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Libraries and Helper functions</a></span></li><li><span><a href="#Counter-Specs" data-toc-modified-id="Counter-Specs-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Counter Specs</a></span></li><li><span><a href="#myHDL-modules-bitvector-type-behavior" data-toc-modified-id="myHDL-modules-bitvector-type-behavior-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>myHDL modules bitvector type behavior</a></span><ul class="toc-item"><li><span><a href="#up-counting-behavior" data-toc-modified-id="up-counting-behavior-4.1"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>up counting behavior</a></span></li><li><span><a href="#down-counting-behavior" data-toc-modified-id="down-counting-behavior-4.2"><span class="toc-item-num">4.2&nbsp;&nbsp;</span>down counting behavior</a></span></li></ul></li><li><span><a href="#Up-Counter" data-toc-modified-id="Up-Counter-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Up-Counter</a></span><ul class="toc-item"><li><span><a href="#myHDL-testing" data-toc-modified-id="myHDL-testing-5.1"><span class="toc-item-num">5.1&nbsp;&nbsp;</span>myHDL testing</a></span></li><li><span><a href="#Verilog-Code" data-toc-modified-id="Verilog-Code-5.2"><span class="toc-item-num">5.2&nbsp;&nbsp;</span>Verilog Code</a></span></li><li><span><a href="#Verilog-Testbench" data-toc-modified-id="Verilog-Testbench-5.3"><span class="toc-item-num">5.3&nbsp;&nbsp;</span>Verilog Testbench</a></span></li></ul></li><li><span><a href="#Down-Counter" data-toc-modified-id="Down-Counter-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Down Counter</a></span><ul 
class="toc-item"><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-6.1"><span class="toc-item-num">6.1&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a href="#Verilog-Code" data-toc-modified-id="Verilog-Code-6.2"><span class="toc-item-num">6.2&nbsp;&nbsp;</span>Verilog Code</a></span></li><li><span><a href="#Verilog-Testbench" data-toc-modified-id="Verilog-Testbench-6.3"><span class="toc-item-num">6.3&nbsp;&nbsp;</span>Verilog Testbench</a></span></li></ul></li><li><span><a href="#Up/Down-Counter" data-toc-modified-id="Up/Down-Counter-7"><span class="toc-item-num">7&nbsp;&nbsp;</span>Up/Down Counter</a></span><ul class="toc-item"><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-7.1"><span class="toc-item-num">7.1&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a href="#Verilog-Code" data-toc-modified-id="Verilog-Code-7.2"><span class="toc-item-num">7.2&nbsp;&nbsp;</span>Verilog Code</a></span></li><li><span><a href="#Verilog-Testbench" data-toc-modified-id="Verilog-Testbench-7.3"><span class="toc-item-num">7.3&nbsp;&nbsp;</span>Verilog Testbench</a></span></li></ul></li><li><span><a href="#Application:-Clock-Divider" data-toc-modified-id="Application:-Clock-Divider-8"><span class="toc-item-num">8&nbsp;&nbsp;</span>Application: Clock Divider</a></span><ul class="toc-item"><li><span><a href="#myHDL-Testing" data-toc-modified-id="myHDL-Testing-8.1"><span class="toc-item-num">8.1&nbsp;&nbsp;</span>myHDL Testing</a></span></li><li><span><a href="#Verilog-Code" data-toc-modified-id="Verilog-Code-8.2"><span class="toc-item-num">8.2&nbsp;&nbsp;</span>Verilog Code</a></span></li><li><span><a href="#Verilog-Testbench" data-toc-modified-id="Verilog-Testbench-8.3"><span class="toc-item-num">8.3&nbsp;&nbsp;</span>Verilog Testbench</a></span></li></ul></li></ul></div> Refrances @misc{loi le_2017, title={Verilog code for counter with testbench}, 
url={http://www.fpga4student.com/2017/03/verilog-code-for-counter-with-testbench.html}, journal={Fpga4student.com}, author={Loi Le, Van}, year={2017} } @misc{digilent_2018, title={Learn.Digilentinc | Counter and Clock Divider}, url={https://learn.digilentinc.com/Documents/262}, journal={Learn.digilentinc.com}, author={Digilent}, year={2018} } Libraries and Helper functions End of explanation """ CountVal=17 BitSize=int(np.log2(CountVal))+1; BitSize """ Explanation: Counter Specs End of explanation """ ModBV=modbv(0)[BitSize:] IntBV=intbv(0)[BitSize:] print(f"`ModBV` max is {ModBV.max}; min is {ModBV.min}") print(f"`IntBV` max is {IntBV.max}; min is {IntBV.min}") for _ in range(ModBV.max*2): try: ModBV+=1; IntBV+=1 print(f"`ModBV` value is {ModBV}; `IntBV` value is {IntBV}") except ValueError: ModBV+=1 print(f"`ModBV` value is {ModBV}; `IntBV` value is {IntBV} and INVALID") """ Explanation: myHDL modules bitvector type behavior up counting behavior End of explanation """ ModBV=modbv(2**BitSize -1)[BitSize:] IntBV=intbv(2**BitSize -1)[BitSize:] print(f"`ModBV` max is {ModBV.max}; min is {ModBV.min}") print(f"`IntBV` max is {IntBV.max}; min is {IntBV.min}") for _ in range(ModBV.max*2): try: ModBV-=1; IntBV-=1 print(f"`ModBV` value is {ModBV}; `IntBV` value is {IntBV}") except ValueError: ModBV-=0 print(f"`ModBV` value is {ModBV}; `IntBV` value is {IntBV} and INVALID") """ Explanation: down counting behavior End of explanation """ @block def Up_Counter(count, Trig, clk, rst, CountVal, BitSize): """ UpCounter Input: clk(bool): system clock feed rst(bool): clock reset signal Ouput: count (bit vector): current count value; count Trig(bool) Parmeter(Python Only): CountVal(int): value to count to BitSize (int): Bitvalue size is log_2(CountVal)+1 """ #internals count_i=Signal(modbv(0)[BitSize:]) Trig_i=Signal(bool(0)) @always(clk.posedge, rst.negedge) def logic(): if rst: count_i.next=0 Trig_i.next=0 elif count_i%CountVal==0 and count_i!=0: Trig_i.next=1 count_i.next=0 else: 
count_i.next=count_i+1 @always_comb def OuputBuffer(): count.next=count_i Trig.next=Trig_i return instances() """ Explanation: Up-Counter up counters are counters that count up to a target value from a lower starting value. The following counter is a simple one that uses the clock as incrementer (think one clock cycle as one swing of an old grandfather clock pendulum). But more complicated counters can use any signal as an incrementer. This Counter also has a signal the indicates that the counter has been triggered before the modulus values for the internal counter is reset. This is because this counter tries to reproduce the behavior of timers found on common apps that show how much time has elapsed since the counter has run up \begin{figure} \centerline{\includegraphics[width=10cm]{Up_Counter.png}} \caption{\label{fig:RP} Up_Counter Functianl Digram } \end{figure} End of explanation """ Peeker.clear() clk=Signal(bool(0)); Peeker(clk, 'clk') rst=Signal(bool(0)); Peeker(rst, 'rst') Trig=Signal(bool(0)); Peeker(Trig, 'Trig') count=Signal(modbv(0)[BitSize:]); Peeker(count, 'count') DUT=Up_Counter(count, Trig, clk, rst, CountVal, BitSize) def Up_CounterTB(): """ myHDL only Testbench for `Up_Counter` module """ @always(delay(1)) def ClkGen(): clk.next=not clk @instance def stimules(): i=0 while True: if i==int(CountVal*1.5): rst.next=1 elif i==int(CountVal*1.5)+1: rst.next=0 if i==int(CountVal*2.5): raise StopSimulation() i+=1 yield clk.posedge return instances() sim=Simulation(DUT, Up_CounterTB(), *Peeker.instances()).run() Peeker.to_wavedrom() Up_CounterData=Peeker.to_dataframe() Up_CounterData=Up_CounterData[Up_CounterData['clk']==1] Up_CounterData.drop('clk', axis=1, inplace=True) Up_CounterData.reset_index(drop=True, inplace=True) Up_CounterData """ Explanation: myHDL testing End of explanation """ DUT.convert() VerilogTextReader('Up_Counter'); """ Explanation: Verilog Code End of explanation """ ResetAt=int(CountVal*1.5)+1 StopAt=int(CountVal*2.5) @block def 
Up_CounterTBV(): """ myHDL -> Verilog Testbench for `Up_Counter` module """ clk=Signal(bool(0)) rst=Signal(bool(0)) Trig=Signal(bool(0)) count=Signal(modbv(0)[BitSize:]) @always_comb def print_data(): print(clk, rst, Trig, count) DUT=Up_Counter(count, Trig, clk, rst, CountVal, BitSize) @instance def clk_signal(): while True: clk.next = not clk yield delay(1) @instance def stimules(): i=0 while True: if i==ResetAt: rst.next=1 elif i==(ResetAt+1): rst.next=0 else: pass if i==StopAt: raise StopSimulation() i+=1 yield clk.posedge return instances() TB=Up_CounterTBV() TB.convert(hdl="Verilog", initial_values=True) VerilogTextReader('Up_CounterTBV'); """ Explanation: \begin{figure} \centerline{\includegraphics[width=10cm]{Up_CounterRTL.png}} \caption{\label{fig:UCRTL} Up_Counter RTL Schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{Up_CounterSYN.png}} \caption{\label{fig:UCSYN} Up_Counter Synthesized Schematic; Xilinx Vivado 2017.4} \end{figure} Verilog Testbench End of explanation """ @block def Down_Counter(count, Trig, clk, rst, StartVal, BitSize): """ DownCounter Input: clk(bool): system clock feed rst(bool): clock reset signal Ouput: count (bit vector): current count value; count Trig(bool) Parmeter(Python Only): StartVal(int): value to count from BitSize (int): Bitvalue size is log_2(CountVal)+1 CatButt """ #internal counter value count_i=Signal(modbv(StartVal)[BitSize:]) @always(clk.posedge, rst.negedge) def logic(): if rst: count_i.next=StartVal Trig.next=0 elif count_i==0: Trig.next=1 count_i.next=StartVal else: count_i.next=count_i-1 @always_comb def OuputBuffer(): count.next=count_i return instances() """ Explanation: Down Counter Down Counters Count Down from a set upper value to a set target lower value. The following Down Counter is a simple revamp of the previous Up Counter. 
Thus it starts from the CountVal and counts down to zero to trigger the trigger signal that it has completed one countdown cycle before the internal counter resets to restart the countdown. \begin{figure} \centerline{\includegraphics[width=10cm]{}} \caption{\label{fig:RP} Down_Counter Functianl Digram (ToDo) } \end{figure} End of explanation """ Peeker.clear() clk=Signal(bool(0)); Peeker(clk, 'clk') rst=Signal(bool(0)); Peeker(rst, 'rst') Trig=Signal(bool(0)); Peeker(Trig, 'Trig') count=Signal(modbv(0)[BitSize:]); Peeker(count, 'count') DUT=Down_Counter(count, Trig, clk, rst, CountVal, BitSize) def Down_CounterTB(): """ myHDL only Testbench for `Down_Counter` module """ @always(delay(1)) def ClkGen(): clk.next=not clk @instance def stimules(): i=0 while True: if i==int(CountVal*1.5): rst.next=1 elif i==int(CountVal*1.5)+1: rst.next=0 if i==int(CountVal*2.5): raise StopSimulation() i+=1 yield clk.posedge return instances() sim=Simulation(DUT, Down_CounterTB(), *Peeker.instances()).run() Peeker.to_wavedrom() Down_CounterData=Peeker.to_dataframe() Down_CounterData=Down_CounterData[Down_CounterData['clk']==1] Down_CounterData.drop('clk', axis=1, inplace=True) Down_CounterData.reset_index(drop=True, inplace=True) Down_CounterData """ Explanation: myHDL Testing End of explanation """ DUT.convert() VerilogTextReader('Down_Counter'); """ Explanation: Verilog Code End of explanation """ ResetAt=int(CountVal*1.5) StopAt=int(CountVal*2.5) @block def Down_CounterTBV(): """ myHDL -> Verilog Testbench for `Down_Counter` module """ clk=Signal(bool(0)) rst=Signal(bool(0)) Trig=Signal(bool(0)) count=Signal(modbv(0)[BitSize:]) @always_comb def print_data(): print(clk, rst, Trig, count) DUT=Down_Counter(count, Trig, clk, rst, CountVal, BitSize) @instance def clk_signal(): while True: clk.next = not clk yield delay(1) @instance def stimules(): i=0 while True: if i==ResetAt: rst.next=1 elif i==(ResetAt+1): rst.next=0 else: pass if i==StopAt: raise StopSimulation() i+=1 yield 
clk.posedge return instances() TB=Down_CounterTBV() TB.convert(hdl="Verilog", initial_values=True) VerilogTextReader('Down_CounterTBV'); """ Explanation: \begin{figure} \centerline{\includegraphics[width=10cm]{Down_CounterRTL.png}} \caption{\label{fig:DCRTL} Down_Counter RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{Down_CounterSYN.png}} \caption{\label{fig:DCSYN} Down_Counter Synthesized Schematic; Xilinx Vivado 2017.4} \end{figure} Verilog Testbench End of explanation """ #Create the Direction States for UpDown Counter DirStates=enum('Up', 'Down') print(f"`Up` state repersentation is {bin(DirStates.Up)}") print(f"`Down` state repersentation is {bin(DirStates.Down)}") @block def UpDown_Counter(Dir, count, Trig, clk, rst, CountVal, StartVal, BitSize): """ UpDownCounter, hybrid of a simple Up Counter and a simple Down Counter using `Dir` to control Up/Down count Direction Input: Dir(): clk(bool): system clock feed rst(bool): clock reset signal Ouput: count (bit vector): current count value; count Trig(bool) Parmeter(Python Only): CountVal(int): Highest Value for counter StartVal(int): starting value for internal counter BitSize (int): Bitvalue size is log_2(CountVal)+1 """ #internal counter value count_i=Signal(modbv(StartVal)[BitSize:]) @always(clk.posedge, rst.negedge) def logic(): if rst: count_i.next=StartVal Trig.next=0 #counter contanment elif count_i//CountVal==1 and rst==0: count_i.next=StartVal #up behavior elif Dir==DirStates.Up: count_i.next=count_i+1 #simple Triger at ends if count_i%CountVal==0: Trig.next=1 #down behavior elif Dir==DirStates.Down: count_i.next=count_i-1 #simple Triger at ends if count_i%CountVal==0: Trig.next=1 @always_comb def OuputBuffer(): count.next=count_i return instances() """ Explanation: Up/Down Counter This Counter incorporates both an Up Counter and Down Counter via hybridizing between the two via a direction control state machine \begin{figure} 
\centerline{\includegraphics[width=10cm]{}} \caption{\label{fig:RP} UpDown_Counter Functianl Digram (ToDo) } \end{figure} End of explanation """ Peeker.clear() clk=Signal(bool(0)); Peeker(clk, 'clk') rst=Signal(bool(0)); Peeker(rst, 'rst') Trig=Signal(bool(0)); Peeker(Trig, 'Trig') count=Signal(modbv(0)[BitSize:]); Peeker(count, 'count') Dir=Signal(DirStates.Up); Peeker(Dir, 'Dir') DUT=UpDown_Counter(Dir, count, Trig, clk, rst, CountVal, StartVal=CountVal//2, BitSize=BitSize) def UpDown_CounterTB(): """ myHDL only Testbench for `UpDown_Counter` module """ @always(delay(1)) def ClkGen(): clk.next=not clk @instance def stimules(): i=0 while True: if i==int(CountVal*1.5): Dir.next=DirStates.Down elif i==int(CountVal*2.5): rst.next=1 elif i==int(CountVal*2.5)+1: rst.next=0 if i==int(CountVal*3.5): raise StopSimulation() i+=1 yield clk.posedge return instances() sim=Simulation(DUT, UpDown_CounterTB(), *Peeker.instances()).run() Peeker.to_wavedrom() UpDown_CounterData=Peeker.to_dataframe() UpDown_CounterData=UpDown_CounterData[UpDown_CounterData['clk']==1] UpDown_CounterData.drop('clk', axis=1, inplace=True) UpDown_CounterData.reset_index(drop=True, inplace=True) UpDown_CounterData """ Explanation: myHDL Testing End of explanation """ DUT.convert() VerilogTextReader('UpDown_Counter'); """ Explanation: Verilog Code End of explanation """ StateChangeAt=int(CountVal*1.5) ResetAt=int(CountVal*2.5) StopAt=int(CountVal*3.5) @block def UpDown_CounterTBV(): """ myHDL -> Verilog Testbench for `Down_Counter` module """ clk=Signal(bool(0)) rst=Signal(bool(0)) Trig=Signal(bool(0)) count=Signal(modbv(0)[BitSize:]) Dir=Signal(DirStates.Up) DUT=UpDown_Counter(Dir, count, Trig, clk, rst, CountVal, StartVal=CountVal//2, BitSize=BitSize) @always_comb def print_data(): print(clk, rst, Trig, count) DUT=Down_Counter(count, Trig, clk, rst, CountVal, BitSize) @instance def clk_signal(): while True: clk.next = not clk yield delay(1) @instance def stimules(): i=0 while True: if i==StateChangeAt: 
Dir.next=DirStates.Down elif i==ResetAt: rst.next=1 elif i==ResetAt+1: rst.next=0 else: pass if i==StopAt: raise StopSimulation() i+=1 yield clk.posedge return instances() TB=UpDown_CounterTBV() TB.convert(hdl="Verilog", initial_values=True) VerilogTextReader('UpDown_CounterTBV'); """ Explanation: \begin{figure} \centerline{\includegraphics[width=10cm]{UpDown_CounterRTL.png}} \caption{\label{fig:UDCRTL} UpDown_Counter RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{UpDown_CounterSYN.png}} \caption{\label{fig:UDCSYN} UpDown_Counter Synthesized schematic; Xilinx Vivado 2017.4} \end{figure} Verilog Testbench End of explanation """ @block def ClockDivider(Divisor, clkOut, count, clk,rst): """ Simple Clock Divider based on the Digilint Clock Divider https://learn.digilentinc.com/Documents/262 Input: Divisor(32 bit): the clock frequncy divide by value clk(bool): The input clock rst(bool): clockDivider Reset Ouput: clkOut(bool): the divided clock ouput count(32bit): the value of the internal counter """ count_i=Signal(modbv(0)[32:]) @always(clk.posedge, rst.posedge) def counter(): if rst: count_i.next=0 elif count_i==(Divisor-1): count_i.next=0 else: count_i.next=count_i+1 clkOut_i=Signal(bool(0)) @always(clk.posedge, rst.posedge) def clockTick(): if rst: clkOut_i.next=0 elif count_i==(Divisor-1): clkOut_i.next=not clkOut_i else: clkOut_i.next=clkOut_i @always_comb def OuputBuffer(): count.next=count_i clkOut.next=clkOut_i return instances() """ Explanation: Application: Clock Divider On common application in HDL for counters in build clock dividers. And while there are more specialized and advanced means to perform up or down frequency generation from a reference clock (see for example digital Phase Lock Loops). 
A simple clock divider is very useful HDL code to drive other HDL IPs that should/need a slower event rate than the Megahertz+ speeds of today's FPGAs \begin{figure} \centerline{\includegraphics[width=10cm]{}} \caption{\label{fig:RP} ClockDivider Functianl Digram (ToDo) } \end{figure} End of explanation """ Peeker.clear() clk=Signal(bool(0)); Peeker(clk, 'clk') Divisor=Signal(intbv(0)[32:]); Peeker(Divisor, 'Divisor') count=Signal(intbv(0)[32:]); Peeker(count, 'count') clkOut=Signal(bool(0)); Peeker(clkOut, 'clkOut') rst=Signal(bool(0)); Peeker(rst, 'rst') DUT=ClockDivider(Divisor, clkOut, count, clk,rst) def ClockDividerTB(): """ myHDL only Testbench for `ClockDivider` module """ @always(delay(1)) def ClkGen(): clk.next=not clk @instance def stimules(): for i in range(2,6+1): Divisor.next=i rst.next=0 #run clock time for _ in range(4*2**(i-1)): yield clk.posedge for j in range(1): if j==0: rst.next=1 yield clk.posedge raise StopSimulation() return instances() sim=Simulation(DUT, ClockDividerTB(), *Peeker.instances()).run() Peeker.to_wavedrom() ClockDividerData=Peeker.to_dataframe() ClockDividerData ClockDividerData_2=ClockDividerData[ClockDividerData['Divisor']==2] ClockDividerData_2.reset_index(drop=True, inplace=True) ClockDividerData_2.plot(y=['clk', 'clkOut']); ClockDividerData_3=ClockDividerData[ClockDividerData['Divisor']==3] ClockDividerData_3.reset_index(drop=True, inplace=True) ClockDividerData_3.plot(y=['clk', 'clkOut']); ClockDividerData_4=ClockDividerData[ClockDividerData['Divisor']==4] ClockDividerData_4.reset_index(drop=True, inplace=True) ClockDividerData_4.plot(y=['clk', 'clkOut']); ClockDividerData_5=ClockDividerData[ClockDividerData['Divisor']==5] ClockDividerData_5.reset_index(drop=True, inplace=True) ClockDividerData_5.plot(y=['clk', 'clkOut']); ClockDividerData_6=ClockDividerData[ClockDividerData['Divisor']==6] ClockDividerData_6.reset_index(drop=True, inplace=True) ClockDividerData_6.plot(y=['clk', 'clkOut']); DUT.convert() 
VerilogTextReader('ClockDivider'); """ Explanation: myHDL Testing End of explanation """ @block def ClockDividerTBV(): """ myHDL -> Verilog Testbench for `ClockDivider` module """ clk=Signal(bool(0)); Divisor=Signal(intbv(0)[32:]) count=Signal(intbv(0)[32:]) clkOut=Signal(bool(0)) rst=Signal(bool(0)) @always_comb def print_data(): print(clk, Divisor, count, clkOut, rst) DUT=ClockDivider(Divisor, clkOut, count, clk,rst) @instance def clk_signal(): while True: clk.next = not clk yield delay(1) @instance def stimules(): for i in range(2,6+1): Divisor.next=i rst.next=0 #run clock time for _ in range(4*2**(i-1)): yield clk.posedge for j in range(1): if j==0: rst.next=1 else: pass yield clk.posedge raise StopSimulation() return instances() TB=ClockDividerTBV() TB.convert(hdl="Verilog", initial_values=True) VerilogTextReader('ClockDividerTBV'); """ Explanation: Verilog Code \begin{figure} \centerline{\includegraphics[width=10cm]{ClockDividerRTL.png}} \caption{\label{fig:clkDivRTL} ClockDivider RTL schematic; Xilinx Vivado 2017.4} \end{figure} \begin{figure} \centerline{\includegraphics[width=10cm]{ClockDividerSYN.png}} \caption{\label{fig:clkDivRTL} ClockDivider synthesized schematic; Xilinx Vivado 2017.4} \end{figure} Verilog Testbench End of explanation """
volodymyrss/3ML
docs/notebooks/The 3ML workflow.ipynb
bsd-3-clause
from threeML import * """ Explanation: The 3ML workflow Generally, an analysis in 3ML is performed in 3 steps: Load the data: one or more datasets are loaded and then listed in a DataList object Define the model: a model for the data is defined by including one or more PointSource, ExtendedSource or ParticleSource instances Perform a likelihood or a Bayesian analysis: the data and the model are used together to perform either a Maximum Likelihood analysis, or a Bayesian analysis Loading data 3ML is built around the concept of plugins. A plugin is used to load a particular type of data, or the data from a particular instrument. There is a plugin of optical data, one for X-ray data, one for Fermi/LAT data and so on. Plugins instances can be added and removed at the loading stage without changing any other stage of the analysis (but of course, you need to rerun all stages to update the results). First, let's import 3ML: End of explanation """ # Get some example data from threeML.io.package_data import get_path_of_data_file data_path = get_path_of_data_file("datasets/xy_powerlaw.txt") # Create an instance of the XYLike plugin, which allows to analyze simple x,y points # with error bars xyl = XYLike.from_text_file("xyl", data_path) # Let's plot it just to see what we have loaded xyl.plot(x_scale='log', y_scale='log') """ Explanation: Let's start by loading one dataset, which in the 3ML workflow means creating an instance of the appropriate plugin: End of explanation """ data = DataList(xyl) """ Explanation: Now we need to create a DataList object, which in this case contains only one instance: End of explanation """ # Create the second instance, this time of a different type pha = get_path_of_data_file("datasets/ogip_powerlaw.pha") bak = get_path_of_data_file("datasets/ogip_powerlaw.bak") rsp = get_path_of_data_file("datasets/ogip_powerlaw.rsp") ogip = OGIPLike("ogip", pha, bak, rsp) # Now use both plugins data = DataList(xyl, ogip) """ Explanation: The DataList object 
can receive one or more plugin instances on initialization. So for example, to use two datasets we can simply do: End of explanation """ # This is equivalent to write data = DataList(xyl, ogip) my_plugins = [xyl, ogip] data = DataList(*my_plugins) """ Explanation: The DataList object can accept any number of plugins in input. You can also create a list of plugins, and then create a DataList using the "expansion" feature of the python language ('*'), like this: End of explanation """ # A point source with a power law spectrum source1_sp = Powerlaw() source1 = PointSource("source1", ra=23.5, dec=-22.7, spectral_shape=source1_sp) # Another source with a log-parabolic spectrum plus a power law source2_sp = Log_parabola() + Powerlaw() source2 = PointSource("source2", ra=30.5, dec=-27.1, spectral_shape=source2_sp) # A third source defined in terms of its Galactic latitude and longitude source3_sp = Cutoff_powerlaw() source3 = PointSource("source3", l=216.1, b=-74.56, spectral_shape=source3_sp) """ Explanation: This is useful if you need to create the list of plugins at runtime, for example looping over many files. Define the model After you have loaded your data, you need to define a model for them. A model is a collection of one or more sources. A source represents an astrophysical reality, like a star, a galaxy, a molecular cloud... There are 3 kinds of sources: PointSource, ExtendedSource and ParticleSource. The latter is used only in special situations. The models are defined using the package astromodels. Here we will only go through the basics. You can find a lot more information here: astromodels.readthedocs.org Point sources A point source is characterized by a name, a position, and a spectrum. 
These are some examples: End of explanation """ # An extended source with a Gaussian shape centered on R.A., Dec = (30.5, -27.1) # and a sigma of 3.0 degrees ext1_spatial = Gaussian_on_sphere(lon0=30.5, lat0=-27.1, sigma=3.0) ext1_spectral = Powerlaw() ext1 = ExtendedSource("ext1", ext1_spatial, ext1_spectral) # An extended source with a 3D function # (i.e., the function defines both the spatial and the spectral shape) ext2_spatial = Continuous_injection_diffusion() ext2 = ExtendedSource("ext2", ext2_spatial) """ Explanation: Extended sources An extended source is characterized by its spatial shape and its spectral shape: End of explanation """ model = Model(source1, source2, source3, ext1, ext2) # We can see a summary of the model like this: model.display() """ Explanation: NOTE: not all plugins support extended sources. For example, the XYLike plugin we used above do not, as it is meant for data without spatial resolution. Create the likelihood model Now that we have defined our sources, we can create a model simply as: End of explanation """ # Fix a parameter model.source1.spectrum.main.Powerlaw.K.fix = True # or model.source1.spectrum.main.Powerlaw.K.free = False # Free it again model.source1.spectrum.main.Powerlaw.K.free = True # or model.source1.spectrum.main.Powerlaw.K.fix = False # Change the value model.source1.spectrum.main.Powerlaw.K = 2.3 # or using physical units (need to be compatible with what shown # in the table above) model.source1.spectrum.main.Powerlaw.K = 2.3 * 1 / (u.cm**2 * u.s * u.TeV) # Change the boundaries for the parameter model.source1.spectrum.main.Powerlaw.K.bounds = (1e-10, 1.0) # you can use units here as well, like: model.source1.spectrum.main.Powerlaw.K.bounds = (1e-5 * 1 / (u.cm**2 * u.s * u.TeV), 10.0 * 1 / (u.cm**2 * u.s * u.TeV)) # Link two parameters so that they are forced to have the same value model.link(model.source2.spectrum.main.composite.K_1, model.source1.spectrum.main.Powerlaw.K) # Link two parameters with a law. 
The parameters of the law become free # parameters in the fit. In this case we impose a linear relationship # between the index of the log-parabolic spectrum and the index of the # powerlaw in source2: index_2 = a * alpha_1 + b. law = Line() model.link(model.source2.spectrum.main.composite.index_2, model.source2.spectrum.main.composite.alpha_1, law) # If you want to force them to be in a specific relationship, # say index_2 = alpha_1 + 1, just fix a and b to the corresponding values, # after the linking, like: # model.source2.spectrum.main.composite.index_2.Line.a = 1.0 # model.source2.spectrum.main.composite.index_2.Line.a.fix = True # model.source2.spectrum.main.composite.index_2.Line.b = 0.0 # model.source2.spectrum.main.composite.index_2.Line.b.fix = True # Now display() will show the links model.display() """ Explanation: You can easily interact with the model. For example: End of explanation """ new_model = Model(source1) source1_sp.K.bounds = (0.01, 100) """ Explanation: Now, for the following steps, let's keep it simple and let's use a single point source: End of explanation """ new_model.save("new_model.yml", overwrite=True) new_model_reloaded = load_model("new_model.yml") """ Explanation: A model can be saved to disk, and reloaded from disk, as: End of explanation """ data = DataList(ogip) jl = JointLikelihood(new_model, data) best_fit_parameters, likelihood_values = jl.fit() """ Explanation: The output is in YAML format, a human-readable text-based format. Perform the analysis Maximum likelihood analysis Now that we have the data and the model, we can perform an analysis very easily: End of explanation """ jl.results.display() """ Explanation: The output of the fit() method of the JointLikelihood object consists of two pandas DataFrame objects, which can be queried, saved to disk, reloaded and so on. Refer to the pandas manual for details. After the fit the JointLikelihood instance will have a .results attribute which contains the results of the fit. 
End of explanation """ jl.results.write_to("my_results.fits", overwrite=True) """ Explanation: This object can be saved to disk in a FITS file: End of explanation """ results_reloaded = load_analysis_results("my_results.fits") results_reloaded.display() """ Explanation: The produced FITS file contains the complete definition of the model and of the results, so it can be reloaded in a separate session as: End of explanation """ fluxes = jl.results.get_point_source_flux(100 * u.keV, 1 * u.MeV) # Same results would be obtained with # fluxes = results_reloaded.get_point_source_flux(100 * u.keV, 1 * u.MeV) """ Explanation: The flux of the source can be computed from the 'results' object (even in another session by reloading the FITS file), as: End of explanation """ plot_point_source_spectra(jl.results, ene_min=0.1, ene_max=1e6, num_ene=500, flux_unit='erg / (cm2 s)') """ Explanation: We can also plot the spectrum with its error region, as: End of explanation """ # A uniform prior can be defined directly, like: new_model.source1.spectrum.main.Powerlaw.index.prior = Uniform_prior(lower_bound=-10, upper_bound=10) # or it can be set using the currently defined boundaries new_model.source1.spectrum.main.Powerlaw.index.set_uninformative_prior(Uniform_prior) # The same for the Log_uniform prior new_model.source1.spectrum.main.Powerlaw.K.prior = Log_uniform_prior(lower_bound=1e-3, upper_bound=100) # or new_model.source1.spectrum.main.Powerlaw.K.set_uninformative_prior(Log_uniform_prior) new_model.display() """ Explanation: Bayesian analysis In a very similar way, we can also perform a Bayesian analysis. 
As a first step, we need to define the priors for all parameters: End of explanation """ bs = BayesianAnalysis(new_model, data) # This uses the emcee sampler samples = bs.sample(n_walkers=30, burn_in=100, n_samples=1000) """ Explanation: Then, we can perform our Bayesian analysis like: End of explanation """ bs.results.display() fluxes_bs = bs.results.get_point_source_flux(100 * u.keV, 1 * u.MeV) plot_point_source_spectra(bs.results, ene_min=0.1, ene_max=1e6, num_ene=500, flux_unit='erg / (cm2 s)') """ Explanation: The BayesianAnalysis object will now have a "results" member which will work exactly the same as explained for the Maximum Likelihood analysis (see above): End of explanation """ bs.corner_plot() """ Explanation: We can also produce easily a "corner plot", like: End of explanation """
GoogleCloudPlatform/vertex-ai-samples
notebooks/community/migration/UJ14 AutoML for vision with Vertex AI Video Classification.ipynb
apache-2.0
! pip3 install -U google-cloud-aiplatform --user """ Explanation: Vertex SDK: AutoML video classification model Installation Install the latest (preview) version of Vertex SDK. End of explanation """ ! pip3 install google-cloud-storage """ Explanation: Install the Google cloud-storage library as well. End of explanation """ import os if not os.getenv("AUTORUN"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) """ Explanation: Restart the Kernel Once you've installed the Vertex SDK and Google cloud-storage, you need to restart the notebook kernel so it can find the packages. End of explanation """ PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID """ Explanation: Before you begin GPU run-time Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select Runtime > Change Runtime Type > GPU Set up your GCP project The following steps are required, regardless of your notebook environment. Select or create a GCP project. When you first create an account, you get a $300 free credit towards your compute/storage costs. Make sure that billing is enabled for your project. Enable the Vertex APIs and Compute Engine APIs. Google Cloud SDK is already installed in Google Cloud Notebooks. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $ into these commands. 
End of explanation """ REGION = "us-central1" # @param {type: "string"} """ Explanation: Region You can also change the REGION variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend when possible, to choose the region closest to you. Americas: us-central1 Europe: europe-west4 Asia Pacific: asia-east1 You cannot use a Multi-Regional Storage bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see Region support for Vertex AI services End of explanation """ from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") """ Explanation: Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial. End of explanation """ import os import sys # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your Google Cloud account. This provides access # to your Cloud Storage bucket and lets you submit training jobs and prediction # requests. # If on Vertex, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this tutorial in a notebook locally, replace the string # below with the path to your service account key and run this cell to # authenticate your Google Cloud account. else: %env GOOGLE_APPLICATION_CREDENTIALS your_path_to_credentials.json # Log in to your account on Google Cloud ! gcloud auth login """ Explanation: Authenticate your GCP account If you are using Google Cloud Notebooks, your environment is already authenticated. Skip this step. 
Note: If you are on an Vertex notebook and run the cell, the cell knows to skip executing the authentication steps. End of explanation """ BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "[your-bucket-name]": BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP """ Explanation: Create a Cloud Storage bucket The following steps are required, regardless of your notebook environment. This tutorial is designed to use training data that is in a public Cloud Storage bucket and a local Cloud Storage bucket for your batch predictions. You may alternatively use your own training data that you have stored in a local Cloud Storage bucket. Set the name of your Cloud Storage bucket below. It must be unique across all Cloud Storage buckets. End of explanation """ ! gsutil mb -l $REGION gs://$BUCKET_NAME """ Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket. End of explanation """ ! gsutil ls -al gs://$BUCKET_NAME """ Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents: End of explanation """ import json import os import sys import time from google.cloud.aiplatform import gapic as aip from google.protobuf.json_format import MessageToJson, ParseDict from google.protobuf.struct_pb2 import Value """ Explanation: Set up variables Next, set up some variables used throughout the tutorial. Import libraries and define constants Import Vertex SDK Import the Vertex SDK into our Python environment. End of explanation """ # API Endpoint API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION) # Vertex AI location root path for your dataset, model and endpoint resources PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION """ Explanation: Vertex AI constants Setup up the following constants for Vertex AI: API_ENDPOINT: The Vertex AI API service endpoint for dataset, model, job, pipeline and endpoint services. 
PARENT: The Vertex AI location root path for dataset, model and endpoint resources. End of explanation """ # Video Dataset type VIDEO_SCHEMA = "google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml" # Video Labeling type IMPORT_SCHEMA_VIDEO_CLASSIFICATION = "gs://google-cloud-aiplatform/schema/dataset/ioformat/video_classification_io_format_1.0.0.yaml" # Video Training task TRAINING_VIDEO_CLASSIFICATION_SCHEMA = "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_video_classification_1.0.0.yaml" """ Explanation: AutoML constants Next, setup constants unique to AutoML video classification datasets and training: Dataset Schemas: Tells the managed dataset service which type of dataset it is. Data Labeling (Annotations) Schemas: Tells the managed dataset service how the data is labeled (annotated). Dataset Training Schemas: Tells the Vertex AI Pipelines service the task (e.g., classification) to train the model for. End of explanation """ # client options same for all services client_options = {"api_endpoint": API_ENDPOINT} def create_dataset_client(): client = aip.DatasetServiceClient(client_options=client_options) return client def create_model_client(): client = aip.ModelServiceClient(client_options=client_options) return client def create_pipeline_client(): client = aip.PipelineServiceClient(client_options=client_options) return client def create_endpoint_client(): client = aip.EndpointServiceClient(client_options=client_options) return client def create_prediction_client(): client = aip.PredictionServiceClient(client_options=client_options) return client def create_job_client(): client = aip.JobServiceClient(client_options=client_options) return client clients = {} clients["dataset"] = create_dataset_client() clients["model"] = create_model_client() clients["pipeline"] = create_pipeline_client() clients["endpoint"] = create_endpoint_client() clients["prediction"] = create_prediction_client() clients["job"] = create_job_client() for client 
in clients.items(): print(client) IMPORT_FILE = "gs://automl-video-demo-data/hmdb_split1_5classes_train_inf.csv" ! gsutil cat $IMPORT_FILE | head -n 10 """ Explanation: Clients The Vertex SDK works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the server (Vertex). You will use several clients in this tutorial, so set them all up upfront. Dataset Service for managed datasets. Model Service for managed models. Pipeline Service for training. Endpoint Service for deployment. Job Service for batch jobs and custom training. Prediction Service for serving. Note: Prediction has a different service endpoint. End of explanation """ DATA_SCHEMA = VIDEO_SCHEMA dataset = { "display_name": "hmdb_" + TIMESTAMP, "metadata_schema_uri": "gs://" + DATA_SCHEMA, } print( MessageToJson( aip.CreateDatasetRequest(parent=PARENT, dataset=dataset).__dict__["_pb"] ) ) """ Explanation: Example output: gs://automl-video-demo-data/hmdb51/_Rad_Schlag_die_Bank__cartwheel_f_cm_np1_le_med_0.avi,cartwheel,0.0,inf gs://automl-video-demo-data/hmdb51/Acrobacias_de_un_fenomeno_cartwheel_f_cm_np1_ba_bad_8.avi,cartwheel,0.0,inf gs://automl-video-demo-data/hmdb51/Acrobacias_de_un_fenomeno_cartwheel_f_cm_np1_fr_bad_3.avi,cartwheel,0.0,inf gs://automl-video-demo-data/hmdb51/Acrobacias_de_un_fenomeno_cartwheel_f_cm_np1_fr_bad_4.avi,cartwheel,0.0,inf gs://automl-video-demo-data/hmdb51/Acrobacias_de_un_fenomeno_cartwheel_f_cm_np1_fr_bad_5.avi,cartwheel,0.0,inf gs://automl-video-demo-data/hmdb51/Bayer__Meisterin_Teresa_Stadler_cartwheel_f_cm_np1_le_med_0.avi,cartwheel,0.0,inf gs://automl-video-demo-data/hmdb51/Bayer__Meisterin_Teresa_Stadler_cartwheel_f_cm_np1_le_med_2.avi,cartwheel,0.0,inf gs://automl-video-demo-data/hmdb51/Boden_bung_Spoho_Eignungspr_fung_cartwheel_f_cm_np1_ri_med_2.avi,cartwheel,0.0,inf gs://automl-video-demo-data/hmdb51/Bodenturnen_2004_cartwheel_f_cm_np1_le_med_0.avi,cartwheel,0.0,inf 
gs://automl-video-demo-data/hmdb51/Bodenturnen_2004_cartwheel_f_cm_np1_le_med_4.avi,cartwheel,0.0,inf Create a dataset projects.locations.datasets.create Request End of explanation """ request = clients["dataset"].create_dataset(parent=PARENT, dataset=dataset) """ Explanation: Example output: { "parent": "projects/migration-ucaip-training/locations/us-central1", "dataset": { "displayName": "hmdb_20210228191029", "metadataSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml" } } Call End of explanation """ result = request.result() print(MessageToJson(result.__dict__["_pb"])) """ Explanation: Response End of explanation """ # The full unique ID for the dataset dataset_id = result.name # The short numeric ID for the dataset dataset_short_id = dataset_id.split("/")[-1] print(dataset_id) # Saved for clean up dataset = {"name": dataset_id} """ Explanation: Example output: { "name": "projects/116273516712/locations/us-central1/datasets/7952037527982964736", "displayName": "hmdb_20210228191029", "metadataSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml", "labels": { "aiplatform.googleapis.com/dataset_metadata_schema": "VIDEO" }, "metadata": { "dataItemSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/dataitem/video_1.0.0.yaml" } } End of explanation """ LABEL_SCHEMA = IMPORT_SCHEMA_VIDEO_CLASSIFICATION import_config = { "gcs_source": {"uris": [IMPORT_FILE]}, "import_schema_uri": LABEL_SCHEMA, } print( MessageToJson( aip.ImportDataRequest( name=dataset_short_id, import_configs=[import_config] ).__dict__["_pb"] ) ) """ Explanation: projects.locations.datasets.import Request End of explanation """ request = clients["dataset"].import_data( name=dataset_id, import_configs=[import_config] ) """ Explanation: Example output: { "name": "7952037527982964736", "importConfigs": [ { "gcsSource": { "uris": [ "gs://automl-video-demo-data/hmdb_split1_5classes_train_inf.csv" ] }, "importSchemaUri": 
"gs://google-cloud-aiplatform/schema/dataset/ioformat/video_classification_io_format_1.0.0.yaml" } ] } Call End of explanation """ result = request.result() print(MessageToJson(result.__dict__["_pb"])) """ Explanation: Response End of explanation """ TRAINING_SCHEMA = TRAINING_VIDEO_CLASSIFICATION_SCHEMA task = ParseDict({}, Value()) training_pipeline = { "display_name": "hmdb_" + TIMESTAMP, "input_data_config": {"dataset_id": dataset_short_id}, "model_to_upload": {"display_name": "hmdb_" + TIMESTAMP}, "training_task_definition": TRAINING_SCHEMA, "training_task_inputs": task, } print( MessageToJson( aip.CreateTrainingPipelineRequest( parent=PARENT, training_pipeline=training_pipeline ).__dict__["_pb"] ) ) """ Explanation: Example output: {} Train a model projects.locations.trainingPipelines.create Request End of explanation """ request = clients["pipeline"].create_training_pipeline( parent=PARENT, training_pipeline=training_pipeline ) """ Explanation: Example output: { "parent": "projects/migration-ucaip-training/locations/us-central1", "trainingPipeline": { "displayName": "hmdb_20210228191029", "inputDataConfig": { "datasetId": "7952037527982964736" }, "trainingTaskDefinition": "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_video_classification_1.0.0.yaml", "trainingTaskInputs": {}, "modelToUpload": { "displayName": "hmdb_20210228191029" } } } Call End of explanation """ print(MessageToJson(request.__dict__["_pb"])) """ Explanation: Response End of explanation """ # The full unique ID for the training pipeline training_pipeline_id = request.name # The short numeric ID for the training pipeline training_pipeline_short_id = training_pipeline_id.split("/")[-1] print(training_pipeline_id) """ Explanation: Example output: { "name": "projects/116273516712/locations/us-central1/trainingPipelines/3361945917925097472", "displayName": "hmdb_20210228191029", "inputDataConfig": { "datasetId": "7952037527982964736" }, "trainingTaskDefinition": 
"gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_video_classification_1.0.0.yaml", "trainingTaskInputs": {}, "modelToUpload": { "displayName": "hmdb_20210228191029" }, "state": "PIPELINE_STATE_PENDING", "createTime": "2021-02-28T19:17:39.815377Z", "updateTime": "2021-02-28T19:17:39.815377Z" } End of explanation """ request = clients["pipeline"].get_training_pipeline(name=training_pipeline_id) """ Explanation: projects.locations.trainingPipelines.get Call End of explanation """ print(MessageToJson(request.__dict__["_pb"])) """ Explanation: Response End of explanation """ while True: response = clients["pipeline"].get_training_pipeline(name=training_pipeline_id) if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED: print("Training job has not completed:", response.state) model_to_deploy_name = None if response.state == aip.PipelineState.PIPELINE_STATE_FAILED: break else: model_id = response.model_to_upload.name print("Training Time:", response.end_time - response.start_time) break time.sleep(20) print(model_id) """ Explanation: Example output: { "name": "projects/116273516712/locations/us-central1/trainingPipelines/3361945917925097472", "displayName": "hmdb_20210228191029", "inputDataConfig": { "datasetId": "7952037527982964736" }, "trainingTaskDefinition": "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_video_classification_1.0.0.yaml", "trainingTaskInputs": {}, "modelToUpload": { "displayName": "hmdb_20210228191029" }, "state": "PIPELINE_STATE_RUNNING", "createTime": "2021-02-28T19:17:39.815377Z", "startTime": "2021-02-28T19:17:40.089331Z", "updateTime": "2021-02-28T19:17:40.089331Z" } End of explanation """ request = clients["model"].list_model_evaluations(parent=model_id) """ Explanation: Evaluate the model projects.locations.models.evaluations.list Call End of explanation """ model_evaluations = [json.loads(MessageToJson(mel.__dict__["_pb"])) for mel in request] print(json.dumps(model_evaluations, indent=2)) # The 
evaluation slice evaluation_slice = request.model_evaluations[0].name """ Explanation: Response End of explanation """ request = clients["model"].get_model_evaluation(name=evaluation_slice) """ Explanation: Example output ``` [ { "name": "projects/116273516712/locations/us-central1/models/5031242063400665088/evaluations/6719412425478635520", "metricsSchemaUri": "gs://google-cloud-aiplatform/schema/modelevaluation/classification_metrics_1.0.0.yaml", "metrics": { "confidenceMetrics": [ { "confidenceThreshold": 0.0891612, "precision": 0.2, "recall": 1.0, "f1Score": 0.33333334 }, { "recall": 1.0, "confidenceThreshold": 0.09073429, "precision": 0.20289855, "f1Score": 0.33734939 }, { "recall": 1.0, "f1Score": 0.34146342, "confidenceThreshold": 0.09176466, "precision": 0.20588236 }, # REMOVED FOR BREVITY { { "displayName": "pullup", "id": "2856417959264387072" }, { "displayName": "golf", "id": "5162260968478081024" }, { "displayName": "ride_horse", "id": "6315182473084928000" }, { "displayName": "cartwheel", "id": "7468103977691774976" } ] } }, "createTime": "2021-02-28T20:56:43.050002Z", "sliceDimensions": [ "annotationSpec" ] } ] ``` projects.locations.models.evaluations.get Call End of explanation """ print(MessageToJson(request.__dict__["_pb"])) """ Explanation: Response End of explanation """ test_items = ! 
gsutil cat $IMPORT_FILE | head -n2 cols = str(test_items[0]).split(",") test_item_1 = str(cols[0]) test_label_1 = str(cols[1]) print(test_item_1, test_label_1) cols = str(test_items[1]).split(",") test_item_2 = str(cols[0]) test_label_2 = str(cols[1]) print(test_item_2, test_label_2) """ Explanation: Example output: ``` { "name": "projects/116273516712/locations/us-central1/models/5031242063400665088/evaluations/6719412425478635520", "metricsSchemaUri": "gs://google-cloud-aiplatform/schema/modelevaluation/classification_metrics_1.0.0.yaml", "metrics": { "confusionMatrix": { "rows": [ [ 14.0, 0.0, 0.0, 0.0, 0.0 ], [ 0.0, 14.0, 0.0, 0.0, 0.0 ], [ 0.0, 0.0, 14.0, 0.0, 0.0 ], [ 0.0, 0.0, 0.0, 14.0, 0.0 ], [ 0.0, 0.0, 0.0, 0.0, 14.0 ] ], "annotationSpecs": [ { "displayName": "kick_ball", "id": "1703496454657540096" }, { "displayName": "pullup", "id": "2856417959264387072" }, { "displayName": "golf", "id": "5162260968478081024" }, { "displayName": "ride_horse", "id": "6315182473084928000" }, { "displayName": "cartwheel", "id": "7468103977691774976" } ] }, "confidenceMetrics": [ { "precision": 0.2, "recall": 1.0, "confidenceThreshold": 0.0891612, "f1Score": 0.33333334 }, { "recall": 1.0, "f1Score": 0.33734939, "confidenceThreshold": 0.09073429, "precision": 0.20289855 }, { "precision": 0.20588236, "f1Score": 0.34146342, "confidenceThreshold": 0.09176466, "recall": 1.0 }, { "confidenceThreshold": 0.09279072, "f1Score": 0.34739456, "precision": 0.2102102, "recall": 1.0 }, # REMOVED FOR BREVITY { "recall": 0.071428575, "f1Score": 0.13333334, "precision": 1.0, "confidenceThreshold": 0.6023364 }, { "f1Score": 0.055555556, "precision": 1.0, "confidenceThreshold": 0.6101756, "recall": 0.028571429 }, { "recall": 0.014285714, "precision": 1.0, "confidenceThreshold": 0.6113689, "f1Score": 0.028169014 } ], "auPrc": 1.0 }, "createTime": "2021-02-28T20:56:43.050002Z", "sliceDimensions": [ "annotationSpec" ] } ``` Make batch predictions Make a batch prediction file End of explanation 
""" import json import tensorflow as tf gcs_input_uri = "gs://" + BUCKET_NAME + "/test.jsonl" with tf.io.gfile.GFile(gcs_input_uri, "w") as f: data = { "content": test_item_1, "mimeType": "video/avi", "timeSegmentStart": "0.0s", "timeSegmentEnd": "inf", } f.write(json.dumps(data) + "\n") data = { "content": test_item_2, "mimeType": "video/avi", "timeSegmentStart": "0.0s", "timeSegmentEnd": "inf", } f.write(json.dumps(data) + "\n") print(gcs_input_uri) !gsutil cat $gcs_input_uri """ Explanation: Example output: gs://automl-video-demo-data/hmdb51/_Rad_Schlag_die_Bank__cartwheel_f_cm_np1_le_med_0.avi cartwheel gs://automl-video-demo-data/hmdb51/Acrobacias_de_un_fenomeno_cartwheel_f_cm_np1_ba_bad_8.avi cartwheel Make the batch input file Let's now make a batch input file, which you store in your local Cloud Storage bucket. The batch input file can be either CSV or JSONL. You will use JSONL in this tutorial. For JSONL file, you make one dictionary entry per line for each video. The dictionary contains the key/value pairs: content: The Cloud Storage path to the video. mimeType: The content type. In our example, it is an avi file. timeSegmentStart: The start timestamp in the video to do prediction on. Note, the timestamp must be specified as a string and followed by s (second), m (minute) or h (hour). timeSegmentEnd: The end timestamp in the video to do prediction on. 
End of explanation """ batch_prediction_job = { "display_name": "hmdb_" + TIMESTAMP, "model": model_id, "input_config": { "instances_format": "jsonl", "gcs_source": {"uris": [gcs_input_uri]}, }, "model_parameters": ParseDict( { "confidenceThreshold": 0.5, "maxPredictions": 2, "segmentClassification": True, "shotClassification": True, "oneSecIntervalClassification": True, }, Value(), ), "output_config": { "predictions_format": "jsonl", "gcs_destination": { "output_uri_prefix": "gs://" + f"{BUCKET_NAME}/batch_output/" }, }, "dedicated_resources": { "machine_spec": {"machine_type": "n1-standard-2", "accelerator_count": 0}, "starting_replica_count": 1, "max_replica_count": 1, }, } print( MessageToJson( aip.CreateBatchPredictionJobRequest( parent=PARENT, batch_prediction_job=batch_prediction_job ).__dict__["_pb"] ) ) """ Explanation: Example output: gs://migration-ucaip-trainingaip-20210228191029/test.jsonl {"content": "gs://automl-video-demo-data/hmdb51/_Rad_Schlag_die_Bank__cartwheel_f_cm_np1_le_med_0.avi", "mimeType": "video/avi", "timeSegmentStart": "0.0s", "timeSegmentEnd": "inf"} {"content": "gs://automl-video-demo-data/hmdb51/Acrobacias_de_un_fenomeno_cartwheel_f_cm_np1_ba_bad_8.avi", "mimeType": "video/avi", "timeSegmentStart": "0.0s", "timeSegmentEnd": "inf"} projects.locations.batchPredictionJobs.create Request End of explanation """ request = clients["job"].create_batch_prediction_job( parent=PARENT, batch_prediction_job=batch_prediction_job ) """ Explanation: Example output: { "parent": "projects/migration-ucaip-training/locations/us-central1", "batchPredictionJob": { "displayName": "hmdb_20210228191029", "model": "projects/116273516712/locations/us-central1/models/5031242063400665088", "inputConfig": { "instancesFormat": "jsonl", "gcsSource": { "uris": [ "gs://migration-ucaip-trainingaip-20210228191029/test.jsonl" ] } }, "modelParameters": { "segmentClassification": true, "maxPredictions": 2.0, "shotClassification": true, "confidenceThreshold": 0.5, 
"oneSecIntervalClassification": true }, "outputConfig": { "predictionsFormat": "jsonl", "gcsDestination": { "outputUriPrefix": "gs://migration-ucaip-trainingaip-20210228191029/batch_output/" } }, "dedicatedResources": { "machineSpec": { "machineType": "n1-standard-2" }, "startingReplicaCount": 1, "maxReplicaCount": 1 } } } Call End of explanation """ print(MessageToJson(request.__dict__["_pb"])) """ Explanation: Response End of explanation """ # The fully qualified ID for the batch job batch_job_id = request.name # The short numeric ID for the batch job batch_job_short_id = batch_job_id.split("/")[-1] print(batch_job_id) """ Explanation: Example output: { "name": "projects/116273516712/locations/us-central1/batchPredictionJobs/5275975759557558272", "displayName": "hmdb_20210228191029", "model": "projects/116273516712/locations/us-central1/models/5031242063400665088", "inputConfig": { "instancesFormat": "jsonl", "gcsSource": { "uris": [ "gs://migration-ucaip-trainingaip-20210228191029/test.jsonl" ] } }, "modelParameters": { "oneSecIntervalClassification": true, "confidenceThreshold": 0.5, "maxPredictions": 2.0, "shotClassification": true, "segmentClassification": true }, "outputConfig": { "predictionsFormat": "jsonl", "gcsDestination": { "outputUriPrefix": "gs://migration-ucaip-trainingaip-20210228191029/batch_output/" } }, "state": "JOB_STATE_PENDING", "completionStats": { "incompleteCount": "-1" }, "createTime": "2021-02-28T22:47:21.875565Z", "updateTime": "2021-02-28T22:47:21.875565Z" } End of explanation """ request = clients["job"].get_batch_prediction_job(name=batch_job_id) """ Explanation: projects.locations.batchPredictionJobs.get Call End of explanation """ print(MessageToJson(request.__dict__["_pb"])) """ Explanation: Response End of explanation """ def get_latest_predictions(gcs_out_dir): """ Get the latest prediction subfolder using the timestamp in the subfolder name""" folders = !gsutil ls $gcs_out_dir latest = "" for folder in folders: subfolder = 
folder.split("/")[-2] if subfolder.startswith("prediction-"): if subfolder > latest: latest = folder[:-1] return latest while True: response = clients["job"].get_batch_prediction_job(name=batch_job_id) if response.state != aip.JobState.JOB_STATE_SUCCEEDED: print("The job has not completed:", response.state) if response.state == aip.JobState.JOB_STATE_FAILED: break else: folder = get_latest_predictions( response.output_config.gcs_destination.output_uri_prefix ) ! gsutil ls $folder/prediction*.jsonl ! gsutil cat $folder/prediction*.jsonl break time.sleep(60) """ Explanation: Example output: { "name": "projects/116273516712/locations/us-central1/batchPredictionJobs/5275975759557558272", "displayName": "hmdb_20210228191029", "model": "projects/116273516712/locations/us-central1/models/5031242063400665088", "inputConfig": { "instancesFormat": "jsonl", "gcsSource": { "uris": [ "gs://migration-ucaip-trainingaip-20210228191029/test.jsonl" ] } }, "modelParameters": { "oneSecIntervalClassification": true, "confidenceThreshold": 0.5, "shotClassification": true, "maxPredictions": 2.0, "segmentClassification": true }, "outputConfig": { "predictionsFormat": "jsonl", "gcsDestination": { "outputUriPrefix": "gs://migration-ucaip-trainingaip-20210228191029/batch_output/" } }, "state": "JOB_STATE_RUNNING", "completionStats": { "incompleteCount": "2" }, "createTime": "2021-02-28T22:47:21.875565Z", "startTime": "2021-02-28T22:47:22.041508Z", "updateTime": "2021-02-28T22:47:22.486289Z" } End of explanation """ delete_dataset = True delete_model = True delete_pipeline = True delete_batchjob = True delete_bucket = True # Delete the dataset using the Vertex AI fully qualified identifier for the dataset try: if delete_dataset: clients["dataset"].delete_dataset(name=dataset_id) except Exception as e: print(e) # Delete the model using the Vertex AI fully qualified identifier for the model try: if delete_model: clients["model"].delete_model(name=model_id) except Exception as e: print(e) # 
Delete the training pipeline using the Vertex AI fully qualified identifier for the training pipeline try: if delete_pipeline: clients["pipeline"].delete_training_pipeline(name=training_pipeline_id) except Exception as e: print(e) # Delete the batch job using the Vertex AI fully qualified identifier for the batch job try: if delete_batchjob: clients["job"].delete_batch_prediction_job(name=batch_job_id) except Exception as e: print(e) if delete_bucket and "BUCKET_NAME" in globals(): ! gsutil rm -r gs://$BUCKET_NAME """ Explanation: Example output: gs://migration-ucaip-trainingaip-20210228191029/batch_output/prediction-hmdb_20210228191029-2021-02-28T22:47:21.701608Z/predictions_00001.jsonl gs://migration-ucaip-trainingaip-20210228191029/batch_output/prediction-hmdb_20210228191029-2021-02-28T22:47:21.701608Z/predictions_00002.jsonl {"instance":{"content":"gs://automl-video-demo-data/hmdb51/Acrobacias_de_un_fenomeno_cartwheel_f_cm_np1_ba_bad_8.avi","mimeType":"video/avi","timeSegmentStart":"0.0s","timeSegmentEnd":"inf"},"prediction":[]} 
{"instance":{"content":"gs://automl-video-demo-data/hmdb51/_Rad_Schlag_die_Bank__cartwheel_f_cm_np1_le_med_0.avi","mimeType":"video/avi","timeSegmentStart":"0.0s","timeSegmentEnd":"inf"},"prediction":[{"id":"7468103977691774976","displayName":"cartwheel","type":"shot-classification","timeSegmentStart":"0.066666s","timeSegmentEnd":"0.226666s","confidence":0.5290586},{"id":"7468103977691774976","displayName":"cartwheel","type":"one-sec-interval-classification","timeSegmentStart":"1.346666s","timeSegmentEnd":"1.346666s","confidence":0.5290586},{"id":"7468103977691774976","displayName":"cartwheel","type":"segment-classification","timeSegmentStart":"0s","timeSegmentEnd":"2.766667s","confidence":0.52444863},{"id":"7468103977691774976","displayName":"cartwheel","type":"shot-classification","timeSegmentStart":"0.266666s","timeSegmentEnd":"2.226666s","confidence":0.51983875},{"id":"7468103977691774976","displayName":"cartwheel","type":"one-sec-interval-classification","timeSegmentStart":"1.586666s","timeSegmentEnd":"1.586666s","confidence":0.51983875}]} Cleaning up To clean up all GCP resources used in this project, you can delete the GCP project you used for the tutorial. Otherwise, you can delete the individual resources you created in this tutorial. End of explanation """
jordan-melendez/buqeyemodel
docs/notebooks/truncation_recap.ipynb
mit
df0 = 0 Q = 0.33 # Must be 2d array, with orders spanning the last axis (columns) coeffs = np.array( [[1.0, 1.0, 1.0], # Set 1, orders 0, 1, 2 [1.0, 0.5, 0.1], # Set 2, orders 0, 1, 2 [1.0, 0.1, 0.1] # Set 3, orders 0, 1, 2 ] ) # The truncation model accepts *partial sums*, # i.e., order-by-order predictions y_n, not the coefficients! y1 = gm.partials(coeffs, ratio=Q) y_best = y1[:, -1] # Set up a scale-invariant truncation object test1 = gm.TruncationPointwise(df=df0) # Fit the model to data. Hyperparameters get updated here test1.fit(y=y1, ratio=Q) # Compute degree of belief (dob) intervals, aka credible intervals dob68 = lower68, upper68 = test1.interval(0.68, orders=2) pdf_heights68 = test1.pdf(lower68[:, None], orders=2) dob95 = lower95, upper95 = test1.interval(0.95, orders=2) pdf_heights95 = test1.pdf(lower95[:, None], orders=2) # Plot the results delta_k = np.linspace(-0.15, 0.15, 100) # Differences from the true y pdfs = test1.pdf(delta_k[:, None, None] + y1, orders=2) fig, axs = plt.subplots(1, len(coeffs), figsize=(7, 2.4)) for i, ax in enumerate(axs.ravel()): ax.plot(delta_k, pdfs[:, i]) # Plot dob intervals as vertical lines for dob in dob68-y_best: ax.vlines(dob[i], 0, pdf_heights68[i], linestyle='-.', lw=0.5) for dob in dob95-y_best: ax.vlines(dob[i], 0, pdf_heights95[i], linestyle='--', lw=0.5) # Format the plot as in Furnstahl et al. 
for comparison ax.set_title(r"$\mathrm{{pr}}(\Delta_2 \,\vert\, c=[{},\, {},\, {}])$".format(*coeffs[i])) ax.set_xlabel(r"$\Delta_2$") ax.set_ylim([0, 17]) ax.set_xlim([-0.13, 0.13]) ax.yaxis.set_major_locator(mpl.ticker.FixedLocator(np.arange(0, 18, 2))) ax.yaxis.set_minor_locator(mpl.ticker.FixedLocator(np.arange(1, 19, 2))) ax.xaxis.set_minor_locator(mpl.ticker.FixedLocator(np.linspace(-0.15, 0.15, 31))) """ Explanation: Bayesian truncation errors in chiral effective field theory: A Pointwise Approach Here we reproduce, using the improved truncation error model based on conjugate priors, some of the results in the following papers (with some notational changes) Furnstahl et al., Quantifying Truncation Errors in Effective Field Theory Melendez et al., Bayesian truncation errors in chiral effective field theory: Nucleon-nucleon observables In these papers, the convergence pattern of nucleon-nucleon scattering observables, including the total and differential cross sections and a set of spin scattering observables, were studied to infer the effective field theory (EFT) truncation error. Given the $k$ lowest order EFTs, a sequence of observable calculations $\{y_0, ..., y_k\}$ can be computed for a generic observable $y$. It is assumed in the above papers that one can write the sum of all contributions as \begin{align} y = y_{\mathrm{ref}}\sum_{n=0}^\infty c_n Q^n \end{align} for iid observable coefficients $c_n$. Hence, the $k$ lowest orders can be conditioned upon to estimate the remaining higher orders, and the full summation. The expansion parameter $Q$ is considered a ratio of low- and high-energy scales, with the low energy scale a combination of the relative scattering momentum $p$ and pion mass $m_\pi$, and the high energy scale is $\Lambda_b$, also known as the breakdown scale. The specific parameterization is \begin{align} Q = \frac{m_\pi^n + p^n}{m_\pi^{n-1} + p^{n-1}} \frac{1}{\Lambda_b} \end{align} with $n=8$ used in Melendez et al. 
The hierarchical model from the above papers considered the following prior sets, and Melendez et al. focused on set C with $\bar c_< = 0.25$ and $\bar c_> = 10$. <!--- | Set | $pr(c_n | \bar c)$ | $pr(\bar c)$ | | :---: | :------------------: | :------------: | | A | $\frac{1}{2\bar c}\theta(\bar c - |c_n|)$ | $\frac{1}{\bar c\ln\bar c_> /\bar c_<}\theta(\bar c - \bar c_<) \theta(\bar c_> - \bar c)$ | | B | $\frac{1}{2\bar c}\theta(\bar c - |c_n|)$ | $\frac{1}{\sqrt{2\pi}\bar c \sigma} e^{-(\ln \bar c)^2/2\sigma^2}$ | | C | $\frac{1}{\sqrt{2\pi}\bar c} e^{-c_n^2/2\bar c}$ | $\frac{1}{\bar c\ln\bar c_> /\bar c_<}\theta(\bar c - \bar c_<) \theta(\bar c_> - \bar c)$ | ---> \begin{array}{ccc} \hline \mathrm{Set} & pr(c_n | \bar c) & pr(\bar c) \ \hline A & \frac{1}{2\bar c}\theta(\bar c - |c_n|) & \frac{1}{\bar c\ln\bar c_> /\bar c_<}\theta(\bar c - \bar c_<) \theta(\bar c_> - \bar c) \ B & \frac{1}{2\bar c}\theta(\bar c - |c_n|) & \frac{1}{\sqrt{2\pi}\bar c \sigma} e^{-(\ln \bar c)^2/2\sigma^2} \ C & \frac{1}{\sqrt{2\pi}\bar c} e^{-c_n^2/2\bar c} & \frac{1}{\bar c\ln\bar c_> /\bar c_<}\theta(\bar c - \bar c_<) \theta(\bar c_> - \bar c) \ \hline \end{array} This package instead employs a conjugate prior set, where the $c_n$ are Gaussian and an inverse $\chi^2$ is placed on $\bar c^2$, \begin{align} \bar c^2 \sim \chi^{-2}(\nu_0, \tau_0^2) \end{align} where $\nu_0$ and $\tau_0$ are the prior degrees of freedom and scale parameters, respectively. The inverse $\chi^2$ density is given by \begin{align} \chi^{-2}(z; \nu, \tau^2) = \frac{(\nu\tau^2/2)^{\nu/2}}{\Gamma(\nu/2)} z^{-\nu/2-1} e^{-\frac{\nu\tau^2}{2z}} \end{align} Here we compare the results of the very convenient conjugate formulation to the prior published results. Proof of Concept Many proof of concept tests were performed in Furnstahl et al. Here we reproduce some of those tests with this package. 
Since setting $\nu = 0$ is equivalent to prior set $C$ with $\bar c_< = 1/\bar c_> = 0$, the results should be identical in this case. The basic steps for using this uncorrelated model are Define a TruncationPointwise model object with hyperparameters $\nu$ and $\tau$ Use the fit method to update hyperparameters based on the order-by-order predictions $y_n$, expansion parameter $Q$, and reference scale $y_{\mathrm{ref}}$. Then call other methods, such as interval to get posteriors or degree of belief intervals for the truncation error. End of explanation """ Image('../images/Furnstahl_fig4.jpg', width=WIDE_IMG_WIDTH) """ Explanation: Compare the above figure with the blue curves from Fig. 4 in Furnstahl et al., reproduced below: End of explanation """ df_test = 0.6 tau_test = 0.8 a_test = df_test/2. b_test = df_test * tau_test**2 / 2 ig = stats.invgamma(a=a_test, scale=b_test) def cbar_sq_prior_melendez_etal(x, lower, upper): # pr(cbar**2) for set C return np.where((lower <= x) & (x <= upper), 1 / (np.log(upper / lower) * x), 0.) cbarsq = np.linspace(0.1**2, 1, 1000) prior_vals = cbar_sq_prior_melendez_etal(cbarsq, 0.25**2, 10**2) fig, ax = plt.subplots(figsize=(3.4, 3.4)) ax.plot(cbarsq, ig.pdf(cbarsq), label=r'$\chi^{-2}$') ax.plot(cbarsq, prior_vals, label='Set C') ax.set_xlabel(r"$\bar c^2$") ax.set_ylabel(r"$\mathrm{pr}(\bar c^2)$") ax.legend(); """ Explanation: The original paper states that Fig. 4 uses the "leading-omitted-term approximation" to the truncation error, but this is a misprint. Thus, the curves are indeed identical. This package allows any $\nu$ and $\tau^2$ to be chosen instead, each of which allow pdfs to be computed efficiently without any manual integration. NN Scattering Observables Although the total cross section was covered in Furnstahl et al., it and other observables were more extensively studied in Melendez et al. Here we will reproduce some of the key figures from Melendez et al. with a slightly altered prior on $\bar c$. 
Choose Hyperparameters First let's figure out the hyperparameters of the inverse $\chi^2$ distribution that best reproduce the "Set C" prior with $\bar c_> = 10$ and $\bar c_< = 0.25$, which was the most extensively used prior set of Melendez et al. Scipy has an inverse gamma distribution, which is equivalent to the inverse $\chi^2$ distribution, with hyperparameters defined by \begin{align} a & = \frac{\nu}{2} \ b & = \frac{\nu \tau^2}{2} \end{align} End of explanation """ # Constants: proton/neutron masses and hbar m_p = 938.27208 # MeV/c^2 m_n = 939.56541 # MeV/c^2 hbarc = 197.33 # Mev-fm def E_to_p(E_lab, interaction): """Return p in MeV. Parameters ---------- energy = float lab energy given in MeV. interaction = str {"pp", "nn", "np"} """ if interaction == "pp": m1, m2 = m_p, m_p if interaction == "nn": m1, m2 = m_n, m_n if interaction == "np": m1, m2 = m_n, m_p p_rel = np.sqrt( E_lab * m2**2 * (E_lab + 2 * m1) / ((m1 + m2)**2 + 2 * m2 * E_lab) ) return p_rel def Q_approx(E, Lambda_b, interaction, single_expansion=False): if single_expansion: m_pi = 0 else: m_pi = 138 # Set to 0 to just return p/Lambda_b # Interpolate to smooth the transition from m_pi to p n = 8 p = E_to_p(E, interaction) q = (m_pi**n + p**n) / (m_pi**(n-1) + p**(n-1)) / Lambda_b return q def ratio(X, Lambda_b): '''Assume energies are in the first column of X''' return Q_approx(X[:, 0], Lambda_b, interaction='np').ravel() """ Explanation: It looks like $\nu \approx 0.6$ and $\tau \approx 0.8$ work nicely! Let's see if we can reproduce old results now. Setup Observables are considered at many values of the kinematic parameters $E_{\mathrm{lab}}$ and $\theta$. The expansion parameter is assumed to vary in energy so we must provide a callable function rather than a constant as before. Thus we will first define some functions for computing the expansion parameter. 
End of explanation """ nn_url = 'https://github.com/buqeye/buqeyebox/blob/master/nn_scattering/scattering_observables_EKM_R-0p9fm.h5?raw=true' response = urllib.request.urlopen(nn_url) h5file = tables.open_file("nn_observables_eft.h5", driver="H5FD_CORE", driver_core_image=response.read(), driver_core_backing_store=0) SGT = h5file.get_node('/SGT').read() DSG = h5file.get_node('/DSG').read() AY = h5file.get_node('/PB').read() A = h5file.get_node('/A').read() D = h5file.get_node('/D').read() AXX = h5file.get_node('/AXX').read() AYY = h5file.get_node('/AYY').read() q_cm = h5file.get_node('/q_cm').read() t_lab = h5file.get_node('/t_lab').read() degrees = h5file.get_node('/degrees').read() q_cm *= hbarc nn_online_pot = 'pwa93' nn_online_url = 'https://github.com/buqeye/buqeyebox/blob/master/nn_scattering/NN-online-Observables.h5?raw=true' nno_response = urllib.request.urlopen(nn_online_url) nn_online_file = tables.open_file("nn_online_example.h5", driver="H5FD_CORE", driver_core_image=nno_response.read(), driver_core_backing_store=0) SGT_nn_online = nn_online_file.get_node('/' + nn_online_pot + '/SGT').read() DSG_nn_online = nn_online_file.get_node('/' + nn_online_pot + '/DSG').read()[:, :-1] AY_nn_online = nn_online_file.get_node('/' + nn_online_pot + '/PB').read()[:, :-1] A_nn_online = nn_online_file.get_node('/' + nn_online_pot + '/A').read()[:, :-1] D_nn_online = nn_online_file.get_node('/' + nn_online_pot + '/D').read()[:, :-1] AXX_nn_online = nn_online_file.get_node('/' + nn_online_pot + '/AXX').read()[:, :-1] AYY_nn_online = nn_online_file.get_node('/' + nn_online_pot + '/AYY').read()[:, :-1] SGT.shape """ Explanation: Now import the relevant data for each observable from a precomputed hdf5 file (this requires the h5py package). Here we use Epelbaum, Krebs, and Meißner's $R=0.9$ fm potential up to N4LO as the order-by-order predictions, and the pwa93 as experimental data. 
End of explanation """ sgt_ref = SGT[0] dsg_ref = DSG[-1] cmaps = [plt.get_cmap(name) for name in ['Oranges', 'Greens', 'Blues', 'Reds']] # markers = ['o', '^', 'v', 's'] markers = None dark_col = 0.9 medium_col = 0.5 light_col = 0.25 dark_colors = [cmap(dark_col) for cmap in cmaps] medium_colors = [cmap(medium_col) for cmap in cmaps] light_colors = [cmap(light_col) for cmap in cmaps] """ Explanation: The reference scale $y_{\mathrm{ref}}$ was chosen to be $y_0$ for the total and differential cross section, but it was argued that $y_{\mathrm{ref}} = 1$ was sufficient for the spin observables. Here we set up interpolators that could be used with any energy or $\theta$ value. End of explanation """ orders = np.array([0, 2, 3, 4, 5]) # Chiral EFT orders nn_excluded = [0] # Ignore these orders in prediction Lambdab = 600 sgt_ratio = Q_approx(t_lab, Lambdab, interaction='np') print(sgt_ratio.shape, sgt_ref.shape) sgt_truncation = gm.TruncationPointwise(df=0, scale=0, excluded=nn_excluded) sgt_truncation.fit(SGT.T, ratio=sgt_ratio, ref=sgt_ref, orders=orders) fig, ax = plt.subplots(figsize=(3.4, 3.4)) for i, (n, coeff) in enumerate(zip(orders[1:], sgt_truncation.coeffs_.T)): ax.plot(t_lab, coeff, label=r'$c_{n}$'.format(n=n), c=medium_colors[i]); ax.legend(); """ Explanation: Total Cross Section In Melendez et al., we produced plots of residuals with statistical error bands for various EFT regulators and also ran model checking tests. 
The residual is defined as \begin{align} y_{\mathrm{res}} \equiv y_{\mathrm{th}} - y_{\mathrm{exp}} \end{align} First let's define the values of energies to be used in the analysis and get the imported data End of explanation """ fig, axes = plt.subplots(2, 2, figsize=(3.4, 3.2), sharex=True, sharey=True) for i, n in enumerate(orders[1:]): # Compute truncation errors for all orders less than y_i, skipping i=0 sgt_truncation = gm.TruncationPointwise(df=df_test, scale=tau_test, excluded=nn_excluded) sgt_truncation.fit(SGT[:i+2].T, ratio=sgt_ratio, ref=sgt_ref, orders=orders[:i+2]) sgt_intervals = sgt_lower, sgt_upper = sgt_truncation.interval([0.68, 0.95]) - SGT_nn_online[:, None] # Plot lines and bands for j in range(i, 4): ax = axes.ravel()[j] ax.plot(t_lab, SGT[i+1] - SGT_nn_online, color=dark_colors[i], zorder=i-5) ax.fill_between(t_lab, sgt_lower[1, ..., i], sgt_upper[1, ..., i], facecolor=light_colors[i], zorder=i-5) ax.fill_between(t_lab, sgt_lower[0, ..., i], sgt_upper[0, ..., i], facecolor=medium_colors[i], zorder=i-5) # Format ax = axes.ravel()[i] ax.axhline(0, 0, 1, ls='--', c='k', lw=1) ax.set_ylim(-13, 2) ax.set_xlim(0, 350) ax.set_yticks([-12, -9, -6, -3, 0]) ax.xaxis.set_minor_locator(mpl.ticker.FixedLocator(np.linspace(50, 350, 4))) ax.yaxis.set_minor_locator(mpl.ticker.FixedLocator(np.arange(-10.5, 2, 1.5))) ax.tick_params(which='both', direction='in') ax.text(0.05, 0.05, 'N$^{}$LO'.format(i+1), transform=ax.transAxes) fig.tight_layout(h_pad=0.4, w_pad=0.2); """ Explanation: Let's start by reproducing the Fig. 7 from Melendez et al. We will again begin by fitting the model, and then calling interval to find the the truncation error bands. End of explanation """ Image('../images/SGT_residuals_R0p9.png', width=NARROW_IMG_WIDTH, format='png') """ Explanation: Compare the above figure with Fig. 7 from Melendez et al. 
End of explanation """ dsg_energy = 96 DSG_1d = np.squeeze(DSG[:, t_lab == dsg_energy]) dsg_ratio = Q_approx(dsg_energy, Lambdab, interaction='np') dsg_ref = DSG_1d[-1] DSG_nn_online_1d = np.squeeze(DSG_nn_online[t_lab == dsg_energy]) print(DSG_1d.shape, DSG_nn_online_1d.shape) """ Explanation: Differential Cross Section Now set up differential cross section data End of explanation """ fig, axes = plt.subplots(2, 2, figsize=(3.4, 3.2), sharex=True, sharey=True) for i, n in enumerate(orders[1:]): # Compute truncation errors for all orders less than y_i, skipping i=0 dsg_truncation = gm.TruncationPointwise(df=df_test, scale=tau_test, excluded=nn_excluded) dsg_truncation.fit(DSG_1d[:i+2].T, ratio=dsg_ratio, ref=dsg_ref, orders=orders[:i+2]) dsg_intervals = dsg_lower, dsg_upper = dsg_truncation.interval([0.68, 0.95]) - DSG_nn_online_1d[:, None] # Plot lines and bands for j in range(i, 4): ax = axes.ravel()[j] ax.plot(degrees, DSG_1d[i+1] - DSG_nn_online_1d, color=dark_colors[i], zorder=i-5) ax.fill_between(degrees, dsg_lower[1, ..., i], dsg_upper[1, ..., i], facecolor=light_colors[i], zorder=i-5) ax.fill_between(degrees, dsg_lower[0, ..., i], dsg_upper[0, ..., i], facecolor=medium_colors[i], zorder=i-5) # Format ax = axes.ravel()[i] ax.set_ylim([-5, 2]) ax.set_xlim([1, 179]) ax.set_yticks([-4.5, -3.0, -1.5, 0.0, 1.5]) ax.set_xticks([60, 120]) ax.axhline(0, ls='--', c='k', lw=1) ax.xaxis.set_minor_locator(mpl.ticker.FixedLocator(np.arange(0, 180, 20))) ax.yaxis.set_minor_locator(mpl.ticker.FixedLocator(np.arange(-3.75, 2, 1.5))) ax.text(0.05, 0.05, 'N$^{}$LO'.format(i+1), transform=ax.transAxes) ax.tick_params(which='both', direction='in') axes[1, 0].set_xlabel(r'$\theta$ (deg)') axes[1, 1].set_xlabel(r'$\theta$ (deg)') fig.tight_layout(h_pad=0.4, w_pad=0.3); """ Explanation: Below we repeat the analysis for the differential cross section End of explanation """ Image('../images/DSG_residuals_R0p9.png', format='png', width=NARROW_IMG_WIDTH) """ Explanation: Compare the 
above figure with Fig. 8 from Melendez et al.: End of explanation """ t_lab_consistency = np.arange(20, 341, 20) ratio_consistency = Q_approx(t_lab_consistency, Lambdab, interaction='np') SGT_consistency = SGT[:, np.isin(t_lab, t_lab_consistency)] sgt_ref_consistency = SGT_consistency[0] N = len(t_lab_consistency) band_dobs = np.linspace(0.001, 1, 100) dobs = np.arange(0.1, 1, 0.1) beta = False fig = plt.figure(figsize=(3.4, 3.4)) ax = fig.add_subplot(111) ax.set_xlim([0, 1]) ax.set_ylim([0, 1]) ax.plot([0, 1], [0, 1], c='k') ax.set_xlabel('DoB') ax.set_ylabel(r'Success Rate, $N={}$'.format(N)) ax.set_title('Total Cross Section Consistency Plot') consistency_markers = ['s', 'D', 'o'] for i in range(3): sgt_cons = gm.TruncationPointwise(df=df_test, scale=tau_test, excluded=nn_excluded) idx = i+2 sgt_cons.fit(SGT_consistency[:idx].T, ratio=ratio_consistency, ref=sgt_ref_consistency, orders=orders[:idx]) D_CI, bands = sgt_cons.credible_diagnostic( data=SGT_consistency[idx], dobs=dobs, band_intervals=[0.68, 0.95, 0.99], band_dobs=band_dobs, beta=beta) ax.plot(dobs, D_CI[:, -1], c=medium_colors[i], marker=consistency_markers[i], markeredgecolor='k', markeredgewidth=0.5, markersize=8, label='N$^{}$LO'.format(idx-1)) # Make gray error bands if not beta: ax.fill_between(band_dobs, bands[0, 0], bands[0, 1], color='gray', alpha=0.25) ax.fill_between(band_dobs, bands[1, 0], bands[1, 1], color='gray', alpha=0.25) else: ax.fill_betweenx(band_dobs, bands[0, 0], bands[0, 1], color='gray', alpha=0.25) ax.fill_betweenx(band_dobs, bands[1, 0], bands[1, 1], color='gray', alpha=0.25) ax.legend(); """ Explanation: Model Checking Two model checking diagnostics were employed in Melendez et al., consistency plots and $\Lambda_b$ posteriors. Consistency plots check whether a $100\alpha\%$ degree of belief interval contains true value of the truncation error approximately $100\alpha\%$ of the time. 
Validation data can be checked in this manner using the credible_diagnostic method, which computes the average number of points contained in a given credible interval. The data should lie close to the diagonal in the plot below. The 68% and 95% gray uncertainty bands allow for deviations from the diagonal due to the finite set of data. End of explanation """ Image('../images/sgt_consistency.png', format='png', width=NARROW_IMG_WIDTH) """ Explanation: Again, compare this with Fig. 12 of Melendez et al.: End of explanation """ def Lb_logprior(Lambda_b): """Melendez et al., Eq. (31)""" return np.where((300 <= Lambda_b) & (Lambda_b <= 1500), np.log(1. / Lambda_b), -np.inf) def compute_posterior_intervals(model, data, ratios, ref, orders, max_idx, logprior, Lb): model.fit(data[:max_idx+1].T, ratio=ratios[0], ref=ref, orders=orders[:max_idx+1]) log_like = np.array([model.log_likelihood(ratio=ratio) for ratio in ratios]) log_like += logprior posterior = np.exp(log_like - np.max(log_like)) posterior /= np.trapz(posterior, x=Lb) # Normalize bounds = np.zeros((2,2)) for i, p in enumerate([0.68, 0.95]): bounds[i] = gm.hpd_pdf(pdf=posterior, alpha=p, x=Lb) median = gm.median_pdf(pdf=posterior, x=Lb) return posterior, bounds, median def draw_summary_statistics(bounds68, bounds95, median, height=0, ax=None): if ax is None: ax = plt.gca() ax.plot(bounds68, [height, height], c='darkgrey', lw=6, solid_capstyle='round') ax.plot(bounds95, [height, height], c='darkgrey', lw=2, solid_capstyle='round') ax.plot([median], [height], c='white', marker='o', zorder=10, markersize=3) t_lab_Lb = np.array([96, 143, 200, 300]) degrees_Lb = np.array([60, 120]) X_Lb = gm.cartesian(t_lab_Lb, degrees_Lb) Lb_colors = light_colors[2:] Lambda_b_array = np.arange(1, 1501, 1) # scale invariant: df = 0 Lb_model = gm.TruncationPointwise(df=0, excluded=nn_excluded) ratios_sgt_Lb = [Q_approx(t_lab_Lb, Lb, interaction='np') for Lb in Lambda_b_array] ratios_dsg_Lb = [Q_approx(X_Lb[:, 0], Lb, interaction='np') 
for Lb in Lambda_b_array] logprior = Lb_logprior(Lambda_b_array) # Mask unused SGT data, and compute results sgt_Lb = SGT[:, np.isin(t_lab, t_lab_Lb)] sgt_Lb_n3lo_result = compute_posterior_intervals( Lb_model, sgt_Lb, ratios_sgt_Lb, ref=sgt_Lb[0], orders=orders, max_idx=3, logprior=logprior, Lb=Lambda_b_array) sgt_Lb_n4lo_result = compute_posterior_intervals( Lb_model, sgt_Lb, ratios_sgt_Lb, ref=sgt_Lb[0], orders=orders, max_idx=4, logprior=logprior, Lb=Lambda_b_array) # Mask unused DSG data, and compute results dsg_Lb = np.reshape(DSG[:, np.isin(t_lab, t_lab_Lb)][..., np.isin(degrees, degrees_Lb)], (len(orders), -1)) dsg_Lb_n3lo_result = compute_posterior_intervals( Lb_model, dsg_Lb, ratios_dsg_Lb, ref=dsg_Lb[0], orders=orders, max_idx=3, logprior=logprior, Lb=Lambda_b_array) dsg_Lb_n4lo_result = compute_posterior_intervals( Lb_model, dsg_Lb, ratios_dsg_Lb, ref=dsg_Lb[0], orders=orders, max_idx=4, logprior=logprior, Lb=Lambda_b_array) # Concatenate all spin observable data into one long vector, and compute results spins_Lb = np.concatenate([ np.reshape(spin[:, np.isin(t_lab, t_lab_Lb)][..., np.isin(degrees, degrees_Lb)], (len(orders), -1)) for spin in [AY, D, A, AXX, AYY]], axis=1) ratios_spins_Lb = np.concatenate([ratios_dsg_Lb for i in [AY, D, A, AXX, AYY]], axis=1) spins_Lb_n3lo_result = compute_posterior_intervals( Lb_model, spins_Lb, ratios_spins_Lb, ref=1, orders=orders, max_idx=3, logprior=logprior, Lb=Lambda_b_array) spins_Lb_n4lo_result = compute_posterior_intervals( Lb_model, spins_Lb, ratios_spins_Lb, ref=1, orders=orders, max_idx=4, logprior=logprior, Lb=Lambda_b_array) # Gather the above results results = [ sgt_Lb_n3lo_result, sgt_Lb_n4lo_result, dsg_Lb_n3lo_result, dsg_Lb_n4lo_result, spins_Lb_n3lo_result, spins_Lb_n4lo_result ] # Plot each posterior and its summary statistics fig, ax = plt.subplots(1, 1, figsize=(3.4, 3.4)) for i, (posterior, bounds, median) in enumerate(results): posterior = posterior / (1.2*np.max(posterior)) # Scale so they're 
all the same height # Make the lines taper off Lb_vals = Lambda_b_array[posterior > 1e-2] posterior = posterior[posterior > 1e-2] # Plot and fill posterior, and add summary statistics ax.plot(Lb_vals, posterior-i, c='darkgrey') ax.fill_between(Lb_vals, -i, posterior-i, facecolor=Lb_colors[i % 2]) draw_summary_statistics(*bounds, median, ax=ax, height=-i) # Plot formatting ax.set_yticks([-0, -2, -4]) ax.set_yticks([-1.1, -3.1], minor=True) ax.set_yticklabels([r'$\sigma$', r'$\displaystyle\frac{d\sigma}{d\Omega}$', r'$X_{pqik}$']) ax.tick_params(axis='both', which='both', direction='in') ax.tick_params(which='major', length=0) ax.tick_params(which='minor', length=7, right=True) ax.set_xlim(0, 1200) ax.set_xticks([0, 300, 600, 900, 1200]) ax.set_xlabel(r'$\Lambda_b$ (MeV)') ax.grid(axis='x') ax.set_axisbelow(True) """ Explanation: Each curve shows a slight discrepancy (only a few points) with Fig. 12, but that is expected since the priors are not identical between the figures. Now let's move on to pr($\Lambda_b | c$). The $\Lambda_b$ posteriors were computed with uninformative Set C prior, which corresponds exactly to $\nu_0 = 0$ for the inverse chi squared prior. Thus, the figures generated below should correspond exactly with those in the paper. Below, are some helper functions, including one to call the log_likelihood method, multiply by a prior, normalize the posterior, and extract summary statistics for the Lambda_b parameter. Since the posteriors can be quite skewed, a highest probability density interval is calculated using the hpd_pdf function, rather than a simple symmetric credible interval. End of explanation """ Image('../images/Lambdab_posteriors.png', format='png', width=NARROW_IMG_WIDTH) """ Explanation: Compare the above figure with Fig. 22 of Melendez et al.: End of explanation """
henchc/Rediscovering-Text-as-Data
07-Textual-Similarity/01-Textual-Similarity.ipynb
mit
!wget https://ndownloader.figshare.com/files/3686778 -P data/ %%capture !unzip data/3686778 -d data/ """ Explanation: Textual Similarity This notebook is designed to reproduce several findings from Andrew Piper's article "Novel Devotions: Conversional Reading, Computational Modeling, and the Modern Novel" (<i>New Literary History</i> 46.1 (2015), 63-98). See especially Fig 2 (p 72), Fig 4 (p 75), and Table 1 (p 79). Piper has made his research corpus of novels available here: https://figshare.com/articles/txtlab_Novel450/2062002 We'll download it with wget: End of explanation """ %matplotlib inline import numpy as np from datascience import * """ Explanation: Bag of Words (BoW) language model Today we'll see our first, admittedly primitive, computational model of language called "Bag of Words". This model was very popular in early text analysis, and continues to be used today. In fact, the models that have replaced it are still very difficult to actually interpret, giving the BoW approach a slight advantage if we want to understand why the model makes certain decisions. Getting into the model we'll have to revisit Term Frequency (think Counter). We'll then see the Document-Term Matrix (DTM), which we've discusssed briefly before. We'll have to normalize these counts if we want to compare. Then we'll look at the available Python libraries to streamline this process. Once we have our BoW model we can analyze it in a high-dimensional vector space, which gives us more insights into the similarities and clustering of different texts. We'll then get into Piper's analysis. 
We'll build our model from scratch with numpy and datascience libraries before we get into the higher level libraries: End of explanation """ with open('data/Augustine-Confessions.txt') as f: confessions = f.read() confessions """ Explanation: Let's read in Augustine's Confessions text: End of explanation """ confessions_list = confessions.split('\n'*6) len(confessions_list) """ Explanation: There should be 13 books, which are fortunately separated by six line breaks: End of explanation """ confessions_list[0] """ Explanation: Let's peek at the first: End of explanation """ import spacy nlp = spacy.load('en', parser=False) first_book = confessions_list[0] parsed = nlp(first_book) first_token_list = [token.text for token in parsed] first_token_list """ Explanation: Term Frequency Revisited We'll remember from last week, that while split might be a quick way to get tokens, it's not the most accurate because it doesn't separate punctuation and contractions. We'll use spacy again to get tokens. End of explanation """ from collections import Counter word_freq = Counter(first_token_list) word_freq.most_common(20) """ Explanation: Now we can use Counter to get the term frequency: End of explanation """ from sklearn.feature_extraction.text import CountVectorizer CountVectorizer? """ Explanation: Challenge Write some code to get the 20 most common words of the second book. How similar are they to those of the first book? Document-Term Matrix If we plan to compare word frequencies across texts, we could collate these Counter dictionaries for each book in Confessions. But we don't want to write all that code! There is an easy function that streamlines the process called CountVectorizer. We saw it in the first notebook with Moretti, but didn't really explain what it does. Let's look at the docstring: End of explanation """ cv = CountVectorizer() dtm = cv.fit_transform(confessions_list) dtm """ Explanation: Cool. 
So we'll create the CountVectorizer object, then transform it on our list of documents, here that would be the books in Augustine's Confessions. End of explanation """ # de-sparsify desparse = dtm.toarray() # create labels for columns word_list = cv.get_feature_names() # create a new Table dtm_tb = Table(word_list).with_rows(desparse) dtm_tb """ Explanation: What's this? A sparse matrix just means that some cells in the table don't have value. Why? Because the vocabulary base is not the same for all the books! Let's try to demonstrate this. End of explanation """ dtm_tb['read'] """ Explanation: Welcome to the Document Term Matrix. This is a core concept in NLP and text analysis. It's not that complicated! We have columns for each word in the entire corpus. Then each row is for each document. In our case, that's books in Confessions. The values are the word count for that word in the corresponding document. Note that there are many 0s, that word just doesn't show up in that document! We can call up frequencies for a given word for each chapter easily, since they are the column names: End of explanation """ len(dtm_tb['read']) """ Explanation: Looks to be about 13 counts, one for each book, let's double check! End of explanation """ toks_tab = Table() toks_tab.append_column(label="Word List", values=word_list) toks_tab.append_column(label="Frequency", values=sum(desparse)) # this sum(desparse) will sum the word count column toks_tab.show() """ Explanation: Normalization Piper notes: The words were thus normalized according to their relative importance within the work. [95] Let's get the total number of occurences of each word in the whole text. The key to the code below is sum(desparse), which sums the column for all the books in our matrix: End of explanation """ row_sums = np.sum(desparse, axis=1) normed = desparse/row_sums[:,None] dtm_tb = Table(word_list).with_rows(normed) dtm_tb """ Explanation: Cool, but we already know how to do this much faster with Counter. 
Let's take this another step further. In order to make apples-to-apples comparisons across Books, we can normalize our values by dividing each word count by the total number of words in its Book. To do that, we'll need to sum on axis=1, which means summing the row (number of words in that book), as opposed to summing the column. Once we have the total number of words in that Book, we can get the percentage of words that one particular word accounts for, and we can do that for every word across the matrix! End of explanation """ dtm_tb['abandoned'] """ Explanation: Reading the matrix above, we see that the word "abandoned" accounts for .0145406% of words in Book 1, and .0277855% of words in Book 2. We can still grab out the normalized frequencies of the word 'read' for each book: End of explanation """ from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS ENGLISH_STOP_WORDS """ Explanation: For a variety of reasons we like to remove words like "the", "of", "and", etc. These are refered to as 'stopwords.' As Piper notes in footnote 24: I removed stop words and only kept those words that appeared in at least sixty percent of the documents (twelve of the twenty parts). [95] End of explanation """ ye_olde_stop_words = ['thou','thy','thee', 'thine', 'ye', 'hath','hast', 'wilt','aught',\ 'art', 'dost','doth', 'shall', 'shalt','tis','canst','thyself',\ 'didst', 'yea', 'wert'] stop_words = list(ENGLISH_STOP_WORDS) + ye_olde_stop_words # remove stopwords from column list dtm_tb = dtm_tb.drop(stop_words) # it is often more efficient to perform operations on arrays rather than tables dtm_array = dtm_tb.to_array() dtm_array """ Explanation: Since we are using an older translation of Augustine, we have to remove archaic forms of these stopwords as well. 
End of explanation """ from sklearn.feature_extraction.text import TfidfTransformer cv = CountVectorizer(stop_words = stop_words) dtm = cv.fit_transform(confessions_list) tt = TfidfTransformer(norm='l1',use_idf=False) dtm_tf = tt.fit_transform(dtm) word_list = cv.get_feature_names() dtm_array = dtm_tf.toarray() """ Explanation: Question In the script above, we normalized term frequencies before removing stopwords. However, it would have been just as easy to do those steps in the opposite order. Are there situations where this decision has more or less of an impact on the output? Note: Generally stopwords are removed before counting term frequencies and normalization. Streamlining That was a lot of work, if this is such a common task hasn't someone streamlined this? In fact, we can simply instruct CountVectorizer not to include stopwords at all and another function, TfidfTransformer, normalizes easily. End of explanation """ Table(word_list).with_rows(dtm_array) """ Explanation: Note: If you are processing a text that uses only contemporary English, it may be unnecessary to import the list of stopwords explicitly. Simply pass the value "english" into the "stop_words" argument in CountVectorizer. End of explanation """ dtm_array = dtm_tf.toarray() dtm_array """ Explanation: Vector Space Model of Language My question was: how does a vocabulary that runs throughout the majority of a work change over the course of that work? I then calculated the Euclidean distance between each of the twenty parts of the work based on the frequency of the remaining words and stored those results in a symmetrical distance table. [95] Great, now we have a matrix with normalized frequencies of all the words in the entire corpus. Right now our corpus is just all the books in Augustine's Confessions. Let's move away from the table and just create a list of 13 vectors with only the normalized frequency values, one for each Book. 
End of explanation """ dtm_array[0] """ Explanation: Each vector has a number of coordinates equal to the number of unique words in the corpus. Let's just take Book 1: End of explanation """ a = (2,6) b = (5,10) euc_dist = np.sqrt( (a[0]-b[0])**2 + (a[1]-b[1])**2 ) euc_dist """ Explanation: One way to measure the similarity of texts, which Piper uses in his article, would be to measure the Euclidean distance between their coordinates in space. According to Wikipedia: The Euclidean distance or Euclidean metric is the "ordinary" straight-line distance between two points in Euclidean space $\mathrm{d}(\mathbf{b},\mathbf{a})=\sqrt{(a_1-b_1)^2 + (a_2-b_2)^2}$ Let's consider a simple 2 dimensional model. We have two point in space: End of explanation """ import matplotlib.pyplot as plt %matplotlib inline plt.scatter([a[0], b[0]], [a[1], b[1]]) plt.plot([a[0], b[0]], [a[1], b[1]]) plt.show() """ Explanation: We can visualize this too: End of explanation """ a = (2,6,15) b = (5,10,3) euc_dist = np.sqrt( (a[0]-b[0])**2 + (a[1]-b[1])**2 + (a[2]-b[2])**2 ) euc_dist import matplotlib as mpl from mpl_toolkits.mplot3d import Axes3D mpl.rcParams['legend.fontsize'] = 10 fig = plt.figure() ax = fig.gca(projection='3d') ax.scatter([a[0], b[0]], [a[1], b[1]], [a[2], b[2]]) ax.plot([a[0], b[0]], [a[1], b[1]], [a[2], b[2]]) plt.show() """ Explanation: We can think of this 2 dimensional distance between 2 points as looking at 2 different texts. In this very simple 2-d model though, we only have 2 words in the entire corpus! (2,6) and (5,10) would be the absolute counts for each text. Imagine: ``` Document 1: the dog the dog dog dog dog dog Document 2: the dog the dog the dog the dog the dog dog dog dog dog dog ``` That would yield the comparison above. If we added a third point (document), we could see which 2 documents were closest to one another! Ok, not too bad, but how do we do this with hundreds or thousands of dimensions (words) acorss hundreds or thousands of points (documents)? 
Well it actually scales the same way! Here it is for 3 dimensions: $\mathrm{d}(\mathbf{b},\mathbf{a})=\sqrt{(a_1-b_1)^2 + (a_2-b_2)^2 + (a_3-b_3)^2}$ End of explanation """ from scipy.spatial import distance distance.euclidean(a,b) """ Explanation: We don't have to use our cool formula to calculate this, or to scale it up for n dimensions. That's what scipy is for: End of explanation """ mpl.rcParams['legend.fontsize'] = 10 origin = (0,0,0) fig = plt.figure() ax = fig.gca(projection='3d') ax.scatter([a[0], b[0], origin[0]], [a[1], b[1], origin[1]], [a[2], b[2], origin[2]]) ax.plot([origin[0], a[0]], [origin[1], a[1]], [origin[2], a[2]]) ax.plot([origin[0], b[0]], [origin[1], b[1]], [origin[2], b[2]]) plt.show() """ Explanation: Another measure of two vectors, more common for text analysis, is called cosine similarity. According to Wikipedia: Cosine similarity is a measure of similarity between two non-zero vectors of an inner product space that measures the cosine of the angle between them. The cosine of 0° is 1, and it is less than 1 for any other angle. It is thus a judgment of orientation and not magnitude: two vectors with the same orientation have a cosine similarity of 1, two vectors at 90° have a similarity of 0, and two vectors diametrically opposed have a similarity of -1, independent of their magnitude. $\text{similarity} = \cos(\theta) = {\mathbf{A} \cdot \mathbf{B} \over \|\mathbf{A}\|2 \|\mathbf{B}\|_2} = \frac{ \sum\limits{i=1}^{n}{A_i B_i} }{ \sqrt{\sum\limits_{i=1}^{n}{A_i^2}} \sqrt{\sum\limits_{i=1}^{n}{B_i^2}} }$ Essentially we want to take the cosine of the angle formed between two vectors (documents). We start the vector at the origin and measure the angle between the two vectors we're interested in. 
End of explanation """ a = (2,6) b = (5,10) # don't worry about the formula so much as the intuition behind it: angle between vectors cos_dist = 1 - (a[0]*b[0] + a[1]*b[1]) / ( np.sqrt(a[0]**2 + a[1]**2 ) * np.sqrt(b[0]**2 + b[1]**2 ) ) cos_dist """ Explanation: Let's go back to two dimensions for the vanilla numpy calculation: End of explanation """ distance.cosine(a,b) """ Explanation: Of course, scipy has taken care of this for us too: End of explanation """ a = (2,6,15) b = (5,10,3) distance.cosine(a,b) """ Explanation: For the 3-d model: End of explanation """ a = (2,6) b = (5,10) c = (14,11) print(distance.euclidean(a,b)) print(distance.euclidean(a,c)) print(distance.euclidean(b,c)) """ Explanation: Challenge Try passing different values into both the euclidean and cosine distance functions. What is your intuition about these different measurements? Remember that all values in the Term-Frequency Matrix are positive, between [0,1], and that most are very small. Visualizing Texts in Vector Space Let's walk through this now. Say we have 3 texts, a, b, and c. The whole corpus, again, only has 2 words (dimensions)! End of explanation """ point_matrix = np.array([a,b,c]) point_matrix """ Explanation: We'll make a matrix for the points: End of explanation """ from sklearn.metrics import pairwise pairwise.pairwise_distances(point_matrix, metric='euclidean') """ Explanation: Now we can use sklearn's pairwise_distances method to compare each book to each book: End of explanation """ dist_matrix = pairwise.pairwise_distances(dtm_tf, metric='euclidean') title_list = ['Book '+str(i+1) for i in range(len(confessions_list))] Table(title_list).with_rows(dist_matrix) """ Explanation: Cool! We got what we calculated. Note: the results are mirrored because the columns and rows are both the same texts. 
We can do the same thing on Augustine's Confessions, remember the rows are for each Book too!: End of explanation """ from sklearn.manifold import MDS mds = MDS(n_components = 2, dissimilarity="precomputed") embeddings = mds.fit_transform(dist_matrix) _, ax = plt.subplots(figsize=(10,10)) ax.scatter(embeddings[:,0], embeddings[:,1], alpha=0) for i in range(13): ax.annotate(i+1, ((embeddings[i,0], embeddings[i,1]))) """ Explanation: Visualizing hundreds of dimensions is difficult for us. So we can use multi-dimensional scaling (MDS) to put this into a 2-d graph for us: End of explanation """ from sklearn.cluster import KMeans kmeans = KMeans(n_clusters=2) kmeans.fit_predict(dist_matrix) """ Explanation: Homework Try visualizing the textual similarities again using the Cosine distance. How does that change the result? Why? Brief Aside: K-Means Clustering Tries to find natural groupings among points, once we tell it how many groups to look for. End of explanation """ def text_splitter(text): n = int(len(text)/20) # get length n of each part text_list = [text[i*n:(i+1)*n] for i in range(20)] # slice out the text return(text_list) """ Explanation: A standard clustering test such as k-means indicates that the two clusters consist of Books 11–12, with Book 13 being grouped with Books 1–10.) [71] This array (length 13) classifies each book into the n_clusters we decide based on their vector similarities. We won't do much more clustering, but just know that it's an unsupervised machine learning algorithm to classify data. We have to choose how many classes (categories) there are, and the algorithm will decide in which bucket to place the observation. The Conversional Novel The first step was to divide each novel into twenty equal parts. Rather than rely on the irregularity of chapter divisions, which can vary within and between works, this process creates standard units of analysis. 
[95] Instead of actually using chapter divisions, Piper elects to split each novel into 20 equal parts. We can write a function text_splitter that will take in a str of the text and return a list of 20 equal parts: End of explanation """ def text_distances(text_list): from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer from sklearn.metrics import pairwise ye_olde_stop_words = ['thou','thy','thee', 'thine', 'ye', 'hath','hast', 'wilt','aught',\ 'art', 'dost','doth', 'shall', 'shalt','tis','canst','thyself',\ 'didst', 'yea', 'wert'] stop_words = list(ENGLISH_STOP_WORDS)+ye_olde_stop_words cv = CountVectorizer(stop_words = stop_words, min_df=0.6) dtm = cv.fit_transform(text_list) tt = TfidfTransformer(norm='l1',use_idf=False) dtm_tf = tt.fit_transform(dtm) dist_matrix = pairwise.pairwise_distances(dtm_tf, metric='euclidean') return(dist_matrix) """ Explanation: I then calculated the Euclidean distance between each of the twenty parts of the work based on the frequency of the remaining words and stored those results in a symmetrical distance table. In the end, for each work I had a 20x20 table of distances between every part of a work to every other, in which the distances are considered to be measures of the similarity of the language between a work’s individual parts. [95] Piper then calculates the Euclidean distances between each part to every other part. So we'll have to calculate the distance and use our pairwise method. We can write a function for that too! 
To make it better, let's have it take in a list of texts that our text_splitter will output: End of explanation """ def in_half_dist(matrix): n = len(matrix) # length of work, should be 20 d1 = [] # will hold distances for first half d2 = [] # will hold distances for second half for i in range(int(n/2)-1): # loop through first half of work (10 in our case) for j in range(i+1, int(n/2)): # loop through itself (first half again) d1.append(matrix[i,j]) # append distance between one part to another (in first half) for i in range(int(n/2), n-1): for j in range(i+1, n): d2.append(matrix[i,j]) return(abs(sum(d1)-sum(d2))/len(d1)) # take average of each distance array and subtract 2 from 1 """ Explanation: Piper the introduces two new ideas. for the in-half distance I took the average distance of each part in the first half of a work to every other part in that half and subtracted it from the average distance of every part of the second half to itself. [95] Let's write a function that does that, and have it take in our matrix returned by text_distances: End of explanation """ def cross_half_dist(matrix): n = len(matrix) # number of parts, here 20 d = [] # will hold distnaces for i in range(int(n/2)): # loop through first half for j in range(int(n/2), n): # loop through second half d.append(matrix[i,j]) # append distance between first and second return(sum(d)/len(d)) # take average """ Explanation: Great! And now for his second measure: For the cross-half distance, I took the average distance between all of the first ten parts of a work to all of the second ten parts of a work, similar to the process used in group average clustering. [95] Let's write another function: End of explanation """ def text_measures(text): text_list = text_splitter(text) dist_matrix = text_distances(text_list) return(cross_half_dist(dist_matrix), in_half_dist(dist_matrix)) """ Explanation: Awesome! 
We can also write ourselves a quick function to call the four functions we just wrote: End of explanation """ text_measures(confessions) """ Explanation: text_measures should now return two values. The first values is the cross_half_dist and the second values is the in_half_dist. Let's test this out on Augustine's `Confessions': End of explanation """ metadata_tb = Table.read_table('data/2_txtlab_Novel450.csv') metadata_tb.show(5) """ Explanation: Looks good! Now we can read in the corpus Piper used: End of explanation """ metadata_tb = metadata_tb.where('language', "English") metadata_tb.show(5) """ Explanation: We'll stick with English so we don't have to think about the possible issues of going between languages: End of explanation """ corpus_path = 'data/2_txtalb_Novel450/' def text_measures_alt(text_name): with open(corpus_path+text_name, 'r') as file_in: text = file_in.read() text_list = text_splitter(text) dist_matrix = text_distances(text_list) return(cross_half_dist(dist_matrix), in_half_dist(dist_matrix)) """ Explanation: We'll slightly change our text_measures function so that it can read in the file of the text we want to read in, instead of taking the confessions string we already had: End of explanation """ measures = metadata_tb.apply(text_measures_alt, 'filename') measures """ Explanation: Now we can use Table's apply method to call the function text_measures_alt on all the files in the corpus: End of explanation """ metadata_tb['Cross-Half'] = measures[:,0] metadata_tb['In-Half'] = measures[:,1] metadata_tb.show(5) """ Explanation: Let's add these measures to our Table: End of explanation """ def get_zscores(values): import numpy as np mn = np.mean(values) st = np.std(values) zs = [] for x in values: z = (x-mn)/st zs.append(z) return zs """ Explanation: If we want to see which novels stick out, we might be interested in the z-score for a particular novel. This is how many standard devations the novel is away from the mean. 
Let's write a function: End of explanation """ metadata_tb['Cross-Z-Score'] = get_zscores(measures[:,0]) metadata_tb['In-Z-Score'] = get_zscores(measures[:,1]) metadata_tb.show(5) """ Explanation: Now we can add these to the Table too: End of explanation """ metadata_tb.scatter('In-Half', 'Cross-Half') """ Explanation: Scatter plot, please! End of explanation """
chrinide/optunity
notebooks/basic-cross-validation.ipynb
bsd-3-clause
import optunity import optunity.cross_validation """ Explanation: Basic: cross-validation This notebook explores the main elements of Optunity's cross-validation facilities, including: standard cross-validation using strata and clusters while constructing folds using different aggregators We recommend perusing the <a href="http://docs.optunity.net/user/cross_validation.html">related documentation</a> for more details. Nested cross-validation is available as a separate notebook. End of explanation """ data = list(range(6)) labels = [True] * 3 + [False] * 3 """ Explanation: We start by generating some toy data containing 6 instances which we will partition into folds. End of explanation """ def f(x_train, y_train, x_test, y_test): print("") print("train data:\t" + str(x_train) + "\t train labels:\t" + str(y_train)) print("test data:\t" + str(x_test) + "\t test labels:\t" + str(y_test)) return 0.0 """ Explanation: Standard cross-validation <a id=standard></a> Each function to be decorated with cross-validation functionality must accept the following arguments: - x_train: training data - x_test: test data - y_train: training labels (required only when y is specified in the cross-validation decorator) - y_test: test labels (required only when y is specified in the cross-validation decorator) These arguments will be set implicitly by the cross-validation decorator to match the right folds. Any remaining arguments to the decorated function remain as free parameters that must be set later on. Lets start with the basics and look at Optunity's cross-validation in action. We use an objective function that simply prints out the train and test data in every split to see what's going on. 
End of explanation """ f_2folds = optunity.cross_validated(x=data, y=labels, num_folds=2)(f) print("using 2 folds") f_2folds() # f_2folds as defined above would typically be written using decorator syntax as follows # we don't do that in these examples so we can reuse the toy objective function @optunity.cross_validated(x=data, y=labels, num_folds=2) def f_2folds(x_train, y_train, x_test, y_test): print("") print("train data:\t" + str(x_train) + "\t train labels:\t" + str(y_train)) print("test data:\t" + str(x_test) + "\t test labels:\t" + str(y_test)) return 0.0 """ Explanation: We start with 2 folds, which leads to equally sized train and test partitions. End of explanation """ f_3folds = optunity.cross_validated(x=data, y=labels, num_folds=3)(f) print("using 3 folds") f_3folds() """ Explanation: If we use three folds instead of 2, we get 3 iterations in which the training set is twice the size of the test set. End of explanation """ f_2x3folds = optunity.cross_validated(x=data, y=labels, num_folds=3, num_iter=2)(f) print("using 2x3 folds") f_2x3folds() """ Explanation: If we do two iterations of 3-fold cross-validation (denoted by 2x3 fold), two sets of folds are generated and evaluated. End of explanation """ strata = [[0, 1], [2, 3]] f_stratified = optunity.cross_validated(x=data, y=labels, strata=strata, num_folds=3)(f) f_stratified() """ Explanation: Using strata and clusters<a id=strata-clusters></a> Strata are defined as sets of instances that should be spread out across folds as much as possible (e.g. stratify patients by age). Clusters are sets of instances that must be put in a single fold (e.g. cluster measurements of the same patient). Optunity allows you to specify strata and/or clusters that must be accounted for while construct cross-validation folds. Not all instances have to belong to a stratum or clusters. Strata We start by illustrating strata. Strata are specified as a list of lists of instances indices. Each list defines one stratum. 
We will reuse the toy data and objective function specified above. We will create 2 strata with 2 instances each. These instances will be spread across folds. We create two strata: ${0, 1}$ and ${2, 3}$. End of explanation """ clusters = [[0, 1], [2, 3]] f_clustered = optunity.cross_validated(x=data, y=labels, clusters=clusters, num_folds=3)(f) f_clustered() """ Explanation: Clusters Clusters work similarly, except that now instances within a cluster are guaranteed to be placed within a single fold. The way to specify clusters is identical to strata. We create two clusters: ${0, 1}$ and ${2, 3}$. These pairs will always occur in a single fold. End of explanation """ strata = [[0, 1, 2]] clusters = [[0, 3], [4, 5]] f_strata_clustered = optunity.cross_validated(x=data, y=labels, clusters=clusters, strata=strata, num_folds=3)(f) f_strata_clustered() """ Explanation: Strata and clusters Strata and clusters can be used together. Lets say we have the following configuration: 1 stratum: ${0, 1, 2}$ 2 clusters: ${0, 3}$, ${4, 5}$ In this particular example, instances 1 and 2 will inevitably end up in a single fold, even though they are part of one stratum. This happens because the total data set has size 6, and 4 instances are already in clusters. End of explanation """ @optunity.cross_validated(x=data, num_folds=3) def f(x_train, x_test): result = x_test[0] print(result) return result f(1) """ Explanation: Aggregators <a id=aggregators></a> Aggregators are used to combine the scores per fold into a single result. The default approach used in cross-validation is to take the mean of all scores. In some cases, we might be interested in worst-case or best-case performance, the spread, ... Opunity allows passing a custom callable to be used as aggregator. The default aggregation in Optunity is to compute the mean across folds. 
End of explanation """ @optunity.cross_validated(x=data, num_folds=3, aggregator=max) def fmax(x_train, x_test): result = x_test[0] print(result) return result fmax(1) @optunity.cross_validated(x=data, num_folds=3, aggregator=min) def fmin(x_train, x_test): result = x_test[0] print(result) return result fmin(1) """ Explanation: This can be replaced by any function, e.g. min or max. End of explanation """ @optunity.cross_validated(x=data, num_folds=3, aggregator=optunity.cross_validation.mean_and_list) def f_full(x_train, x_test, coeff): return x_test[0] * coeff # evaluate f mean_score, all_scores = f_full(1.0) print(mean_score) print(all_scores) """ Explanation: Retaining intermediate results Often, it may be useful to retain all intermediate results, not just the final aggregated data. This is made possible via optunity.cross_validation.mean_and_list aggregator. This aggregator computes the mean for internal use in cross-validation, but also returns a list of lists containing the full evaluation results. End of explanation """ opt_coeff, info, _ = optunity.minimize(f_full, coeff=[0, 1], num_evals=10) print(opt_coeff) print("call log") for args, val in zip(info.call_log['args']['coeff'], info.call_log['values']): print(str(args) + '\t\t' + str(val)) """ Explanation: Note that a cross-validation based on the mean_and_list aggregator essentially returns a tuple of results. If the result is iterable, all solvers in Optunity use the first element as the objective function value. You can let the cross-validation procedure return other useful statistics too, which you can access from the solver trace. 
End of explanation """ data = list(range(20)) labels = [1 if i%4==0 else 0 for i in range(20)] @optunity.cross_validated(x=data, y=labels, num_folds=5) def unbalanced_folds(x_train, y_train, x_test, y_test): print("") print("train data:\t" + str(x_train) + "\ntrain labels:\t" + str(y_train)) + '\n' print("test data:\t" + str(x_test) + "\ntest labels:\t" + str(y_test)) + '\n' return 0.0 unbalanced_folds() """ Explanation: Cross-validation with scikit-learn <a id=cv-sklearn></a> In this example we will show how to use cross-validation methods that are provided by scikit-learn in conjunction with Optunity. To do this we provide Optunity with the folds that scikit-learn produces in a specific format. In supervised learning datasets often have unbalanced labels. When performing cross-validation with unbalanced data it is good practice to preserve the percentage of samples for each class across folds. To achieve this label balance we will use <a href="http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedKFold.html">StratifiedKFold</a>. End of explanation """ from sklearn.cross_validation import StratifiedKFold stratified_5folds = StratifiedKFold(labels, n_folds=5) folds = [[list(test) for train, test in stratified_5folds]] @optunity.cross_validated(x=data, y=labels, folds=folds, num_folds=5) def balanced_folds(x_train, y_train, x_test, y_test): print("") print("train data:\t" + str(x_train) + "\ntrain labels:\t" + str(y_train)) + '\n' print("test data:\t" + str(x_test) + "\ntest labels:\t" + str(y_test)) + '\n' return 0.0 balanced_folds() """ Explanation: Notice above how the test label sets have a varying number of postive samples, some have none, some have one, and some have two. 
End of explanation """ data = list(range(6)) labels = [True] * 3 + [False] * 3 fold1 = [[0, 3], [1, 4], [2, 5]] fold2 = [[0, 5], [1, 4], [0, 3]] # notice what happens when the indices are not unique folds = [fold1, fold2] @optunity.cross_validated(x=data, y=labels, folds=folds, num_folds=3, num_iter=2) def multiple_iters(x_train, y_train, x_test, y_test): print("") print("train data:\t" + str(x_train) + "\t train labels:\t" + str(y_train)) print("test data:\t" + str(x_test) + "\t\t test labels:\t" + str(y_test)) return 0.0 multiple_iters() """ Explanation: Now all of our train sets have four positive samples and our test sets have one positive sample. To use predetermined folds, place a list of the test sample idices into a list. And then insert that list into another list. Why so many nested lists? Because you can perform multiple cross-validation runs by setting num_iter appropriately and then append num_iter lists of test samples to the outer most list. Note that the test samples for a given fold are the idicies that you provide and then the train samples for that fold are all of the indices from all other test sets joined together. If not done carefully this may lead to duplicated samples in a train set and also samples that fall in both train and test sets of a fold if a datapoint is in multiple folds' test sets. End of explanation """
NEONScience/NEON-Data-Skills
tutorials/Python/Hyperspectral/uncertainty-and-validation/hyperspectral_validation_py/hyperspectral_validation_py.ipynb
agpl-3.0
import h5py import csv import numpy as np import os import gdal import matplotlib.pyplot as plt import sys from math import floor import time import warnings warnings.filterwarnings('ignore') %matplotlib inline """ Explanation: syncID: 84457ead9b964c8d916eacde9f271ec7 title: "Assessing Spectrometer Accuracy using Validation Tarps with Python" description: "Learn to analyze the difference between rasters taken a few days apart to assess the uncertainty between days." dateCreated: 2017-06-21 authors: Tristan Goulden contributors: Donal O'Leary estimatedTime: 0.5 hour packagesLibraries: numpy, gdal, matplotlib topics: hyperspectral-remote-sensing, remote-sensing languagesTool: python dataProduct: code1: https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/tutorials/Python/Hyperspectral/uncertainty-and-validation/hyperspectral_validation_py/hyperspectral_validation_py.ipynb tutorialSeries: rs-uncertainty-py-series urlTitle: hyperspectral-validation-py In this tutorial we will learn how to retrieve relfectance curves from a pre-specified coordainte in a NEON AOP HDF5 file, learn how to read a tab delimited text file, retrieve bad band window indexes and mask portions of a reflectance curve, plot reflectance curves on a graph and save the file, gain an understanding of some sources of uncertainty in NIS data. <div id="ds-objectives" markdown="1"> ### Objectives After completing this tutorial, you will be able to: * Retrieve relfectance curves from a pre-specified coordainte in a NEON AOP HDF5 file, * Read a tab delimited text file * Retrive bad band window indexes and mask portions of a reflectance curve * Plot reflectance curves on a graph and save the file * Explain some sources of uncertainty in NEON image spectrometry data. ### Install Python Packages * **numpy** * **pandas** * **gdal** * **matplotlib** * **h5py** * **IPython.display** ### Download Data To complete this tutorial, you will use data available from the NEON 2017 Data Institute. 
This tutorial uses the following files: <ul> <li>CHEQ_Tarp_03_02_refl_bavg.txt (9 KB)</li> <li>CHEQ_Tarp_48_01_refl_bavg.txt (9 KB)</li> <li>NEON_D05_CHEQ_DP1_20160912_160540_reflectance.h5 (2.7 GB)</li> </ul> Which may be downloaded <a href="https://neondata.sharefile.com/share/view/cdc8242e24ad4517/fofeb6d6-9ebf-4310-814f-9ae4aea8fbd9" target="_blank">from our ShareFile directory here<a/>. <a href="https://neondata.sharefile.com/share/view/cdc8242e24ad4517/fofeb6d6-9ebf-4310-814f-9ae4aea8fbd9" class="link--button link--arrow"> Download Dataset</a> The LiDAR and imagery data used to create this raster teaching data subset were collected over the <a href="http://www.neonscience.org/" target="_blank"> National Ecological Observatory Network's</a> <a href="http://www.neonscience.org/science-design/field-sites/" target="_blank" >field sites</a> and processed at NEON headquarters. The entire dataset can be accessed on the <a href="http://data.neonscience.org" target="_blank"> NEON data portal</a>. These data are a part of the NEON 2017 Remote Sensing Data Institute. The complete archive may be found here -<a href="https://neondata.sharefile.com/d-s11d5c8b9c53426db"> NEON Teaching Data Subset: Data Institute 2017 Data Set</a> ### Recommended prerequisites We recommend you complete the following tutorials prior to this tutorial to have the necessary background. 1. <a href="https://www.neonscience.org/neon-aop-hdf5-py"> *NEON AOP Hyperspectral Data in HDF5 format with Python*</a> 1. <a href="https://www.neonscience.org/neon-hsi-aop-functions-python"> *Band Stacking, RGB & False Color Images, and Interactive Widgets in Python*</a> 1. <a href="https://www.neonscience.org/plot-spec-sig-python/"> *Plot a Spectral Signature in Python*</a> </div> In this tutorial we will be examing the accuracy of the Neon Imaging Spectrometer (NIS) against targets with known reflectance. 
The targets consist of two 10 x 10 m tarps which have been specially designed to have 3% reflectance (black tarp) and 48% reflectance (white tarp) across all of the wavelengths collected by the NIS (see images below). During the Sept. 12 2016 flight over the Chequamegon-Nicolet National Forest, an area in D05 which is part of Steigerwaldt (STEI) site, these tarps were deployed in a gravel pit. During the airborne overflight, observations were also taken over the tarps with an ASD field spectrometer. The ASD measurments provide a validation source against the the airborne measurements. <figure class="half"> <a href="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/neon-aop/tarps_close.jpg"> <img src="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/neon-aop/tarps_close.jpg"> </a> <a href="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/neon-aop/tarps_far.jpg"> <img src="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/neon-aop/tarps_far.jpg"> </a> </figure> <figure> <a href="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/neon-aop/tarps_aerial.jpg"> <img src="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/neon-aop/tarps_aerial.jpg"></a> <figcaption> The validation tarps, 3% reflectance (black tarp) and 48% reflectance (white tarp), laid out in the field. Source: National Ecological Observatory Network (NEON) </figcaption> </figure> To test the accuracy, we will utilize reflectance curves from the tarps as well as from the associated flight line and execute absolute and relative comparisons. 
The major error sources in the NIS can be generally categorized into the following sources: Calibration of the sensor Quality of ortho-rectification Accuracy of radiative transfer code and subsequent ATCOR interpolation Selection of atmospheric input parameters Terrain relief Terrain cover Note that the manual for ATCOR, the atmospheric correction software used by AOP, specifies the accuracy of reflectance retrievals to be between 3 and 5% of total reflectance. The tarps are located in a flat area, therefore, influences by terrain releif should be minimal. We will ahve to keep the remining errors in mind as we analyze the data. Get Started We'll start by adding all of the necessary libraries to our python script. End of explanation """ def h5refl2array(h5_filename): hdf5_file = h5py.File(h5_filename,'r') #Get the site name file_attrs_string = str(list(hdf5_file.items())) file_attrs_string_split = file_attrs_string.split("'") sitename = file_attrs_string_split[1] refl = hdf5_file[sitename]['Reflectance'] reflArray = refl['Reflectance_Data'] refl_shape = reflArray.shape wavelengths = refl['Metadata']['Spectral_Data']['Wavelength'] #Create dictionary containing relevant metadata information metadata = {} metadata['shape'] = reflArray.shape metadata['mapInfo'] = refl['Metadata']['Coordinate_System']['Map_Info'] #Extract no data value & set no data value to NaN\n", metadata['scaleFactor'] = float(reflArray.attrs['Scale_Factor']) metadata['noDataVal'] = float(reflArray.attrs['Data_Ignore_Value']) metadata['bad_band_window1'] = (refl.attrs['Band_Window_1_Nanometers']) metadata['bad_band_window2'] = (refl.attrs['Band_Window_2_Nanometers']) metadata['projection'] = refl['Metadata']['Coordinate_System']['Proj4'].value metadata['EPSG'] = int(refl['Metadata']['Coordinate_System']['EPSG Code'].value) mapInfo = refl['Metadata']['Coordinate_System']['Map_Info'].value mapInfo_string = str(mapInfo); #print('Map Info:',mapInfo_string)\n", mapInfo_split = mapInfo_string.split(",") 
#Extract the resolution & convert to floating decimal number metadata['res'] = {} metadata['res']['pixelWidth'] = mapInfo_split[5] metadata['res']['pixelHeight'] = mapInfo_split[6] #Extract the upper left-hand corner coordinates from mapInfo\n", xMin = float(mapInfo_split[3]) #convert from string to floating point number\n", yMax = float(mapInfo_split[4]) #Calculate the xMax and yMin values from the dimensions\n", xMax = xMin + (refl_shape[1]*float(metadata['res']['pixelWidth'])) #xMax = left edge + (# of columns * resolution)\n", yMin = yMax - (refl_shape[0]*float(metadata['res']['pixelHeight'])) #yMin = top edge - (# of rows * resolution)\n", metadata['extent'] = (xMin,xMax,yMin,yMax), metadata['ext_dict'] = {} metadata['ext_dict']['xMin'] = xMin metadata['ext_dict']['xMax'] = xMax metadata['ext_dict']['yMin'] = yMin metadata['ext_dict']['yMax'] = yMax hdf5_file.close return reflArray, metadata, wavelengths """ Explanation: As well as our function to read the hdf5 reflectance files and associated metadata End of explanation """ print('Start CHEQ tarp uncertainty script') ## You will need to change these filepaths according to your own machine ## As you can see here, I saved the files downloaded above into my ~/Git/data/ directory h5_filename = '/Users/olearyd/Git/data/NEON_D05_CHEQ_DP1_20160912_160540_reflectance.h5' tarp_48_filename = '/Users/olearyd/Git/data/CHEQ_Tarp_48_01_refl_bavg.txt' tarp_03_filename = '/Users/olearyd/Git/data/CHEQ_Tarp_03_02_refl_bavg.txt' """ Explanation: Define the location where you are holding the data for the data institute. The h5_filename will be the flightline which contains the tarps, and the tarp_48_filename and tarp_03_filename contain the field validated spectra for the white and black tarp respectively, organized by wavelength and reflectance. 
End of explanation """ tarp_48_center = np.array([727487,5078970]) tarp_03_center = np.array([727497,5078970]) """ Explanation: We want to pull the spectra from the airborne data from the center of the tarp to minimize any errors introduced by infiltrating light in adjecent pixels, or through errors in ortho-rectification (source 2). We have pre-determined the coordinates for the center of each tarp which are as follows: 48% reflectance tarp UTMx: 727487, UTMy: 5078970 3% reflectance tarp UTMx: 727497, UTMy: 5078970 <figure> <a href="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/neon-aop/tarp_centers.jpg"> <img src="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/neon-aop/tarp_centers.jpg"></a> <figcaption> The validation tarps, 3% reflectance (black tarp) and 48% reflectance (white tarp), laid out in the field. Source: National Ecological Observatory Network (NEON) </figcaption> </figure> Let's define these coordaintes End of explanation """ [reflArray,metadata,wavelengths] = h5refl2array(h5_filename) """ Explanation: Now we'll use our function designed for NEON AOP's HDF5 files to access the hyperspectral data End of explanation """ bad_band_window1 = (metadata['bad_band_window1']) bad_band_window2 = (metadata['bad_band_window2']) index_bad_window1 = [i for i, x in enumerate(wavelengths) if x > bad_band_window1[0] and x < bad_band_window1[1]] index_bad_window2 = [i for i, x in enumerate(wavelengths) if x > bad_band_window2[0] and x < bad_band_window2[1]] """ Explanation: Within the reflectance curves there are areas with noisey data due to atmospheric windows in the water absorption bands. For this exercise we do not want to plot these areas as they obscure detailes in the plots due to their anamolous values. The meta data assocaited with these band locations is contained in the metadata gatherd by our function. 
We will pull out these areas as 'bad band windows' and determine which indexes in the reflectance curves contain the bad bands End of explanation """ index_bad_windows = index_bad_window1+index_bad_window2 """ Explanation: Now join the list of indexes together into a single variable End of explanation """ tarp_48_data = np.genfromtxt(tarp_48_filename, delimiter = '\t') tarp_03_data = np.genfromtxt(tarp_03_filename, delimiter = '\t') """ Explanation: The reflectance data is saved in files which are 'tab delimited.' We will use a numpy function (genfromtxt) to quickly import the tarp reflectance curves observed with the ASD using the '\t' delimeter to indicate tabs are used. End of explanation """ tarp_48_data[index_bad_windows] = np.nan tarp_03_data[index_bad_windows] = np.nan """ Explanation: Now we'll set all the data inside of those windows to NaNs (not a number) so they will not be included in the plots End of explanation """ x_tarp_48_index = int((tarp_48_center[0] - metadata['ext_dict']['xMin'])/float(metadata['res']['pixelWidth'])) y_tarp_48_index = int((metadata['ext_dict']['yMax'] - tarp_48_center[1])/float(metadata['res']['pixelHeight'])) x_tarp_03_index = int((tarp_03_center[0] - metadata['ext_dict']['xMin'])/float(metadata['res']['pixelWidth'])) y_tarp_03_index = int((metadata['ext_dict']['yMax'] - tarp_03_center[1])/float(metadata['res']['pixelHeight'])) """ Explanation: The next step is to determine which pixel in the reflectance data belongs to the center of each tarp. To do this, we will subtract the tarp center pixel location from the upper left corner pixels specified in the map info of the H5 file. This information is saved in the metadata dictionary output from our function that reads NEON AOP HDF5 files. The difference between these coordaintes gives us the x and y index of the reflectance curve. 
End of explanation """ plt.figure(1) tarp_48_reflectance = np.asarray(reflArray[y_tarp_48_index,x_tarp_48_index,:], dtype=np.float32)/metadata['scaleFactor'] tarp_48_reflectance[index_bad_windows] = np.nan plt.plot(wavelengths,tarp_48_reflectance,label = 'Airborne Reflectance') plt.plot(wavelengths,tarp_48_data[:,1], label = 'ASD Reflectance') plt.title('CHEQ 20160912 48% tarp') plt.xlabel('Wavelength (nm)'); plt.ylabel('Refelctance (%)') plt.legend() #plt.savefig('CHEQ_20160912_48_tarp.png',dpi=300,orientation='landscape',bbox_inches='tight',pad_inches=0.1) plt.figure(2) tarp_03_reflectance = np.asarray(reflArray[y_tarp_03_index,x_tarp_03_index,:], dtype=np.float32)/ metadata['scaleFactor'] tarp_03_reflectance[index_bad_windows] = np.nan plt.plot(wavelengths,tarp_03_reflectance,label = 'Airborne Reflectance') plt.plot(wavelengths,tarp_03_data[:,1],label = 'ASD Reflectance') plt.title('CHEQ 20160912 3% tarp') plt.xlabel('Wavelength (nm)'); plt.ylabel('Refelctance (%)') plt.legend() #plt.savefig('CHEQ_20160912_3_tarp.png',dpi=300,orientation='landscape',bbox_inches='tight',pad_inches=0.1) """ Explanation: Next, we will plot both the curve from the airborne data taken at the center of the tarps as well as the curves obtained from the ASD data to provide a visualisation of their consistency for both tarps. Once generated, we will also save the figure to a pre-determined location. 
End of explanation """ plt.figure(3) plt.plot(wavelengths,tarp_48_reflectance-tarp_48_data[:,1]) plt.title('CHEQ 20160912 48% tarp absolute difference') plt.xlabel('Wavelength (nm)'); plt.ylabel('Absolute Refelctance Difference (%)') #plt.savefig('CHEQ_20160912_48_tarp_absolute_diff.png',dpi=300,orientation='landscape',bbox_inches='tight',pad_inches=0.1) plt.figure(4) plt.plot(wavelengths,tarp_03_reflectance-tarp_03_data[:,1]) plt.title('CHEQ 20160912 3% tarp absolute difference') plt.xlabel('Wavelength (nm)'); plt.ylabel('Absolute Refelctance Difference (%)') #plt.savefig('CHEQ_20160912_3_tarp_absolute_diff.png',dpi=300,orientation='landscape',bbox_inches='tight',pad_inches=0.1) """ Explanation: This produces plots showing the results of the ASD and airborne measurements over the 48% tarp. Visually, the comparison between the two appears to be fairly good. However, over the 3% tarp we appear to be over-estimating the reflectance. Large absolute differences could be associated with ATCOR input parameters (source 4). For example, the user must input the local visibility, which is related to aerosal optical thickness (AOT). We don't measure this at every site, therefore input a standard parameter for all sites. Given the 3% reflectance tarp has much lower overall reflactance, it may be more informative to determine what the absolute difference between the two curves are and plot that as well. 
End of explanation """ plt.figure(5) plt.plot(wavelengths,100*np.divide(tarp_48_reflectance-tarp_48_data[:,1],tarp_48_data[:,1])) plt.title('CHEQ 20160912 48% tarp percent difference') plt.xlabel('Wavelength (nm)'); plt.ylabel('Percent Refelctance Difference') plt.ylim((-100,100)) #plt.savefig('CHEQ_20160912_48_tarp_relative_diff.png',dpi=300,orientation='landscape',bbox_inches='tight',pad_inches=0.1) plt.figure(6) plt.plot(wavelengths,100*np.divide(tarp_03_reflectance-tarp_03_data[:,1],tarp_03_data[:,1])) plt.title('CHEQ 20160912 3% tarp percent difference') plt.xlabel('Wavelength (nm)'); plt.ylabel('Percent Refelctance Difference') plt.ylim((-100,150)) #plt.savefig('CHEQ_20160912_3_tarp_relative_diff.png',dpi=300,orientation='landscape',bbox_inches='tight',pad_inches=0.1) """ Explanation: From this we are able to see that the 48% tarp actually has larger absolute differences than the 3% tarp. The 48% tarp performs poorly at the shortest and longest waveleghts as well as near the edges of the 'bad band windows.' This is related to difficulty in calibrating the sensor in these sensitive areas (source 1). Let's now determine the result of the percent difference, which is the metric used by ATCOR to report accuracy. We can do this by calculating the ratio of the absolute difference between curves to the total reflectance End of explanation """
phobson/statsmodels
examples/notebooks/glm_formula.ipynb
bsd-3-clause
from __future__ import print_function
import statsmodels.api as sm
import statsmodels.formula.api as smf

# Load the Star98 dataset shipped with statsmodels as a pandas DataFrame.
star98 = sm.datasets.star98.load_pandas().data

# R-style formula: several main effects plus two three-way interaction groups.
formula = 'SUCCESS ~ LOWINC + PERASIAN + PERBLACK + PERHISP + PCTCHRT + \
PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'

# Keep only the columns used by the model.
dta = star98[['NABOVE', 'NBELOW', 'LOWINC', 'PERASIAN', 'PERBLACK', 'PERHISP',
              'PCTCHRT', 'PCTYRRND', 'PERMINTE', 'AVYRSEXP', 'AVSALK',
              'PERSPENK', 'PTRATIO', 'PCTAF']]

# SUCCESS is the fraction NABOVE / (NABOVE + NBELOW); note that pop() also
# removes the NBELOW column from dta, and NABOVE is dropped explicitly below,
# so the count columns do not appear on the right-hand side of the formula.
endog = dta['NABOVE'] / (dta['NABOVE'] + dta.pop('NBELOW'))
del dta['NABOVE']
dta['SUCCESS'] = endog
"""
Explanation: Generalized Linear Models (Formula)
This notebook illustrates how you can use R-style formulas to fit Generalized Linear Models.
To begin, we load the Star98 dataset and we construct a formula and pre-process the data:
End of explanation
"""
# Fit a binomial GLM from the formula; the response SUCCESS is a proportion.
mod1 = smf.glm(formula=formula, data=dta, family=sm.families.Binomial()).fit()
mod1.summary()
"""
Explanation: Then, we fit the GLM model:
End of explanation
"""
def double_it(x):
    # Any Python callable visible from the formula namespace can be used as a
    # transform inside the formula string (see double_it(LOWINC) below).
    return 2 * x

formula = 'SUCCESS ~ double_it(LOWINC) + PERASIAN + PERBLACK + PERHISP + PCTCHRT + \
PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'
mod2 = smf.glm(formula=formula, data=dta, family=sm.families.Binomial()).fit()
mod2.summary()
"""
Explanation: Finally, we define a function to operate customized data transformation using the formula framework:
End of explanation
"""
# Doubling the regressor halves its coefficient, so these two values agree.
print(mod1.params[1])
print(mod2.params[1] * 2)
"""
Explanation: As expected, the coefficient for double_it(LOWINC) in the second model is half the size of the LOWINC coefficient from the first model:
End of explanation
"""
edwardd1/phys202-2015-work
assignments/assignment05/InteractEx03.ipynb
mit
%matplotlib inline from matplotlib import pyplot as plt import numpy as np from IPython.html.widgets import interact, interactive, fixed from IPython.display import display """ Explanation: Interact Exercise 3 Imports End of explanation """ def soliton(x, t, c, a): """Return phi(x, t) for a soliton wave with constants c and a.""" incosh = ((c ** .5)/2)*(x - (c * t) - a) outcosh = .5*c*((1/np.cosh(incosh)) ** 2) return outcosh #raise NotImplementedError() assert np.allclose(soliton(np.array([0]),0.0,1.0,0.0), np.array([0.5])) """ Explanation: Using interact for animation with data A soliton is a constant velocity wave that maintains its shape as it propagates. They arise from non-linear wave equations, such has the Korteweg–de Vries equation, which has the following analytical solution: $$ \phi(x,t) = \frac{1}{2} c \mathrm{sech}^2 \left[ \frac{\sqrt{c}}{2} \left(x - ct - a \right) \right] $$ The constant c is the velocity and the constant a is the initial location of the soliton. Define soliton(x, t, c, a) function that computes the value of the soliton wave for the given arguments. Your function should work when the postion x or t are NumPy arrays, in which case it should return a NumPy array itself. End of explanation """ tmin = 0.0 tmax = 10.0 tpoints = 100 t = np.linspace(tmin, tmax, tpoints) xmin = 0.0 xmax = 10.0 xpoints = 200 x = np.linspace(xmin, xmax, xpoints) c = 1.0 a = 0.0 """ Explanation: To create an animation of a soliton propagating in time, we are going to precompute the soliton data and store it in a 2d array. 
To set this up, we create the following variables and arrays: End of explanation """ phi = np.zeros((xpoints,tpoints), dtype=(float)) x1, t1 = np.meshgrid(t, x) phi = soliton(x1, t1, c, a) #print (phi.shape) #raise NotImplementedError() assert phi.shape==(xpoints, tpoints) assert phi.ndim==2 assert phi.dtype==np.dtype(float) assert phi[0,0]==soliton(x[0],t[0],c,a) """ Explanation: Compute a 2d NumPy array called phi: It should have a dtype of float. It should have a shape of (xpoints, tpoints). phi[i,j] should contain the value $\phi(x[i],t[j])$. End of explanation """ def plot_soliton_data(i=0.): #Changed to a float for better graph manipulation """Plot the soliton data at t[i] versus x.""" x1, t1 = np.meshgrid(i, x) phi = soliton(x1, t1, c, a) return plt.plot(phi) #raise NotImplementedError() plot_soliton_data(0) plt.ylim(0.,.6) assert True # leave this for grading the plot_soliton_data function """ Explanation: Write a plot_soliton_data(i) function that plots the soliton wave $\phi(x, t[i])$. Customize your plot to make it effective and beautiful. End of explanation """ interact(plot_soliton_data, a=(0.,10.,.1)); plt.ylim(0.,.6) #raise NotImplementedError() assert True # leave this for grading the interact with plot_soliton_data cell """ Explanation: Use interact to animate the plot_soliton_data function versus time. End of explanation """
Intel-Corporation/tensorflow
tensorflow/lite/g3doc/performance/quantization_debugger.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2021 The TensorFlow Authors. End of explanation """ # Quantization debugger is available from TensorFlow 2.7.0 !pip uninstall -y tensorflow !pip install tf-nightly !pip install tensorflow_datasets --upgrade # imagenet_v2 needs latest checksum import matplotlib.pyplot as plt import numpy as np import pandas as pd import tensorflow as tf import tensorflow_datasets as tfds import tensorflow_hub as hub #@title Boilerplates and helpers MODEL_URI = 'https://tfhub.dev/google/imagenet/mobilenet_v3_small_100_224/classification/5' def process_image(data): data['image'] = tf.image.resize(data['image'], (224, 224)) / 255.0 return data # Representative dataset def representative_dataset(dataset): def _data_gen(): for data in dataset.batch(1): yield [data['image']] return _data_gen def eval_tflite(tflite_model, dataset): """Evaluates tensorflow lite classification model with the given dataset.""" interpreter = tf.lite.Interpreter(model_content=tflite_model) interpreter.allocate_tensors() input_idx = interpreter.get_input_details()[0]['index'] output_idx = interpreter.get_output_details()[0]['index'] results = [] for data in representative_dataset(dataset)(): interpreter.set_tensor(input_idx, data[0]) interpreter.invoke() results.append(interpreter.get_tensor(output_idx).flatten()) results = np.array(results) gt_labels = np.array(list(dataset.map(lambda data: data['label'] + 1))) accuracy = ( 
np.sum(np.argsort(results, axis=1)[:, -5:] == gt_labels.reshape(-1, 1)) / gt_labels.size) print(f'Top-5 accuracy (quantized): {accuracy * 100:.2f}%') model = tf.keras.Sequential([ tf.keras.layers.Input(shape=(224, 224, 3), batch_size=1), hub.KerasLayer(MODEL_URI) ]) model.compile( loss='sparse_categorical_crossentropy', metrics='sparse_top_k_categorical_accuracy') model.build([1, 224, 224, 3]) # Prepare dataset with 100 examples ds = tfds.load('imagenet_v2', split='test[:1%]') ds = ds.map(process_image) converter = tf.lite.TFLiteConverter.from_keras_model(model) converter.representative_dataset = representative_dataset(ds) converter.optimizations = [tf.lite.Optimize.DEFAULT] quantized_model = converter.convert() test_ds = ds.map(lambda data: (data['image'], data['label'] + 1)).batch(16) loss, acc = model.evaluate(test_ds) print(f'Top-5 accuracy (float): {acc * 100:.2f}%') eval_tflite(quantized_model, ds) """ Explanation: Inspecting Quantization Errors with Quantization Debugger <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/lite/performance/quantization_debugger"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/quantization_debugger.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/performance/quantization_debugger.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/performance/quantization_debugger.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> <td> <a 
href="https://tfhub.dev/google/imagenet/mobilenet_v3_small_100_224/classification/5"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a> </td> </table> Although full-integer quantization provides improved model size and latency, the quantized model won't always work as expected. It's usually expected for the model quality (e.g. accuracy, mAP, WER) to be slightly lower than the original float model. However, there are cases where the model quality can go below your expectation or generated completely wrong results. When this problem happens, it's tricky and painful to spot the root cause of the quantization error, and it's even more difficult to fix the quantization error. To assist this model inspection process, quantization debugger can be used to identify problematic layers, and selective quantization can leave those problematic layers in float so that the model accuracy can be recovered at the cost of reduced benefit from quantization. Note: This API is experimental, and there might be breaking changes in the API in the course of improvements. Quantization Debugger Quantization debugger makes it possible to do quantization quality metric analysis in the existing model. Quantization debugger can automate processes for running model with a debug dataset, and collecting quantization quality metrics for each tensors. Note: Quantization debugger and selective quantization currently only works for full-integer quantization with int8 activations. Prerequisites If you already have a pipeline to quantize a model, you have all necessary pieces to run quantization debugger! Model to quantize Representative dataset In addition to model and data, you will need to use a data processing framework (e.g. pandas, Google Sheets) to analyze the exported results. Setup This section prepares libraries, MobileNet v3 model, and test dataset of 100 images. 
End of explanation """ converter = tf.lite.TFLiteConverter.from_keras_model(model) converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.representative_dataset = representative_dataset(ds) # my_debug_dataset should have the same format as my_representative_dataset debugger = tf.lite.experimental.QuantizationDebugger( converter=converter, debug_dataset=representative_dataset(ds)) """ Explanation: We can see that the original model has a much higher top-5 accuracy for our small dataset, while the quantized model has a significant accuracy loss. Step 1. Debugger preparation Easiest way to use the quantization debugger is to provide tf.lite.TFLiteConverter that you have been using to quantize the model. End of explanation """ debugger.run() """ Explanation: Step 2. Running the debugger and getting the results When you call QuantizationDebugger.run(), the debugger will log differences between float tensors and quantized tensors for the same op location, and process them with given metrics. End of explanation """ RESULTS_FILE = '/tmp/debugger_results.csv' with open(RESULTS_FILE, 'w') as f: debugger.layer_statistics_dump(f) !head /tmp/debugger_results.csv """ Explanation: The processed metrics can be accessed with QuantizationDebugger.layer_statistics, or can be dumped to a text file in CSV format with QuantizationDebugger.layer_statistics_dump(). End of explanation """ layer_stats = pd.read_csv(RESULTS_FILE) layer_stats.head() """ Explanation: For each row in the dump, the op name and index comes first, followed by quantization parameters and error metrics (including user-defined error metrics, if any). The resulting CSV file can be used to pick problematic layers with large quantization error metrics. With pandas or other data processing libraries, we can inspect detailed per-layer error metrics. 
End of explanation """ layer_stats['range'] = 255.0 * layer_stats['scale'] layer_stats['rmse/scale'] = layer_stats.apply( lambda row: np.sqrt(row['mean_squared_error']) / row['scale'], axis=1) layer_stats[['op_name', 'range', 'rmse/scale']].head() plt.figure(figsize=(15, 5)) ax1 = plt.subplot(121) ax1.bar(np.arange(len(layer_stats)), layer_stats['range']) ax1.set_ylabel('range') ax2 = plt.subplot(122) ax2.bar(np.arange(len(layer_stats)), layer_stats['rmse/scale']) ax2.set_ylabel('rmse/scale') plt.show() """ Explanation: Step 3. Data analysis There are various ways to analyze the resulting. First, let's add some useful metrics derived from the debugger's outputs. (scale means the quantization scale factor for each tensor.) Range (256 / scale) RMSE / scale (sqrt(mean_squared_error) / scale) The RMSE / scale is close to 1 / sqrt(12) (~ 0.289) when quantized distribution is similar to the original float distribution, indicating a good quantized model. The larger the value is, it's more likely for the layer not being quantized well. End of explanation """ layer_stats[layer_stats['rmse/scale'] > 0.7][[ 'op_name', 'range', 'rmse/scale', 'tensor_name' ]] """ Explanation: There are many layers with wide ranges, and some layers that have high RMSE/scale values. Let's get the layers with high error metrics. End of explanation """ suspected_layers = list( layer_stats[layer_stats['rmse/scale'] > 0.7]['tensor_name']) """ Explanation: With these layers, you can try selective quantization to see if not quantizing those layers improves model quality. End of explanation """ suspected_layers.extend(list(layer_stats[:5]['tensor_name'])) """ Explanation: In addition to these, skipping quantization for the first few layers also helps improving quantized model's quality. 
End of explanation """ debug_options = tf.lite.experimental.QuantizationDebugOptions( denylisted_nodes=suspected_layers) debugger = tf.lite.experimental.QuantizationDebugger( converter=converter, debug_dataset=representative_dataset(ds), debug_options=debug_options) selective_quantized_model = debugger.get_nondebug_quantized_model() eval_tflite(selective_quantized_model, ds) """ Explanation: Selective Quantization Selective quantization skips quantization for some nodes, so that the calculation can happen in the original floating-point domain. When correct layers are skipped, we can expect some model quality recovery at the cost of increased latency and model size. However, if you're planning to run quantized models on integer-only accelerators (e.g. Hexagon DSP, EdgeTPU), selective quantization would cause fragmentation of the model and would result in slower inference latency mainly caused by data transfer cost between CPU and those accelerators. To prevent this, you can consider running quantization aware training to keep all the layers in integer while preserving the model accuracy. Quantization debugger's option accepts denylisted_nodes and denylisted_ops options for skipping quantization for specific layers, or all instances of specific ops. Using suspected_layers we prepared from the previous step, we can use quantization debugger to get a selectively quantized model. End of explanation """ debug_options = tf.lite.experimental.QuantizationDebugOptions( denylisted_ops=['MEAN']) debugger = tf.lite.experimental.QuantizationDebugger( converter=converter, debug_dataset=representative_dataset(ds), debug_options=debug_options) selective_quantized_model = debugger.get_nondebug_quantized_model() eval_tflite(selective_quantized_model, ds) """ Explanation: The accuracy is still lower compared to the original float model, but we have notable improvement from the whole quantized model by skipping quantization for ~10 layers out of 111 layers. 
You can also try to not quantized all ops in the same class. For example, to skip quantization for all mean ops, you can pass MEAN to denylisted_ops. End of explanation """ debug_options = tf.lite.experimental.QuantizationDebugOptions( layer_debug_metrics={ 'mean_abs_error': (lambda diff: np.mean(np.abs(diff))) }, layer_direct_compare_metrics={ 'correlation': lambda f, q, s, zp: (np.corrcoef(f.flatten(), (q.flatten() - zp) / s)[0, 1]) }, model_debug_metrics={ 'argmax_accuracy': (lambda f, q: np.mean(np.argmax(f) == np.argmax(q))) }) debugger = tf.lite.experimental.QuantizationDebugger( converter=converter, debug_dataset=representative_dataset(ds), debug_options=debug_options) debugger.run() CUSTOM_RESULTS_FILE = '/tmp/debugger_results.csv' with open(CUSTOM_RESULTS_FILE, 'w') as f: debugger.layer_statistics_dump(f) custom_layer_stats = pd.read_csv(CUSTOM_RESULTS_FILE) custom_layer_stats[['op_name', 'mean_abs_error', 'correlation']].tail() """ Explanation: With these techniques, we are able to improve the quantized MobileNet V3 model accuracy. Next we'll explore advanced techniques to improve the model accuracy even more. Advanced usages Whith following features, you can futher customize your debugging pipeline. Custom metrics By default, the quantization debugger emits five metrics for each float-quant difference: tensor size, standard deviation, mean error, max absolute error, and mean squared error. You can add more custom metrics by passing them to options. For each metrics, the result should be a single float value and the resulting metric will be an average of metrics from all examples. layer_debug_metrics: calculate metric based on diff for each op outputs from float and quantized op outputs. layer_direct_compare_metrics: rather than getting diff only, this will calculate metric based on raw float and quantized tensors, and its quantization parameters (scale, zero point) model_debug_metrics: only used when float_model_(path|content) is passed to the debugger. 
In addition to the op-level metrics, final layer output is compared to the reference output from the original float model. End of explanation """ debugger.model_statistics """ Explanation: The result of model_debug_metrics can be separately seen from debugger.model_statistics. End of explanation """ from tensorflow.lite.python import convert """ Explanation: Using (internal) mlir_quantize API to access in-depth features Note: Some features in the folowing section, TFLiteConverter._experimental_calibrate_only and converter.mlir_quantize are experimental internal APIs, and subject to change in a non-backward compatible way. End of explanation """ converter = tf.lite.TFLiteConverter.from_keras_model(model) converter.representative_dataset = representative_dataset(ds) converter.optimizations = [tf.lite.Optimize.DEFAULT] converter._experimental_calibrate_only = True calibrated_model = converter.convert() # Note that enable_numeric_verify and enable_whole_model_verify are set. quantized_model = convert.mlir_quantize( calibrated_model, enable_numeric_verify=True, enable_whole_model_verify=True) debugger = tf.lite.experimental.QuantizationDebugger( quant_debug_model_content=quantized_model, debug_dataset=representative_dataset(ds)) """ Explanation: Whole model verify mode The default behavior for the debug model generation is per-layer verify. In this mode, the input for float and quantize op pair is from the same source (previous quantized op). Another mode is whole-model verify, where the float and quantize models are separated. This mode would be useful to observe how the error is being propagated down the model. To enable, enable_whole_model_verify=True to convert.mlir_quantize while generating the debug model manually. 
End of explanation """ selective_quantized_model = convert.mlir_quantize( calibrated_model, denylisted_nodes=suspected_layers) eval_tflite(selective_quantized_model, ds) """ Explanation: Selective quantization from an already calibrated model You can directly call convert.mlir_quantize to get the selective quantized model from already calibrated model. This would be particularly useful when you want to calibrate the model once, and experiment with various denylist combinations. End of explanation """
tpin3694/tpin3694.github.io
sql/aliases.ipynb
mit
# Ignore %load_ext sql %sql sqlite:// %config SqlMagic.feedback = False """ Explanation: Title: Using Aliases Slug: aliases Summary: Using Aliases in SQL. Date: 2017-01-16 12:00 Category: SQL Tags: Basics Authors: Chris Albon Note: This tutorial was written using Catherine Devlin's SQL in Jupyter Notebooks library. If you have not using a Jupyter Notebook, you can ignore the two lines of code below and any line containing %%sql. Furthermore, this tutorial uses SQLite's flavor of SQL, your version might have some differences in syntax. For more, check out Learning SQL by Alan Beaulieu. End of explanation """ %%sql -- Create a table of criminals CREATE TABLE criminals (pid, name, age, sex, city, minor); INSERT INTO criminals VALUES (412, 'James Smith', 15, 'M', 'Santa Rosa', 1); INSERT INTO criminals VALUES (234, 'Bill James', 22, 'M', 'Santa Rosa', 0); INSERT INTO criminals VALUES (632, 'Stacy Miller', 23, 'F', 'Santa Rosa', 0); INSERT INTO criminals VALUES (621, 'Betty Bob', NULL, 'F', 'Petaluma', 1); INSERT INTO criminals VALUES (162, 'Jaden Ado', 49, 'M', NULL, 0); INSERT INTO criminals VALUES (901, 'Gordon Ado', 32, 'F', 'Santa Rosa', 0); INSERT INTO criminals VALUES (512, 'Bill Byson', 21, 'M', 'Santa Rosa', 0); INSERT INTO criminals VALUES (411, 'Bob Iton', NULL, 'M', 'San Francisco', 0); """ Explanation: Create Data End of explanation """ %%sql -- Select all names from the table 'c' SELECT c.name -- From the criminals table, now called c FROM criminals AS c """ Explanation: Alias Criminals Table A C, Then Select All Names From C End of explanation """
jhillairet/scikit-rf
doc/source/examples/metrology/One Port Tiered Calibration.ipynb
bsd-3-clause
!ls {"oneport_tiered_calibration/"} """ Explanation: One Port Tiered Calibration Intro A one-port network analyzer can be used to measure a two-port device, provided that the device is reciprocal. This is accomplished by performing two calibrations, which is why its called a tiered calibration. First, the VNA is calibrated at the test-port like normal. This is called the first tier. Next, the device is connected to the test-port, and a calibration is performed at the far end of the device, the second tier. A diagram is shown below, This notebook will demonstrate how to use skrf to do a two-tiered one-port calibration. We'll use data that was taken to characterize a waveguide-to-CPW probe. So, for this specific example the diagram above looks like: Some Data The data available is the folders 'tier1/' and 'tier2/'. End of explanation """ !ls {"oneport_tiered_calibration/tier1/"} """ Explanation: (if you dont have the git repo for these examples, the data for this notebook can be found here) In each folder you will find the two sub-folders, called 'ideals/' and 'measured/'. These contain touchstone files of the calibration standards ideal and measured responses, respectively. 
End of explanation """ !ls {"oneport_tiered_calibration/tier1/measured/"} """ Explanation: The first tier is at waveguide interface, and consisted of the following set of standards short delay short load radiating open (literally an open waveguide) End of explanation """ from skrf.calibration import OnePort import skrf as rf %matplotlib inline from pylab import * rf.stylely() tier1_ideals = rf.read_all_networks('oneport_tiered_calibration/tier1/ideals/') tier1_measured = rf.read_all_networks('oneport_tiered_calibration/tier1/measured/') tier1 = OnePort(measured = tier1_measured, ideals = tier1_ideals, name = 'tier1', sloppy_input=True) tier1 """ Explanation: Creating Calibrations Tier 1 First defining the calibration for Tier 1 End of explanation """ tier2_ideals = rf.read_all_networks('oneport_tiered_calibration/tier2/ideals/') tier2_measured = rf.read_all_networks('oneport_tiered_calibration/tier2/measured/') tier2 = OnePort(measured = tier2_measured, ideals = tier2_ideals, name = 'tier2', sloppy_input=True) tier2 """ Explanation: Because we saved corresponding ideal and measured standards with identical names, the Calibration will automatically align our standards upon initialization. (More info on creating Calibration objects this can be found in the docs.) Similarly for the second tier 2, Tier 2 End of explanation """ tier1.error_ntwk.plot_s_db() title('Tier 1 Error Network') """ Explanation: Error Networks Each one-port Calibration contains a two-port error network, that is determined from the calculated error coefficients. The error network for tier1 models the VNA, while the error network for tier2 represents the VNA and the DUT. These can be visualized through the parameter 'error_ntwk'. 
For tier 1, End of explanation """ tier2.error_ntwk.plot_s_db() title('Tier 2 Error Network') """ Explanation: Similarly for tier 2, End of explanation """ dut = tier1.error_ntwk.inv ** tier2.error_ntwk dut.name = 'probe' dut.plot_s_db() title('Probe S-parameters') ylim(-60,10) """ Explanation: De-embedding the DUT As previously stated, the error network for tier1 models the VNA, and the error network for tier2 represents the VNA+DUT. So to determine the DUT's response, we cascade the inverse S-parameters of the VNA with the VNA+DUT. $$ DUT = VNA^{-1}\cdot (VNA \cdot DUT)$$ In skrf, this is done as follows End of explanation """ !ls {"probe*"} """ Explanation: You may want to save this to disk, for future use, dut.write_touchstone() End of explanation """
RNAer/Calour
doc/source/notebooks/microbiome_step_by_step.ipynb
bsd-3-clause
import calour as ca """ Explanation: Microbiome experiment step-by-step analysis This is a jupyter notebook example of how to load, process and plot data from a microbiome experiment using Calour. Setup Import the calour module End of explanation """ ca.set_log_level(11) """ Explanation: (optional) Set the level of feedback messages from calour can use: 1 for debug (lots of feedback on each command) 11 for info (useful information from some commands) 21 for warning (just warning messages) The Calour default is warning (21) End of explanation """ %matplotlib notebook """ Explanation: Also enable interactive plots inside the jupyter notebook End of explanation """ dat=ca.read_amplicon('data/chronic-fatigue-syndrome.biom', 'data/chronic-fatigue-syndrome.sample.txt', normalize=10000,min_reads=1000) print(dat) """ Explanation: Loading the data For an amplicon experiment we use ca.read_amplicon() First parameter is the location+name of the biom table file (can be hdf5/json/txt biom table - see here for details) Second (optional) parameter is the sample mapping file locaion+name. First column should be the sample id (identical to the sample ids in the biom table). Rest of the column are information fields about each sample. normalize=XXX : tells calour to rescale each sample to XXX reads (by dividing each feature frequency by the total number of reads in the sample and multiplying by XXX). Alternatively, can use normalize=None to skip normalization (i.e. in the case the biom table is already rarified) min_reads=XXX : throw away samples with less than min_reads total (before normalization). Useful to get rid of samples with small number of reads. Can use min_reads=None to keep all samples. We will use the data from: Giloteaux, L., Goodrich, J.K., Walters, W.A., Levine, S.M., Ley, R.E. and Hanson, M.R., 2016. Reduced diversity and altered composition of the gut microbiome in individuals with myalgic encephalomyelitis/chronic fatigue syndrome. Microbiome, 4(1), p.30. 
End of explanation """ dat=dat.filter_abundance(10) """ Explanation: Process the data Get rid of the features (bacteria) with small amount of reads We throw away all features with total reads (over all samples) < 10 (after each sample was normalized to 10k reads/sample). So a bacteria present (with 1 read) in 10 samples will be kept, as well as a bacteria present in only one sample, with 10 reads in this sample. Note alternatively we could filter based on mean reads/sample or fraction of samples where the feature is present. Each method filters away slightly different bacteria. See filtering notebook for details on the filtering functions. End of explanation """ datc=dat.cluster_features() """ Explanation: Cluster (reorder) the features so similarly behaving bacteria are close to each other Features are clustered (hierarchical clustering) based on euaclidian distance between features (over all samples) following normalizing each feature to mean 0 std 1. For more details and examples, see sorting notebook or cluster_features documentation Note that if we have a lot of features, clustering is slow, so it is recommended to first filter away the non-interesting features. End of explanation """ datc=datc.sort_samples('Physical_functioning') datc=datc.sort_samples('Subject') """ Explanation: Sort the samples according to physical functioning and Disease state Note that order within each group of similar value is maintained. We first sort by physical functioning, then sort by the disease state. So within each disease state, samples will still be sorted by physical functioning. End of explanation """ datc.plot(sample_field='Subject', gui='jupyter') """ Explanation: Plotting the data Columns (x-axis) are the samples, rows (y-axis) are the features. We will show on the x-axis the host-individual field of each sample. we will use the jupyter notebook GUI so we will see the interactive plot in the notebook. 
Alternatively we could use the qt5 GUI to see the plot in a separate standalone window. A few cool things we can do with the interactive plot: Click with the mouse on the heatmap to see details about the feature/sample selected (including information from dbBact). use SHIFT+UP or SHIFT+DOWN to zoom in/out on the features use UP/DOWN to scroll up/down on the features use SHIFT+RIGHT or SHIFT+LEFT to zoom in/out on the samples use RIGHT/LEFT to scroll left/right on the samples See here for more details End of explanation """ datc=datc.sort_samples('Sex') datc=datc.sort_samples('Subject') datc.plot(sample_field='Subject', gui='jupyter',barx_fields=['Sex']) """ Explanation: Adding a field to the top bar Now let's add the values of the "Sex" field into the xbar on top First we'll also sort by sex, so values will be continuous (note we then sort by the disease state to get the two groups separated). End of explanation """ dd=datc.diff_abundance(field='Subject',val1='Control',val2='Patient', random_seed=2018) """ Explanation: Differential abundance testing Let's look for bacteria separating sick from healthy We ask it to find all bacteria significantly different between samples with 'Control' and 'Patient' in the 'Subject' field. By default calour uses the mean of the ranks of each feature (over all samples), with dsFDR multiple hypothesis correction. For more information, see notebook and function doc End of explanation """ dd.plot(sample_field='Subject', gui='jupyter') """ Explanation: Plotting the differentially abundant features Let's plot to see the behavior of these bacteria. The output of diff_abundance is an Experiment with only the significant bacteria, which are sorted by the effect size. On the bottom is the bacteria with the largest effect size (higher in Control compared to Patient). 
End of explanation """ ax, enriched=dd.plot_diff_abundance_enrichment(term_type='combined',ignore_exp=[12]) """ Explanation: dbBact term enrichment We can ask what is special in the bacteria significanly higher in the Control vs. the Patient group and vice versa. We supply the parameter ignore_exp=[12] to ignore annotations regarding this experiment (expid=12) since it is already in the dbBact database. Note since we need to get the per-feature annotations from dbBact, we need a live internet connection to run this command. End of explanation """ enriched.feature_metadata """ Explanation: The enriched terms are in a calour experiment class (terms are features, bacteria are samples), so we can see the list of enriched terms with the p-value (pval) and effect size (odif) End of explanation """
materialsvirtuallab/matgenb
notebooks/2018-07-24-Adsorption on solid surfaces.ipynb
bsd-3-clause
# Import statements from pymatgen import Structure, Lattice, MPRester, Molecule from pymatgen.analysis.adsorption import * from pymatgen.core.surface import generate_all_slabs from pymatgen.symmetry.analyzer import SpacegroupAnalyzer from matplotlib import pyplot as plt %matplotlib inline # Note that you must provide your own API Key, which can # be accessed via the Dashboard at materialsproject.org mpr = MPRester() """ Explanation: Supplemental Information This notebook is intended to serve as a supplement to the manuscript "High-throughput workflows for determining adsorption energies on solid surfaces." It outlines basic use of the code and workflow software that has been developed for processing surface slabs and placing adsorbates according to symmetrically distinct sites on surface facets. Installation To use this notebook, we recommend installing python via Anaconda, which includes jupyter and the associated iPython notebook software. The code used in this project primarily makes use of two packages, pymatgen and atomate, which are installable via pip or the matsci channel on conda (e. g. conda install -c matsci pymatgen atomate). Development versions with editable code may be installed by cloning the repositories and using python setup.py develop. Example 1: AdsorbateSiteFinder (pymatgen) An example using the the AdsorbateSiteFinder class in pymatgen is shown below. We begin with an import statement for the necessay modules. To use the MP RESTful interface, you must provide your own API key either in the MPRester call i.e. mpr=MPRester("YOUR_API_KEY") or provide in in your .pmgrc.yaml configuration file. API keys can be accessed at materialsproject.org under your "Dashboard." 
End of explanation """ fcc_ni = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3.5), ["Ni"], [[0, 0, 0]]) slabs = generate_all_slabs(fcc_ni, max_index=1, min_slab_size=8.0, min_vacuum_size=10.0) ni_111 = [slab for slab in slabs if slab.miller_index==(1,1,1)][0] """ Explanation: We create a simple fcc structure, generate it's distinct slabs, and select the slab with a miller index of (1, 1, 1). End of explanation """ asf_ni_111 = AdsorbateSiteFinder(ni_111) ads_sites = asf_ni_111.find_adsorption_sites() print(ads_sites) assert len(ads_sites) == 4 """ Explanation: We make an instance of the AdsorbateSiteFinder and use it to find the relevant adsorption sites. End of explanation """ fig = plt.figure() ax = fig.add_subplot(111) plot_slab(ni_111, ax, adsorption_sites=True) """ Explanation: We visualize the sites using a tool from pymatgen. End of explanation """ fig = plt.figure() ax = fig.add_subplot(111) adsorbate = Molecule("H", [[0, 0, 0]]) ads_structs = asf_ni_111.generate_adsorption_structures(adsorbate, repeat=[1, 1, 1]) plot_slab(ads_structs[0], ax, adsorption_sites=False, decay=0.09) """ Explanation: Use the AdsorbateSiteFinder.generate_adsorption_structures method to generate structures of adsorbates. 
End of explanation """ fig = plt.figure() axes = [fig.add_subplot(2, 3, i) for i in range(1, 7)] mats = {"mp-23":(1, 0, 0), # FCC Ni "mp-2":(1, 1, 0), # FCC Au "mp-13":(1, 1, 0), # BCC Fe "mp-33":(0, 0, 1), # HCP Ru "mp-30": (2, 1, 1), "mp-5229":(1, 0, 0), } # Cubic SrTiO3 #"mp-2133":(0, 1, 1)} # Wurtzite ZnO for n, (mp_id, m_index) in enumerate(mats.items()): struct = mpr.get_structure_by_material_id(mp_id) struct = SpacegroupAnalyzer(struct).get_conventional_standard_structure() slabs = generate_all_slabs(struct, 1, 5.0, 2.0, center_slab=True) slab_dict = {slab.miller_index:slab for slab in slabs} asf = AdsorbateSiteFinder.from_bulk_and_miller(struct, m_index, undercoord_threshold=0.10) plot_slab(asf.slab, axes[n]) ads_sites = asf.find_adsorption_sites() sop = get_rot(asf.slab) ads_sites = [sop.operate(ads_site)[:2].tolist() for ads_site in ads_sites["all"]] axes[n].plot(*zip(*ads_sites), color='k', marker='x', markersize=10, mew=1, linestyle='', zorder=10000) mi_string = "".join([str(i) for i in m_index]) axes[n].set_title("{}({})".format(struct.composition.reduced_formula, mi_string)) axes[n].set_xticks([]) axes[n].set_yticks([]) axes[4].set_xlim(-2, 5) axes[4].set_ylim(-2, 5) fig.savefig('slabs.png', dpi=200) !open slabs.png """ Explanation: Example 2: AdsorbateSiteFinder for various surfaces In this example, the AdsorbateSiteFinder is used to find adsorption sites on different structures and miller indices. End of explanation """ from fireworks import LaunchPad lpad = LaunchPad() lpad.reset('', require_password=False) """ Explanation: Example 3: Generating a workflow from atomate In this example, we demonstrate how MatMethods may be used to generate a full workflow for the determination of DFT-energies from which adsorption energies may be calculated. Note that this requires a working instance of FireWorks and its dependency, MongoDB. Note that MongoDB can be installed via Anaconda. 
End of explanation """ from atomate.vasp.workflows.base.adsorption import get_wf_surface, get_wf_surface_all_slabs """ Explanation: Import the necessary workflow-generating function from atomate: End of explanation """ co = Molecule("CO", [[0, 0, 0], [0, 0, 1.23]]) h = Molecule("H", [[0, 0, 0]]) """ Explanation: Adsorption configurations take the form of a dictionary with the miller index as a string key and a list of pymatgen Molecule instances as the values. End of explanation """ struct = mpr.get_structure_by_material_id("mp-23") # fcc Ni struct = SpacegroupAnalyzer(struct).get_conventional_standard_structure() slabs = generate_all_slabs(struct, 1, 5.0, 2.0, center_slab=True) slab_dict = {slab.miller_index:slab for slab in slabs} ni_slab_111 = slab_dict[(1, 1, 1)] wf = get_wf_surface([ni_slab_111], molecules=[co, h]) lpad.add_wf(wf) """ Explanation: Workflows are generated using the a slab a list of molecules. End of explanation """ lpad.get_wf_summary_dict(1) """ Explanation: The workflow may be inspected as below. Note that there are 9 optimization tasks correponding the slab, and 4 distinct adsorption configurations for each of the 2 adsorbates. Details on running FireWorks, including singleshot launching, queue submission, workflow management, and more can be found in the FireWorks documentation. End of explanation """ from pymatgen.core.periodic_table import * from pymatgen.core.surface import get_symmetrically_distinct_miller_indices import tqdm lpad.reset('', require_password=False) """ Explanation: Note also that running FireWorks via atomate may require system specific tuning (e. g. for VASP parameters). More information is available in the atomate documentation. Example 4 - Screening of oxygen evolution electrocatalysts on binary oxides This final example is intended to demonstrate how to use the MP API and the adsorption workflow to do an initial high-throughput study of oxygen evolution electrocatalysis on binary oxides of transition metals. 
End of explanation """ OH = Molecule("OH", [[0, 0, 0], [-0.793, 0.384, 0.422]]) O = Molecule("O", [[0, 0, 0]]) OOH = Molecule("OOH", [[0, 0, 0], [-1.067, -0.403, 0.796], [-0.696, -0.272, 1.706]]) adsorbates = [OH, O, OOH] """ Explanation: For oxygen evolution, a common metric for the catalytic activity of a given catalyst is the theoretical overpotential corresponding to the mechanism that proceeds through OH*, O*, and OOH*. So we can define our adsorbates: End of explanation """ elements = [Element.from_Z(i) for i in range(1, 103)] trans_metals = [el for el in elements if el.is_transition_metal] # tqdm adds a progress bar so we can see the progress of the for loop for metal in tqdm.tqdm_notebook(trans_metals): # Get relatively stable structures with small unit cells data = mpr.get_data("{}-O".format(metal.symbol)) data = [datum for datum in data if datum["e_above_hull"] < 0.05] data = sorted(data, key = lambda x: x["nsites"]) struct = Structure.from_str(data[0]["cif"], fmt='cif') # Put in conventional cell settings struct = SpacegroupAnalyzer(struct).get_conventional_standard_structure() # Get distinct miller indices for low-index facets wf = get_wf_surface_all_slabs(struct, adsorbates) lpad.add_wf(wf) print("Processed: {}".format(struct.formula)) """ Explanation: Then we can retrieve the structures using the MP rest interface, and write a simple for loop which creates all of the workflows corresponding to every slab and every adsorption site for each material. The code below will take ~15 minutes. This could be parallelized to be more efficient, but is not for simplicity in this case. End of explanation """
saravanakumar-periyasamy/deep-learning
image-classification/dlnd_image_classification.ipynb
mit
""" DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm import problem_unittests as tests import tarfile cifar10_dataset_folder_path = 'cifar-10-batches-py' class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile('cifar-10-python.tar.gz'): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar: urlretrieve( 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', 'cifar-10-python.tar.gz', pbar.hook) if not isdir(cifar10_dataset_folder_path): with tarfile.open('cifar-10-python.tar.gz') as tar: tar.extractall() tar.close() tests.test_folder_path(cifar10_dataset_folder_path) """ Explanation: Image Classification In this project, you'll classify images from the CIFAR-10 dataset. The dataset consists of airplanes, dogs, cats, and other objects. You'll preprocess the images, then train a convolutional neural network on all the samples. The images need to be normalized and the labels need to be one-hot encoded. You'll get to apply what you learned and build a convolutional, max pooling, dropout, and fully connected layers. At the end, you'll get to see your neural network's predictions on the sample images. Get the Data Run the following cell to download the CIFAR-10 dataset for python. End of explanation """ %matplotlib inline %config InlineBackend.figure_format = 'retina' import helper import numpy as np # Explore the dataset batch_id = 2 sample_id = 6 helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id) """ Explanation: Explore the Data The dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named data_batch_1, data_batch_2, etc.. 
Each batch contains the labels and images that are one of the following: * airplane * automobile * bird * cat * deer * dog * frog * horse * ship * truck Understanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the batch_id and sample_id. The batch_id is the id for a batch (1-5). The sample_id is the id for a image and label pair in the batch. Ask yourself "What are all possible labels?", "What is the range of values for the image data?", "Are the labels in order or random?". Answers to questions like these will help you preprocess the data and end up with better predictions. End of explanation """ def normalize(x): """ Normalize a list of sample image data in the range of 0 to 1 : x: List of image data. The image shape is (32, 32, 3) : return: Numpy array of normalize data """ # TODO: Implement Function return x / 256 """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_normalize(normalize) """ Explanation: Implement Preprocess Functions Normalize In the cell below, implement the normalize function to take in image data, x, and return it as a normalized Numpy array. The values should be in the range of 0 to 1, inclusive. The return object should be the same shape as x. End of explanation """ def one_hot_encode(x): """ One hot encode a list of sample labels. Return a one-hot encoded vector for each label. : x: List of sample Labels : return: Numpy array of one-hot encoded labels """ # TODO: Implement Function one_hot_vectors = np.zeros((len(x),10)) i = 0 for label in x: one_hot_vectors[i][label] = 1 i = i+1 return one_hot_vectors """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_one_hot_encode(one_hot_encode) """ Explanation: One-hot encode Just like the previous code cell, you'll be implementing a function for preprocessing. This time, you'll implement the one_hot_encode function. The input, x, are a list of labels. 
Implement the function to return the list of labels as One-Hot encoded Numpy array. The possible values for labels are 0 to 9. The one-hot encoding function should return the same encoding for each value between each call to one_hot_encode. Make sure to save the map of encodings outside the function. Hint: Don't reinvent the wheel. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ # Preprocess Training, Validation, and Testing Data helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode) """ Explanation: Randomize Data As you saw from exploring the data above, the order of the samples are randomized. It doesn't hurt to randomize it again, but you don't need to for this dataset. Preprocess all the data and save it Running the code cell below will preprocess all the CIFAR-10 data and save it to file. The code below also uses 10% of the training data for validation. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ import pickle import problem_unittests as tests import helper # Load the Preprocessed Validation data valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb')) """ Explanation: Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. End of explanation """ import tensorflow as tf def neural_net_image_input(image_shape): """ Return a Tensor for a bach of image input : image_shape: Shape of the images : return: Tensor for image input. """ # TODO: Implement Function return tf.placeholder(tf.float32, [None,image_shape[0],image_shape[1],image_shape[2]], "x") def neural_net_label_input(n_classes): """ Return a Tensor for a batch of label input : n_classes: Number of classes : return: Tensor for label input. 
""" # TODO: Implement Function return tf.placeholder(tf.float32, [None,n_classes], "y") def neural_net_keep_prob_input(): """ Return a Tensor for keep probability : return: Tensor for keep probability. """ # TODO: Implement Function return tf.placeholder(tf.float32, name="keep_prob") """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tf.reset_default_graph() tests.test_nn_image_inputs(neural_net_image_input) tests.test_nn_label_inputs(neural_net_label_input) tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input) """ Explanation: Build the network For the neural network, you'll build each layer into a function. Most of the code you've seen has been outside of functions. To test your code more thoroughly, we require that you put each layer in a function. This allows us to give you better feedback and test for simple mistakes using our unittests before you submit your project. Note: If you're finding it hard to dedicate enough time for this course each week, we've provided a small shortcut to this part of the project. In the next couple of problems, you'll have the option to use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages to build each layer, except the layers you build in the "Convolutional and Max Pooling Layer" section. TF Layers is similar to Keras's and TFLearn's abstraction to layers, so it's easy to pickup. However, if you would like to get the most out of this course, try to solve all the problems without using anything from the TF Layers packages. You can still use classes from other packages that happen to have the same name as ones you find in TF Layers! For example, instead of using the TF Layers version of the conv2d class, tf.layers.conv2d, you would want to use the TF Neural Network version of conv2d, tf.nn.conv2d. Let's begin! Input The neural network needs to read the image data, one-hot encoded labels, and dropout keep probability. 
Implement the following functions * Implement neural_net_image_input * Return a TF Placeholder * Set the shape using image_shape with batch size set to None. * Name the TensorFlow placeholder "x" using the TensorFlow name parameter in the TF Placeholder. * Implement neural_net_label_input * Return a TF Placeholder * Set the shape using n_classes with batch size set to None. * Name the TensorFlow placeholder "y" using the TensorFlow name parameter in the TF Placeholder. * Implement neural_net_keep_prob_input * Return a TF Placeholder for dropout keep probability. * Name the TensorFlow placeholder "keep_prob" using the TensorFlow name parameter in the TF Placeholder. These names will be used at the end of the project to load your saved model. Note: None for shapes in TensorFlow allow for a dynamic size. End of explanation """ def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides): """ Apply convolution then max pooling to x_tensor :param x_tensor: TensorFlow Tensor :param conv_num_outputs: Number of outputs for the convolutional layer :param conv_ksize: kernal size 2-D Tuple for the convolutional layer :param conv_strides: Stride 2-D Tuple for convolution :param pool_ksize: kernal size 2-D Tuple for pool :param pool_strides: Stride 2-D Tuple for pool : return: A tensor that represents convolution and max pooling of x_tensor """ # TODO: Implement Function weight = tf.Variable(tf.truncated_normal([conv_ksize[0], conv_ksize[1], x_tensor.get_shape().as_list()[3], conv_num_outputs],0,5e-2)) bias = tf.Variable(tf.zeros(conv_num_outputs)) strides = [1, conv_strides[0], conv_strides[1], 1] conv_layer = tf.nn.conv2d(x_tensor, weight, strides, padding='SAME') conv_layer = tf.nn.bias_add(conv_layer, bias) conv_layer = tf.nn.relu(conv_layer) ksize = [1, pool_ksize[0], pool_ksize[1], 1] kstrides = [1, pool_strides[0], pool_strides[1], 1] return tf.nn.max_pool(conv_layer, ksize, kstrides, padding='SAME') """ DON'T MODIFY ANYTHING IN THIS 
CELL THAT IS BELOW THIS LINE """ tests.test_con_pool(conv2d_maxpool) """ Explanation: Convolution and Max Pooling Layer Convolution layers have a lot of success with images. For this code cell, you should implement the function conv2d_maxpool to apply convolution then max pooling: * Create the weight and bias using conv_ksize, conv_num_outputs and the shape of x_tensor. * Apply a convolution to x_tensor using weight and conv_strides. * We recommend you use same padding, but you're welcome to use any padding. * Add bias * Add a nonlinear activation to the convolution. * Apply Max Pooling using pool_ksize and pool_strides. * We recommend you use same padding, but you're welcome to use any padding. Note: You can't use TensorFlow Layers or TensorFlow Layers (contrib) for this layer, but you can still use TensorFlow's Neural Network package. You may still use the shortcut option for all the other layers. End of explanation """ def flatten(x_tensor): """ Flatten x_tensor to (Batch Size, Flattened Image Size) : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions. : return: A tensor of size (Batch Size, Flattened Image Size). """ # TODO: Implement Function return tf.reshape(x_tensor,[-1,(x_tensor.get_shape().as_list()[1] * x_tensor.get_shape().as_list()[2] * x_tensor.get_shape().as_list()[3])]) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_flatten(flatten) """ Explanation: Flatten Layer Implement the flatten function to change the dimension of x_tensor from a 4-D tensor to a 2-D tensor. The output should be the shape (Batch Size, Flattened Image Size). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages. 
End of explanation """ def fully_conn(x_tensor, num_outputs): """ Apply a fully connected layer to x_tensor using weight and bias : x_tensor: A 2-D tensor where the first dimension is batch size. : num_outputs: The number of output that the new tensor should be. : return: A 2-D tensor where the second dimension is num_outputs. """ # TODO: Implement Function weights = tf.Variable(tf.truncated_normal([x_tensor.shape.as_list()[1], num_outputs],0,5e-2)) bias = tf.Variable(tf.truncated_normal([num_outputs])) fc = tf.add(tf.matmul(x_tensor, weights), bias) return tf.nn.relu(fc) # return tf.contrib.layers.fully_connected(x_tensor, num_outputs) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_fully_conn(fully_conn) """ Explanation: Fully-Connected Layer Implement the fully_conn function to apply a fully connected layer to x_tensor with the shape (Batch Size, num_outputs). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. For more of a challenge, only use other TensorFlow packages. End of explanation """ def output(x_tensor, num_outputs): """ Apply a output layer to x_tensor using weight and bias : x_tensor: A 2-D tensor where the first dimension is batch size. : num_outputs: The number of output that the new tensor should be. : return: A 2-D tensor where the second dimension is num_outputs. """ # TODO: Implement Function weights = tf.Variable(tf.truncated_normal([x_tensor.shape.as_list()[1], num_outputs],0,5e-2)) bias = tf.Variable(tf.truncated_normal([num_outputs])) return tf.add(tf.matmul(x_tensor, weights), bias) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_output(output) """ Explanation: Output Layer Implement the output function to apply a fully connected layer to x_tensor with the shape (Batch Size, num_outputs). Shortcut option: you can use classes from the TensorFlow Layers or TensorFlow Layers (contrib) packages for this layer. 
For more of a challenge, only use other TensorFlow packages. Note: Activation, softmax, or cross entropy should not be applied to this. End of explanation """ def conv_net(x_tensor, keep_prob): """ Create a convolutional neural network model : x: Placeholder tensor that holds image data. : keep_prob: Placeholder tensor that hold dropout keep probability. : return: Tensor that represents logits """ conv_num_outputs1 = 32 conv_num_outputs2 = 64 conv_num_outputs3 = 128 conv_ksize1 = (3,3) conv_ksize2 = (4,4) conv_ksize3 = (5,5) conv_strides1 = (1,1) conv_strides2 = (1,1) conv_strides3 = (1,1) pool_ksize = (2,2) pool_strides = pool_ksize fc1_num_outputs = 1024 fc2_num_outputs = 512 fc3_num_outputs = 256 num_outputs = 10 # TODO: Apply 1, 2, or 3 Convolution and Max Pool layers # Play around with different number of outputs, kernel size and stride # Function Definition from Above: x_tensor = conv2d_maxpool(x_tensor, conv_num_outputs1, conv_ksize1, conv_strides1, pool_ksize, pool_strides) # x_tensor = tf.nn.dropout(x_tensor, keep_prob) x_tensor = conv2d_maxpool(x_tensor, conv_num_outputs2, conv_ksize2, conv_strides2, pool_ksize, pool_strides) x_tensor = tf.nn.dropout(x_tensor, keep_prob) x_tensor = conv2d_maxpool(x_tensor, conv_num_outputs3, conv_ksize3, conv_strides3, pool_ksize, pool_strides) # x_tensor = tf.nn.dropout(x_tensor, keep_prob) # TODO: Apply a Flatten Layer # Function Definition from Above: x_tensor = flatten(x_tensor) # TODO: Apply 1, 2, or 3 Fully Connected Layers # Play around with different number of outputs # Function Definition from Above: x_tensor = fully_conn(x_tensor, fc1_num_outputs) # x_tensor = tf.nn.dropout(x_tensor, keep_prob) x_tensor = fully_conn(x_tensor, fc2_num_outputs) # x_tensor = tf.nn.dropout(x_tensor, keep_prob) x_tensor = fully_conn(x_tensor, fc3_num_outputs) x_tensor = tf.nn.dropout(x_tensor, keep_prob) # TODO: Apply an Output Layer # Set this to the number of classes # Function Definition from Above: x_tensor = output(x_tensor, 
num_outputs) # TODO: return output return x_tensor """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ ############################## ## Build the Neural Network ## ############################## # Remove previous weights, bias, inputs, etc.. tf.reset_default_graph() # Inputs x = neural_net_image_input((32, 32, 3)) y = neural_net_label_input(10) keep_prob = neural_net_keep_prob_input() # Model logits = conv_net(x, keep_prob) # Name logits Tensor, so that is can be loaded from disk after training logits = tf.identity(logits, name='logits') # Loss and Optimizer cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)) optimizer = tf.train.AdamOptimizer().minimize(cost) # Accuracy correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy') tests.test_conv_net(conv_net) """ Explanation: Create Convolutional Model Implement the function conv_net to create a convolutional neural network model. The function takes in a batch of images, x, and outputs logits. Use the layers you created above to create this model: Apply 1, 2, or 3 Convolution and Max Pool layers Apply a Flatten Layer Apply 1, 2, or 3 Fully Connected Layers Apply an Output Layer Return the output Apply TensorFlow's Dropout to one or more layers in the model using keep_prob. 
End of explanation """ def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch): """ Optimize the session on a batch of images and labels : session: Current TensorFlow session : optimizer: TensorFlow optimizer function : keep_probability: keep probability : feature_batch: Batch of Numpy image data : label_batch: Batch of Numpy label data """ # TODO: Implement Function session.run(optimizer, feed_dict={ x: feature_batch, y: label_batch, keep_prob: keep_probability}) pass """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_train_nn(train_neural_network) """ Explanation: Train the Neural Network Single Optimization Implement the function train_neural_network to do a single optimization. The optimization should use optimizer to optimize in session with a feed_dict of the following: * x for image input * y for labels * keep_prob for keep probability for dropout This function will be called for each batch, so tf.global_variables_initializer() has already been called. Note: Nothing needs to be returned. This function is only optimizing the neural network. End of explanation """ def print_stats(session, feature_batch, label_batch, cost, accuracy): """ Print information about loss and validation accuracy : session: Current TensorFlow session : feature_batch: Batch of Numpy image data : label_batch: Batch of Numpy label data : cost: TensorFlow cost function : accuracy: TensorFlow accuracy function """ # TODO: Implement Function loss = session.run(cost, feed_dict={ x: feature_batch, y: label_batch, keep_prob: 1.}) valid_acc = session.run(accuracy, feed_dict={ x: valid_features, y: valid_labels, keep_prob: 1.}) print('Loss: {:>10.4f} Validation Accuracy: {:.6f}'.format(loss, valid_acc)) pass """ Explanation: Show Stats Implement the function print_stats to print loss and validation accuracy. Use the global variables valid_features and valid_labels to calculate validation accuracy. 
Use a keep probability of 1.0 to calculate the loss and validation accuracy. End of explanation """ # TODO: Tune Parameters epochs = 15 batch_size = 256 keep_probability = 0.75 """ Explanation: Hyperparameters Tune the following parameters: * Set epochs to the number of iterations until the network stops learning or start overfitting * Set batch_size to the highest number that your machine has memory for. Most people set them to common sizes of memory: * 64 * 128 * 256 * ... * Set keep_probability to the probability of keeping a node using dropout End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ print('Checking the Training on a Single Batch...') with tf.Session() as sess: # Initializing the variables sess.run(tf.global_variables_initializer()) # sess.close # Training cycle for epoch in range(epochs): batch_i = 1 for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size): train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels) print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='') print_stats(sess, batch_features, batch_labels, cost, accuracy) """ Explanation: Train on a Single CIFAR-10 Batch Instead of training the neural network on all the CIFAR-10 batches of data, let's use a single batch. This should save time while you iterate on the model to get a better accuracy. Once the final validation accuracy is 50% or greater, run the model on all the data in the next section. 
End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ save_model_path = './image_classification' print('Training...') with tf.Session() as sess: # Initializing the variables sess.run(tf.global_variables_initializer()) # Training cycle for epoch in range(epochs): # Loop over all batches n_batches = 5 for batch_i in range(1, n_batches + 1): for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size): train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels) print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='') print_stats(sess, batch_features, batch_labels, cost, accuracy) # Save Model saver = tf.train.Saver() save_path = saver.save(sess, save_model_path) """ Explanation: Fully Train the Model Now that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ %matplotlib inline %config InlineBackend.figure_format = 'retina' import tensorflow as tf import pickle import helper import random # Set batch size if not already set try: if batch_size: pass except NameError: batch_size = 64 save_model_path = './image_classification' n_samples = 4 top_n_predictions = 3 def test_model(): """ Test the saved model against the test dataset """ test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb')) loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load model loader = tf.train.import_meta_graph(save_model_path + '.meta') loader.restore(sess, save_model_path) # Get Tensors from loaded model loaded_x = loaded_graph.get_tensor_by_name('x:0') loaded_y = loaded_graph.get_tensor_by_name('y:0') loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0') loaded_logits = loaded_graph.get_tensor_by_name('logits:0') loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0') # Get accuracy in batches for memory limitations test_batch_acc_total = 0 
test_batch_count = 0 for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size): test_batch_acc_total += sess.run( loaded_acc, feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0}) test_batch_count += 1 print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count)) # Print Random Samples random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples))) random_test_predictions = sess.run( tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions), feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0}) helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions) test_model() """ Explanation: Checkpoint The model has been saved to disk. Test Model Test your model against the test dataset. This will be your final accuracy. You should have an accuracy greater than 50%. If you don't, keep tweaking the model architecture and parameters. End of explanation """
leriomaggio/numpy_euroscipy2015
extra_torch_tensor.ipynb
mit
import torch """ Explanation: Original Notebook Introduction to PyTorch Tensor Reference: "What is PyTorch?" by Soumith Chintala What is PyTorch? It’s a Python-based scientific computing package targeted at two sets of audiences: A replacement for NumPy to use the power of GPUs a deep learning research platform that provides maximum flexibility and speed Getting Started Tensors ^^^^^^^ Tensors are similar to NumPy’s ndarrays, with the addition being that Tensors can also be used on a GPU to accelerate computing. End of explanation """ x = torch.empty(5, 3) print(x) type(x) """ Explanation: <div class="alert alert-info"><h4>Note</h4><p>An uninitialized matrix is declared, but does not contain definite known values before it is used. When an uninitialized matrix is created, whatever values were in the allocated memory at the time will appear as the initial values.</p></div> Construct a 5x3 matrix, uninitialized: End of explanation """ x = torch.rand(5, 3) print(x) """ Explanation: Construct a randomly initialized matrix: End of explanation """ x = torch.zeros(5, 3, dtype=torch.long) print(x) """ Explanation: Construct a matrix filled zeros and of dtype long: End of explanation """ x = torch.tensor([5.5, 3]) print(x) """ Explanation: Construct a tensor directly from data: End of explanation """ x = x.new_ones(5, 3, dtype=torch.double) # new_* methods take in sizes print(x) x = torch.randn_like(x, dtype=torch.float) # override dtype! print(x) # result has the same size """ Explanation: or create a tensor based on an existing tensor. These methods will reuse properties of the input tensor, e.g. 
dtype, unless new values are provided by user End of explanation """ print(x.size()) """ Explanation: Get its size: End of explanation """ y = torch.rand(5, 3) print(x + y) """ Explanation: <div class="alert alert-info"><h4>Note</h4><p>``torch.Size`` is in fact a tuple, so it supports all tuple operations.</p></div> Operations ^^^^^^^^^^ There are multiple syntaxes for operations. In the following example, we will take a look at the addition operation. Addition: syntax 1 End of explanation """ print(torch.add(x, y)) """ Explanation: Addition: syntax 2 End of explanation """ result = torch.empty(5, 3) torch.add(x, y, out=result) print(result) """ Explanation: Addition: providing an output tensor as argument End of explanation """ # adds x to y y.add_(x) print(y) """ Explanation: Addition: in-place End of explanation """ print(x[:, 1]) """ Explanation: <div class="alert alert-info"><h4>Note</h4><p>Any operation that mutates a tensor in-place is post-fixed with an ``_``. For example: ``x.copy_(y)``, ``x.t_()``, will change ``x``.</p></div> You can use standard NumPy-like indexing with all bells and whistles! End of explanation """ x = torch.randn(4, 4) y = x.view(16) z = x.view(-1, 8) # the size -1 is inferred from other dimensions print(x.size(), y.size(), z.size()) """ Explanation: Resizing: If you want to resize/reshape tensor, you can use torch.view: End of explanation """ x = torch.randn(1) print(x) print(x.item()) """ Explanation: If you have a one element tensor, use .item() to get the value as a Python number End of explanation """ a = torch.ones(5) print(a) b = a.numpy() print(b) """ Explanation: Read later: 100+ Tensor operations, including transposing, indexing, slicing, mathematical operations, linear algebra, random numbers, etc., are described here &lt;https://pytorch.org/docs/torch&gt;_. NumPy Bridge Converting a Torch Tensor to a NumPy array and vice versa is a breeze. 
The Torch Tensor and NumPy array will share their underlying memory locations (if the Torch Tensor is on CPU), and changing one will change the other. Converting a Torch Tensor to a NumPy Array ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ End of explanation """ a.add_(1) print(a) print(b) """ Explanation: See how the numpy array changed in value. End of explanation """ import numpy as np a = np.ones(5) b = torch.from_numpy(a) np.add(a, 1, out=a) print(a) print(b) """ Explanation: Converting NumPy Array to Torch Tensor ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ See how changing the np array changed the Torch Tensor automatically End of explanation """ # let us run this cell only if CUDA is available # We will use ``torch.device`` objects to move tensors in and out of GPU if torch.cuda.is_available(): device = torch.device("cuda") # a CUDA device object y = torch.ones_like(x, device=device) # directly create a tensor on GPU x = x.to(device) # or just use strings ``.to("cuda")`` z = x + y print(z) print(z.to("cpu", torch.double)) # ``.to`` can also change dtype together! """ Explanation: All the Tensors on the CPU except a CharTensor support converting to NumPy and back. CUDA Tensors Tensors can be moved onto any device using the .to method. End of explanation """
InsightLab/data-science-cookbook
2019/09-clustering/Notebook_KMeans_Assignment.ipynb
mit
# import libraries # linear algebra import numpy as np # data processing import pandas as pd # data visualization from matplotlib import pyplot as plt # load the data with pandas dataset = pd.read_csv('dataset.csv', header=None) dataset = np.array(dataset) plt.scatter(dataset[:,0], dataset[:,1], s=10) plt.show() """ Explanation: <p style="text-align: center;">Clusterização e algoritmo K-means</p> Organizar dados em agrupamentos é um dos modos mais fundamentais de compreensão e aprendizado. Como por exemplo, os organismos em um sistema biologico são classificados em domínio, reino, filo, classe, etc. A análise de agrupamento é o estudo formal de métodos e algoritmos para agrupar objetos de acordo com medidas ou características semelhantes. A análise de cluster, em sua essência, não utiliza rótulos de categoria que marcam objetos com identificadores anteriores, ou seja, rótulos de classe. A ausência de informação de categoria distingue o agrupamento de dados (aprendizagem não supervisionada) da classificação ou análise discriminante (aprendizagem supervisionada). O objetivo da clusterização é encontrar estruturas em dados e, portanto, é de natureza exploratória. A técnica de Clustering tem uma longa e rica história em uma variedade de campos científicos. Um dos algoritmos de clusterização mais populares e simples, o K-means, foi publicado pela primeira vez em 1955. Apesar do K-means ter sido proposto há mais de 50 anos e milhares de algoritmos de clustering terem sido publicados desde então, o K-means é ainda amplamente utilizado. Fonte: Anil K. 
Jain, Data clustering: 50 years beyond K-means, Pattern Recognition Letters, Volume 31, Issue 8, 2010 Objetivo Implementar as funções do algoritmo KMeans passo-a-passo Comparar a implementação com o algoritmo do Scikit-Learn Entender e codificar o Método do Cotovelo Utilizar o K-means em um dataset real Carregando os dados de teste Carregue os dados disponibilizados, e identifique visualmente em quantos grupos os dados parecem estar distribuídos. End of explanation """ def calculate_initial_centers(dataset, k): """ Inicializa os centróides iniciais de maneira arbitrária Argumentos: dataset -- Conjunto de dados - [m,n] k -- Número de centróides desejados Retornos: centroids -- Lista com os centróides calculados - [k,n] """ #### CODE HERE #### ### END OF CODE ### return centroids """ Explanation: 1. Implementar o algoritmo K-means Nesta etapa você irá implementar as funções que compõe o algoritmo do KMeans uma a uma. É importante entender e ler a documentação de cada função, principalmente as dimensões dos dados esperados na saída. 1.1 Inicializar os centróides A primeira etapa do algoritmo consiste em inicializar os centróides de maneira aleatória. Essa etapa é uma das mais importantes do algoritmo e uma boa inicialização pode diminuir bastante o tempo de convergência. Para inicializar os centróides você pode considerar o conhecimento prévio sobre os dados, mesmo sem saber a quantidade de grupos ou sua distribuição. Dica: https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.uniform.html End of explanation """ k = 3 centroids = calculate_initial_centers(dataset, k) plt.scatter(dataset[:,0], dataset[:,1], s=10) plt.scatter(centroids[:,0], centroids[:,1], marker='^', c='red',s=100) plt.show() """ Explanation: Teste a função criada e visualize os centróides que foram calculados. 
End of explanation """ def euclidean_distance(a, b): """ Calcula a distância euclidiana entre os pontos a e b Argumentos: a -- Um ponto no espaço - [1,n] b -- Um ponto no espaço - [1,n] Retornos: distance -- Distância euclidiana entre os pontos """ #### CODE HERE #### ### END OF CODE ### return distance """ Explanation: 1.2 Definir os clusters Na segunda etapa do algoritmo serão definidos o grupo de cada dado, de acordo com os centróides calculados. 1.2.1 Função de distância Codifique a função de distância euclidiana entre dois pontos (a, b). Definido pela equação: $$ dist(a, b) = \sqrt{(a_1-b_1)^{2}+(a_2-b_2)^{2}+ ... + (a_n-b_n)^{2}} $$ $$ dist(a, b) = \sqrt{\sum_{i=1}^{n}(a_i-b_i)^{2}} $$ End of explanation """ a = np.array([1, 5, 9]) b = np.array([3, 7, 8]) if (euclidean_distance(a,b) == 3): print("Distância calculada corretamente!") else: print("Função de distância incorreta") """ Explanation: Teste a função criada. End of explanation """ def nearest_centroid(a, centroids): """ Calcula o índice do centroid mais próximo ao ponto a Argumentos: a -- Um ponto no espaço - [1,n] centroids -- Lista com os centróides - [k,n] Retornos: nearest_index -- Índice do centróide mais próximo """ #### CODE HERE #### ### END OF CODE ### return nearest_index """ Explanation: 1.2.2 Calcular o centroide mais próximo Utilizando a função de distância codificada anteriormente, complete a função abaixo para calcular o centroid mais próximo de um ponto qualquer. 
Dica: https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html End of explanation """ # Seleciona um ponto aleatório no dataset index = np.random.randint(dataset.shape[0]) a = dataset[index,:] # Usa a função para descobrir o centroid mais próximo idx_nearest_centroid = nearest_centroid(a, centroids) # Plota os dados ------------------------------------------------ plt.scatter(dataset[:,0], dataset[:,1], s=10) # Plota o ponto aleatório escolhido em uma cor diferente plt.scatter(a[0], a[1], c='magenta', s=30) # Plota os centroids plt.scatter(centroids[:,0], centroids[:,1], marker='^', c='red', s=100) # Plota o centroid mais próximo com uma cor diferente plt.scatter(centroids[idx_nearest_centroid,0], centroids[idx_nearest_centroid,1], marker='^', c='springgreen', s=100) # Cria uma linha do ponto escolhido para o centroid selecionado plt.plot([a[0], centroids[idx_nearest_centroid,0]], [a[1], centroids[idx_nearest_centroid,1]],c='orange') plt.annotate('CENTROID', (centroids[idx_nearest_centroid,0], centroids[idx_nearest_centroid,1],)) plt.show() """ Explanation: Teste a função criada End of explanation """ def all_nearest_centroids(dataset, centroids): """ Calcula o índice do centroid mais próximo para cada ponto do dataset Argumentos: dataset -- Conjunto de dados - [m,n] centroids -- Lista com os centróides - [k,n] Retornos: nearest_indexes -- Índices do centróides mais próximos - [m,1] """ #### CODE HERE #### ### END OF CODE ### return nearest_indexes """ Explanation: 1.2.3 Calcular centroid mais próximo de cada dado do dataset Utilizando a função anterior que retorna o índice do centroid mais próximo, calcule o centroid mais próximo de cada dado do dataset. End of explanation """ nearest_indexes = all_nearest_centroids(dataset, centroids) plt.scatter(dataset[:,0], dataset[:,1], c=nearest_indexes) plt.scatter(centroids[:,0], centroids[:,1], marker='^', c='red', s=100) plt.show() """ Explanation: Teste a função criada visualizando os cluster formados. 
End of explanation """ def inertia(dataset, centroids, nearest_indexes): """ Soma das distâncias quadradas das amostras para o centro do cluster mais próximo. Argumentos: dataset -- Conjunto de dados - [m,n] centroids -- Lista com os centróides - [k,n] nearest_indexes -- Índices do centróides mais próximos - [m,1] Retornos: inertia -- Soma total do quadrado da distância entre os dados de um cluster e seu centróide """ #### CODE HERE #### ### END OF CODE ### return inertia """ Explanation: 1.3 Métrica de avaliação Após formar os clusters, como sabemos se o resultado gerado é bom? Para isso, precisamos definir uma métrica de avaliação. O algoritmo K-means tem como objetivo escolher centróides que minimizem a soma quadrática das distância entre os dados de um cluster e seu centróide. Essa métrica é conhecida como inertia. $$\sum_{i=0}^{n}\min_{c_j \in C}(||x_i - c_j||^2)$$ A inertia, ou o critério de soma dos quadrados dentro do cluster, pode ser reconhecido como uma medida de o quão internamente coerentes são os clusters, porém ela sofre de alguns inconvenientes: A inertia pressupõe que os clusters são convexos e isotrópicos, o que nem sempre é o caso. Desta forma, pode não representar bem em aglomerados alongados ou variedades com formas irregulares. A inertia não é uma métrica normalizada: sabemos apenas que valores mais baixos são melhores e zero é o valor ótimo. Mas em espaços de dimensões muito altas, as distâncias euclidianas tendem a se tornar infladas (este é um exemplo da chamada “maldição da dimensionalidade”). A execução de um algoritmo de redução de dimensionalidade, como o PCA, pode aliviar esse problema e acelerar os cálculos. Fonte: https://scikit-learn.org/stable/modules/clustering.html Para podermos avaliar os nosso clusters, codifique a métrica da inertia abaixo, para isso você pode utilizar a função de distância euclidiana construída anteriormente. 
$$inertia = \sum_{i=0}^{n}\min_{c_j \in C} (dist(x_i, c_j))^2$$ End of explanation """ tmp_data = np.array([[1,2,3],[3,6,5],[4,5,6]]) tmp_centroide = np.array([[2,3,4]]) tmp_nearest_indexes = all_nearest_centroids(tmp_data, tmp_centroide) if inertia(tmp_data, tmp_centroide, tmp_nearest_indexes) == 26: print("Inertia calculada corretamente!") else: print("Função de inertia incorreta!") # Use a função para verificar a inertia dos seus clusters inertia(dataset, centroids, nearest_indexes) """ Explanation: Teste a função codificada executando o código abaixo. End of explanation """ def update_centroids(dataset, centroids, nearest_indexes): """ Atualiza os centroids Argumentos: dataset -- Conjunto de dados - [m,n] centroids -- Lista com os centróides - [k,n] nearest_indexes -- Índices do centróides mais próximos - [m,1] Retornos: centroids -- Lista com centróides atualizados - [k,n] """ #### CODE HERE #### ### END OF CODE ### return centroids """ Explanation: 1.4 Atualizar os clusters Nessa etapa, os centróides são recomputados. O novo valor de cada centróide será a media de todos os dados atribuídos ao cluster. 
End of explanation """ nearest_indexes = all_nearest_centroids(dataset, centroids) # Plota os os cluster ------------------------------------------------ plt.scatter(dataset[:,0], dataset[:,1], c=nearest_indexes) # Plota os centroids plt.scatter(centroids[:,0], centroids[:,1], marker='^', c='red', s=100) for index, centroid in enumerate(centroids): dataframe = dataset[nearest_indexes == index,:] for data in dataframe: plt.plot([centroid[0], data[0]], [centroid[1], data[1]], c='lightgray', alpha=0.3) plt.show() """ Explanation: Visualize os clusters formados End of explanation """ centroids = update_centroids(dataset, centroids, nearest_indexes) """ Explanation: Execute a função de atualização e visualize novamente os cluster formados End of explanation """ class KMeans(): def __init__(self, n_clusters=8, max_iter=300): self.n_clusters = n_clusters self.max_iter = max_iter def fit(self,X): # Inicializa os centróides self.cluster_centers_ = [None] # Computa o cluster de cada amostra self.labels_ = [None] # Calcula a inércia inicial old_inertia = [None] for index in [None]: #### CODE HERE #### ### END OF CODE ### return self def predict(self, X): return [None] """ Explanation: 2. K-means 2.1 Algoritmo completo Utilizando as funções codificadas anteriormente, complete a classe do algoritmo K-means! End of explanation """ kmeans = KMeans(n_clusters=3) kmeans.fit(dataset) print("Inércia = ", kmeans.inertia_) plt.scatter(dataset[:,0], dataset[:,1], c=kmeans.labels_) plt.scatter(kmeans.cluster_centers_[:,0], kmeans.cluster_centers_[:,1], marker='^', c='red', s=100) plt.show() """ Explanation: Verifique o resultado do algoritmo abaixo! End of explanation """ #### CODE HERE #### """ Explanation: 2.2 Comparar com algoritmo do Scikit-Learn Use a implementação do algoritmo do scikit-learn do K-means para o mesmo conjunto de dados. Mostre o valor da inércia e os conjuntos gerados pelo modelo. Você pode usar a mesma estrutura da célula de código anterior. 
Dica: https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans
End of explanation
"""
#### CODE HERE ####
"""
Explanation: 3. Método do cotovelo
Implemente o método do cotovelo e mostre o melhor K para o conjunto de dados.
End of explanation
"""
#### CODE HERE ####
"""
Explanation: 4. Dataset Real
Exercícios
1 - Aplique o algoritmo do K-means desenvolvido por você no dataset iris [1]. Mostre os resultados obtidos utilizando pelo menos duas métricas de avaliação de clusteres [2].
[1] http://archive.ics.uci.edu/ml/datasets/iris
[2] http://scikit-learn.org/stable/modules/clustering.html#clustering-evaluation
Dica: você pode utilizar as métricas completeness e homogeneity.
2 - Tente melhorar o resultado obtido na questão anterior utilizando uma técnica de mineração de dados. Explique a diferença obtida.
Dica: você pode tentar normalizar os dados [3].
- [3] https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.normalize.html
3 - Qual o número de clusteres (K) você escolheu na questão anterior? Desenvolva o Método do Cotovelo sem usar biblioteca e descubra o valor de K mais adequado. Após descobrir, utilize o valor obtido no algoritmo do K-means.
4 - Utilizando os resultados da questão anterior, refaça o cálculo das métricas e comente os resultados obtidos. Houve uma melhoria? Explique.
End of explanation
"""
google/starthinker
colabs/dv360_editor.ipynb
apache-2.0
!pip install git+https://github.com/google/starthinker """ Explanation: DV360 Bulk Editor Allows bulk editing DV360 through Sheets and BigQuery. License Copyright 2020 Google LLC, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Disclaimer This is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team. This code generated (see starthinker/scripts for possible source): - Command: "python starthinker_ui/manage.py colab" - Command: "python starthinker/tools/colab.py [JSON RECIPE]" 1. Install Dependencies First install the libraries needed to execute recipes, this only needs to be done once, then click play. End of explanation """ from starthinker.util.configuration import Configuration CONFIG = Configuration( project="", client={}, service={}, user="/content/user.json", verbose=True ) """ Explanation: 2. Set Configuration This code is required to initialize the project. Fill in required fields and press play. If the recipe uses a Google Cloud Project: Set the configuration project value to the project identifier from these instructions. If the recipe has auth set to user: If you have user credentials: Set the configuration user value to your user credentials JSON. If you DO NOT have user credentials: Set the configuration client value to downloaded client credentials. 
If the recipe has auth set to service: Set the configuration service value to downloaded service credentials. End of explanation """ FIELDS = { 'auth_dv':'user', # Credentials used for dv. 'auth_sheet':'user', # Credentials used for sheet. 'auth_bigquery':'service', # Credentials used for bigquery. 'recipe_name':'', # Name of Google Sheet to create. 'recipe_slug':'', # Name of Google BigQuery dataset to create. 'command':'Load Partners', # Action to take. } print("Parameters Set To: %s" % FIELDS) """ Explanation: 3. Enter DV360 Bulk Editor Recipe Parameters Select Load Partners, then click Save + Run, then a sheet called DV Editor will be created. In the Partners sheet tab, fill in Filter column then select Load Advertisers, click Save + Run. In the Advertisers sheet tab, fill in Filter column then select Load Campaigns, click Save + Run. In the Campaigns sheet tab, fill in Filter column, optional. Then select Load Insertion Orders And Line Items, click Save + Run. To update values, make changes on all Edit columns. Select Preview, then Save + Run. Check the Audit and Preview tabs to verify commit. To commit changes, select Update, then Save + Run. Check the Success and Error tabs. Update can be run multiple times. Update ONLY changes fields that do not match their original value. Insert operates only on Edit columns, ignores orignal value columns. Carefull when using drag to copy rows, values are incremented automatically. Modify audit logic by visting BigQuery and changing the views. Modify the values below for your use case, can be done multiple times, then click play. 
End of explanation """ from starthinker.util.configuration import execute from starthinker.util.recipe import json_set_fields TASKS = [ { 'dataset':{ '__comment__':'Ensure dataset exists.', 'auth':{'field':{'name':'auth_bigquery','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}}, 'dataset':{'field':{'name':'recipe_slug','prefix':'DV_Editor_','kind':'string','order':2,'default':'','description':'Name of Google BigQuery dataset to create.'}} } }, { 'drive':{ '__comment__':'Copy the default template to sheet with the recipe name', 'auth':{'field':{'name':'auth_sheet','kind':'authentication','order':1,'default':'user','description':'Credentials used for reading data.'}}, 'copy':{ 'source':'https://docs.google.com/spreadsheets/d/18G6cGo4j5SsY08H8P53R22D_Pm6m-zkE6APd3EDLf2c/', 'destination':{'field':{'name':'recipe_name','prefix':'DV Editor ','kind':'string','order':3,'default':'','description':'Name of Google Sheet to create.'}} } } }, { 'dv_editor':{ '__comment':'Depending on users choice, execute a different part of the solution.', 'auth_dv':{'field':{'name':'auth_dv','kind':'authentication','order':1,'default':'user','description':'Credentials used for dv.'}}, 'auth_sheets':{'field':{'name':'auth_sheet','kind':'authentication','order':2,'default':'user','description':'Credentials used for sheet.'}}, 'auth_bigquery':{'field':{'name':'auth_bigquery','kind':'authentication','order':3,'default':'service','description':'Credentials used for bigquery.'}}, 'sheet':{'field':{'name':'recipe_name','prefix':'DV Editor ','kind':'string','order':4,'default':'','description':'Name of Google Sheet to create.'}}, 'dataset':{'field':{'name':'recipe_slug','prefix':'DV_Editor_','kind':'string','order':5,'default':'','description':'Name of Google BigQuery dataset to create.'}}, 'command':{'field':{'name':'command','kind':'choice','choices':['Clear Partners','Clear Advertisers','Clear Campaigns','Clear Insertion Orders And Line 
Items','Clear Preview','Clear Update','Load Partners','Load Advertisers','Load Campaigns','Load Insertion Orders And Line Items','Preview','Update'],'order':6,'default':'Load Partners','description':'Action to take.'}} } } ] json_set_fields(TASKS, FIELDS) execute(CONFIG, TASKS, force=True) """ Explanation: 4. Execute DV360 Bulk Editor This does NOT need to be modified unless you are changing the recipe, click play. End of explanation """
johnnyliu27/openmc
examples/jupyter/mgxs-part-i.ipynb
mit
from IPython.display import Image Image(filename='images/mgxs.png', width=350) """ Explanation: This IPython Notebook introduces the use of the openmc.mgxs module to calculate multi-group cross sections for an infinite homogeneous medium. In particular, this Notebook introduces the the following features: General equations for scalar-flux averaged multi-group cross sections Creation of multi-group cross sections for an infinite homogeneous medium Use of tally arithmetic to manipulate multi-group cross sections Introduction to Multi-Group Cross Sections (MGXS) Many Monte Carlo particle transport codes, including OpenMC, use continuous-energy nuclear cross section data. However, most deterministic neutron transport codes use multi-group cross sections defined over discretized energy bins or energy groups. An example of U-235's continuous-energy fission cross section along with a 16-group cross section computed for a light water reactor spectrum is displayed below. End of explanation """ %matplotlib inline import numpy as np import matplotlib.pyplot as plt import openmc import openmc.mgxs as mgxs """ Explanation: A variety of tools employing different methodologies have been developed over the years to compute multi-group cross sections for certain applications, including NJOY (LANL), MC$^2$-3 (ANL), and Serpent (VTT). The openmc.mgxs Python module is designed to leverage OpenMC's tally system to calculate multi-group cross sections with arbitrary energy discretizations for fine-mesh heterogeneous deterministic neutron transport applications. Before proceeding to illustrate how one may use the openmc.mgxs module, it is worthwhile to define the general equations used to calculate multi-group cross sections. This is only intended as a brief overview of the methodology used by openmc.mgxs - we refer the interested reader to the large body of literature on the subject for a more comprehensive understanding of this complex topic. 
Introductory Notation The continuous real-valued microscopic cross section may be denoted $\sigma_{n,x}(\mathbf{r}, E)$ for position vector $\mathbf{r}$, energy $E$, nuclide $n$ and interaction type $x$. Similarly, the scalar neutron flux may be denoted by $\Phi(\mathbf{r},E)$ for position $\mathbf{r}$ and energy $E$. Note: Although nuclear cross sections are dependent on the temperature $T$ of the interacting medium, the temperature variable is neglected here for brevity. Spatial and Energy Discretization The energy domain for critical systems such as thermal reactors spans more than 10 orders of magnitude of neutron energies from 10$^{-5}$ - 10$^7$ eV. The multi-group approximation discretization divides this energy range into one or more energy groups. In particular, for $G$ total groups, we denote an energy group index $g$ such that $g \in {1, 2, ..., G}$. The energy group indices are defined such that the smaller group the higher the energy, and vice versa. The integration over neutron energies across a discrete energy group is commonly referred to as energy condensation. Multi-group cross sections are computed for discretized spatial zones in the geometry of interest. The spatial zones may be defined on a structured and regular fuel assembly or pin cell mesh, an arbitrary unstructured mesh or the constructive solid geometry used by OpenMC. For a geometry with $K$ distinct spatial zones, we designate each spatial zone an index $k$ such that $k \in {1, 2, ..., K}$. The volume of each spatial zone is denoted by $V_{k}$. The integration over discrete spatial zones is commonly referred to as spatial homogenization. General Scalar-Flux Weighted MGXS The multi-group cross sections computed by openmc.mgxs are defined as a scalar flux-weighted average of the microscopic cross sections across each discrete energy group. This formulation is employed in order to preserve the reaction rates within each energy group and spatial zone. 
In particular, spatial homogenization and energy condensation are used to compute the general multi-group cross section $\sigma_{n,x,k,g}$ as follows: $$\sigma_{n,x,k,g} = \frac{\int_{E_{g}}^{E_{g-1}}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r}\sigma_{n,x}(\mathbf{r},E')\Phi(\mathbf{r},E')}{\int_{E_{g}}^{E_{g-1}}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r}\Phi(\mathbf{r},E')}$$ This scalar flux-weighted average microscopic cross section is computed by openmc.mgxs for most multi-group cross sections, including total, absorption, and fission reaction types. These double integrals are stochastically computed with OpenMC's tally system - in particular, filters on the energy range and spatial zone (material, cell or universe) define the bounds of integration for both numerator and denominator. Multi-Group Scattering Matrices The general multi-group cross section $\sigma_{n,x,k,g}$ is a vector of $G$ values for each energy group $g$. The equation presented above only discretizes the energy of the incoming neutron and neglects the outgoing energy of the neutron (if any). Hence, this formulation must be extended to account for the outgoing energy of neutrons in the discretized scattering matrix cross section used by deterministic neutron transport codes. We denote the incoming and outgoing neutron energy groups as $g$ and $g'$ for the microscopic scattering matrix cross section $\sigma_{n,s}(\mathbf{r},E)$. 
As before, spatial homogenization and energy condensation are used to find the multi-group scattering matrix cross section $\sigma_{n,s,k,g \to g'}$ as follows: $$\sigma_{n,s,k,g\rightarrow g'} = \frac{\int_{E_{g'}}^{E_{g'-1}}\mathrm{d}E''\int_{E_{g}}^{E_{g-1}}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r}\sigma_{n,s}(\mathbf{r},E'\rightarrow E'')\Phi(\mathbf{r},E')}{\int_{E_{g}}^{E_{g-1}}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r}\Phi(\mathbf{r},E')}$$ This scalar flux-weighted multi-group microscopic scattering matrix is computed using OpenMC tallies with both energy in and energy out filters. Multi-Group Fission Spectrum The energy spectrum of neutrons emitted from fission is denoted by $\chi_{n}(\mathbf{r},E' \rightarrow E'')$ for incoming and outgoing energies $E'$ and $E''$, respectively. Unlike the multi-group cross sections $\sigma_{n,x,k,g}$ considered up to this point, the fission spectrum is a probability distribution and must sum to unity. The outgoing energy is typically much less dependent on the incoming energy for fission than for scattering interactions. As a result, it is common practice to integrate over the incoming neutron energy when computing the multi-group fission spectrum. The fission spectrum may be simplified as $\chi_{n}(\mathbf{r},E)$ with outgoing energy $E$. Unlike the multi-group cross sections defined up to this point, the multi-group fission spectrum is weighted by the fission production rate rather than the scalar flux. This formulation is intended to preserve the total fission production rate in the multi-group deterministic calculation. In order to mathematically define the multi-group fission spectrum, we denote the microscopic fission cross section as $\sigma_{n,f}(\mathbf{r},E)$ and the average number of neutrons emitted from fission interactions with nuclide $n$ as $\nu_{n}(\mathbf{r},E)$. 
The multi-group fission spectrum $\chi_{n,k,g}$ is then the probability of fission neutrons emitted into energy group $g$.
Similar to before, spatial homogenization and energy condensation are used to find the multi-group fission spectrum $\chi_{n,k,g}$ as follows:
$$\chi_{n,k,g'} = \frac{\int_{E_{g'}}^{E_{g'-1}}\mathrm{d}E''\int_{0}^{\infty}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r}\chi_{n}(\mathbf{r},E'\rightarrow E'')\nu_{n}(\mathbf{r},E')\sigma_{n,f}(\mathbf{r},E')\Phi(\mathbf{r},E')}{\int_{0}^{\infty}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r}\nu_{n}(\mathbf{r},E')\sigma_{n,f}(\mathbf{r},E')\Phi(\mathbf{r},E')}$$
The fission production-weighted multi-group fission spectrum is computed using OpenMC tallies with both energy in and energy out filters.
This concludes our brief overview on the methodology to compute multi-group cross sections. The following sections detail more concretely how users may employ the openmc.mgxs module to power simulation workflows requiring multi-group cross sections for downstream deterministic calculations.
Generate Input Files
End of explanation
"""
# Instantiate a Material and register the Nuclides
inf_medium = openmc.Material(name='moderator')
inf_medium.set_density('g/cc', 5.)
inf_medium.add_nuclide('H1', 0.028999667)
inf_medium.add_nuclide('O16', 0.01450188)
inf_medium.add_nuclide('U235', 0.000114142)
inf_medium.add_nuclide('U238', 0.006886019)
inf_medium.add_nuclide('Zr90', 0.002116053)
"""
Explanation: We begin by creating a material for the homogeneous medium.
End of explanation
"""
# Instantiate a Materials collection and export to XML
materials_file = openmc.Materials([inf_medium])
materials_file.export_to_xml()
"""
Explanation: With our material, we can now create a Materials object that can be exported to an actual XML file.
End of explanation """ # Instantiate boundary Planes min_x = openmc.XPlane(boundary_type='reflective', x0=-0.63) max_x = openmc.XPlane(boundary_type='reflective', x0=0.63) min_y = openmc.YPlane(boundary_type='reflective', y0=-0.63) max_y = openmc.YPlane(boundary_type='reflective', y0=0.63) """ Explanation: Now let's move on to the geometry. This problem will be a simple square cell with reflective boundary conditions to simulate an infinite homogeneous medium. The first step is to create the outer bounding surfaces of the problem. End of explanation """ # Instantiate a Cell cell = openmc.Cell(cell_id=1, name='cell') # Register bounding Surfaces with the Cell cell.region = +min_x & -max_x & +min_y & -max_y # Fill the Cell with the Material cell.fill = inf_medium """ Explanation: With the surfaces defined, we can now create a cell that is defined by intersections of half-spaces created by the surfaces. End of explanation """ # Create root universe root_universe = openmc.Universe(name='root universe', cells=[cell]) """ Explanation: OpenMC requires that there is a "root" universe. Let us create a root universe and add our square cell to it. End of explanation """ # Create Geometry and set root Universe openmc_geometry = openmc.Geometry(root_universe) # Export to "geometry.xml" openmc_geometry.export_to_xml() """ Explanation: We now must create a geometry that is assigned a root universe and export it to XML. 
End of explanation """ # OpenMC simulation parameters batches = 50 inactive = 10 particles = 2500 # Instantiate a Settings object settings_file = openmc.Settings() settings_file.batches = batches settings_file.inactive = inactive settings_file.particles = particles settings_file.output = {'tallies': True} # Create an initial uniform spatial source distribution over fissionable zones bounds = [-0.63, -0.63, -0.63, 0.63, 0.63, 0.63] uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True) settings_file.source = openmc.source.Source(space=uniform_dist) # Export to "settings.xml" settings_file.export_to_xml() """ Explanation: Next, we must define simulation parameters. In this case, we will use 10 inactive batches and 40 active batches each with 2500 particles. End of explanation """ # Instantiate a 2-group EnergyGroups object groups = mgxs.EnergyGroups() groups.group_edges = np.array([0., 0.625, 20.0e6]) """ Explanation: Now we are ready to generate multi-group cross sections! First, let's define a 2-group structure using the built-in EnergyGroups class. End of explanation """ # Instantiate a few different sections total = mgxs.TotalXS(domain=cell, groups=groups) absorption = mgxs.AbsorptionXS(domain=cell, groups=groups) scattering = mgxs.ScatterXS(domain=cell, groups=groups) # Note that if we wanted to incorporate neutron multiplication in the # scattering cross section we would write the previous line as: # scattering = mgxs.ScatterXS(domain=cell, groups=groups, nu=True) """ Explanation: We can now use the EnergyGroups object, along with our previously created materials and geometry, to instantiate some MGXS objects from the openmc.mgxs module. 
In particular, the following are subclasses of the generic and abstract MGXS class: TotalXS TransportXS AbsorptionXS CaptureXS FissionXS KappaFissionXS ScatterXS ScatterMatrixXS Chi ChiPrompt InverseVelocity PromptNuFissionXS Of course, we are aware that the fission cross section (FissionXS) can sometimes be paired with the fission neutron multiplication to become $\nu\sigma_f$. This can be accomodated in to the FissionXS class by setting the nu parameter to True as shown below. Additionally, scattering reactions (like (n,2n)) can also be defined to take in to account the neutron multiplication to become $\nu\sigma_s$. This can be accomodated in the the transport (TransportXS), scattering (ScatterXS), and scattering-matrix (ScatterMatrixXS) cross sections types by setting the nu parameter to True as shown below. These classes provide us with an interface to generate the tally inputs as well as perform post-processing of OpenMC's tally data to compute the respective multi-group cross sections. In this case, let's create the multi-group total, absorption and scattering cross sections with our 2-group structure. End of explanation """ absorption.tallies """ Explanation: Each multi-group cross section object stores its tallies in a Python dictionary called tallies. We can inspect the tallies in the dictionary for our Absorption object as follows. End of explanation """ # Instantiate an empty Tallies object tallies_file = openmc.Tallies() # Add total tallies to the tallies file tallies_file += total.tallies.values() # Add absorption tallies to the tallies file tallies_file += absorption.tallies.values() # Add scattering tallies to the tallies file tallies_file += scattering.tallies.values() # Export to "tallies.xml" tallies_file.export_to_xml() """ Explanation: The Absorption object includes tracklength tallies for the 'absorption' and 'flux' scores in the 2-group structure in cell 1. 
Now that each MGXS object contains the tallies that it needs, we must add these tallies to a Tallies object to generate the "tallies.xml" input file for OpenMC.
End of explanation
"""
# Run OpenMC
openmc.run()
"""
Explanation: Now we have a complete set of inputs, so we can go ahead and run our simulation.
End of explanation
"""
# Load the last statepoint file
sp = openmc.StatePoint('statepoint.50.h5')
"""
Explanation: Tally Data Processing
Our simulation ran successfully and created statepoint and summary output files. We begin our analysis by instantiating a StatePoint object.
End of explanation
"""
# Load the tallies from the statepoint into each MGXS object
total.load_from_statepoint(sp)
absorption.load_from_statepoint(sp)
scattering.load_from_statepoint(sp)
"""
Explanation: In addition to the statepoint file, our simulation also created a summary file which encapsulates information about the materials and geometry. By default, a Summary object is automatically linked when a StatePoint is loaded. This is necessary for the openmc.mgxs module to properly process the tally data.
The statepoint is now ready to be analyzed by our multi-group cross sections. We simply have to load the tallies from the StatePoint into each object as follows and our MGXS objects will compute the cross sections for us under-the-hood.
End of explanation
"""
total.print_xs()
"""
Explanation: Voila! Our multi-group cross sections are now ready to rock 'n roll!
Extracting and Storing MGXS Data
Let's first inspect our total cross section by printing it to the screen.
End of explanation
"""
df = scattering.get_pandas_dataframe()
df.head(10)
"""
Explanation: Since the openmc.mgxs module uses tally arithmetic under-the-hood, the cross section is stored as a "derived" Tally object. This means that it can be queried and manipulated using all of the same methods supported for the Tally class in the OpenMC Python API.
For example, we can construct a Pandas DataFrame of the multi-group cross section data. End of explanation """ absorption.export_xs_data(filename='absorption-xs', format='excel') """ Explanation: Each multi-group cross section object can be easily exported to a variety of file formats, including CSV, Excel, and LaTeX for storage or data processing. End of explanation """ total.build_hdf5_store(filename='mgxs', append=True) absorption.build_hdf5_store(filename='mgxs', append=True) scattering.build_hdf5_store(filename='mgxs', append=True) """ Explanation: The following code snippet shows how to export all three MGXS to the same HDF5 binary data store. End of explanation """ # Use tally arithmetic to compute the difference between the total, absorption and scattering difference = total.xs_tally - absorption.xs_tally - scattering.xs_tally # The difference is a derived tally which can generate Pandas DataFrames for inspection difference.get_pandas_dataframe() """ Explanation: Comparing MGXS with Tally Arithmetic Finally, we illustrate how one can leverage OpenMC's tally arithmetic data processing feature with MGXS objects. The openmc.mgxs module uses tally arithmetic to compute multi-group cross sections with automated uncertainty propagation. Each MGXS object includes an xs_tally attribute which is a "derived" Tally based on the tallies needed to compute the cross section type of interest. These derived tallies can be used in subsequent tally arithmetic operations. For example, we can use tally artithmetic to confirm that the TotalXS is equal to the sum of the AbsorptionXS and ScatterXS objects. 
End of explanation """ # Use tally arithmetic to compute the absorption-to-total MGXS ratio absorption_to_total = absorption.xs_tally / total.xs_tally # The absorption-to-total ratio is a derived tally which can generate Pandas DataFrames for inspection absorption_to_total.get_pandas_dataframe() # Use tally arithmetic to compute the scattering-to-total MGXS ratio scattering_to_total = scattering.xs_tally / total.xs_tally # The scattering-to-total ratio is a derived tally which can generate Pandas DataFrames for inspection scattering_to_total.get_pandas_dataframe() """ Explanation: Similarly, we can use tally arithmetic to compute the ratio of AbsorptionXS and ScatterXS to the TotalXS. End of explanation """ # Use tally arithmetic to ensure that the absorption- and scattering-to-total MGXS ratios sum to unity sum_ratio = absorption_to_total + scattering_to_total # The sum ratio is a derived tally which can generate Pandas DataFrames for inspection sum_ratio.get_pandas_dataframe() """ Explanation: Lastly, we sum the derived scatter-to-total and absorption-to-total ratios to confirm that they sum to unity. End of explanation """
nbokulich/short-read-tax-assignment
ipynb/mock-community/find-expected-gapless.ipynb
bsd-3-clause
from tax_credit import mock_quality from os.path import expanduser, join """ Explanation: Mock community quality control This notebook maps observed mock community sequences, which are technically from unknown organisms, to "trueish" taxonomies, i.e., the most likely taxonomic match given a list of expected sequences derived from the input strains. This serves two purposes: 1. We can then use trueish taxonomies to calculate per-sequence precision/recall scores 2. Mismatch profiles give us a quantitative assessment of the overall "quality" of a mock community (or at least the quality control methods used to process it). End of explanation """ data_dir = expanduser('~/Projects/short-read-tax-assignment-bk/') mockrobiota_dir = expanduser('~/Projects/mockrobiota/') ref_dir = expanduser('~/Data') """ Explanation: Define paths to tax-credit repository directory, mockrobiota repository directory, and reference database directory. End of explanation """ ref_dbs = [('greengenes', join(ref_dir, 'gg_13_8_otus', 'rep_set', '99_otus.fasta'), join(ref_dir, 'gg_13_8_otus', 'taxonomy', '99_otu_taxonomy.txt')), ('unite', join(ref_dir, 'unite_7_1', 'developer', 'sh_refs_qiime_ver7_99_20.11.2016_dev.fasta'), join(ref_dir, 'unite_7_1', 'developer', 'sh_taxonomy_qiime_ver7_99_20.11.2016_dev.txt'))] """ Explanation: Identify location of your reference databases. End of explanation """ refs, taxs = mock_quality.ref_db_to_dict(ref_dbs) """ Explanation: Now generate reference sequence/taxonomy dictionaries. End of explanation """ mock_quality.match_expected_seqs_to_taxonomy(data_dir, mockrobiota_dir, refs, taxs) """ Explanation: Establish expected sequences and taxonomies End of explanation """ mock_quality.generate_trueish_taxonomies(data_dir) """ Explanation: Map sequences that match to taxonomies End of explanation """
google/earthengine-api
python/examples/ipynb/Earth_Engine_TensorFlow_AI_Platform.ipynb
apache-2.0
from google.colab import auth auth.authenticate_user() """ Explanation: <table class="ee-notebook-buttons" align="left"><td> <a target="_blank" href="http://colab.research.google.com/github/google/earthengine-api/blob/master/python/examples/ipynb/Earth_Engine_TensorFlow_AI_Platform.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td><td> <a target="_blank" href="https://github.com/google/earthengine-api/blob/master/python/examples/ipynb/Earth_Engine_TensorFlow_AI_Platform.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td></table> Introduction This is an Earth Engine <> TensorFlow demonstration notebook. This demonstrates a per-pixel neural network implemented in a way that allows the trained model to be hosted on Google AI Platform and used in Earth Engine for interactive prediction from an ee.Model.fromAIPlatformPredictor. See this example notebook for background on the dense model. Running this demo may incur charges to your Google Cloud Account! Setup software libraries Import software libraries and/or authenticate as necessary. Authenticate to Colab and Cloud To read/write from a Google Cloud Storage bucket to which you have access, it's necessary to authenticate (as yourself). This should be the same account you use to login to Earth Engine. When you run the code below, it will display a link in the output to an authentication page in your browser. Follow the link to a page that will let you grant permission to the Cloud SDK to access your resources. Copy the code from the permissions page back into this notebook and press return to complete the process. (You may need to run this again if you get a credentials error later.) End of explanation """ !pip install -U earthengine-api --no-deps import ee ee.Authenticate() ee.Initialize() """ Explanation: Upgrade Earth Engine and Authenticate Update Earth Engine to ensure you have the latest version. 
Authenticate to Earth Engine the same way you did to the Colab notebook. Specifically, run the code to display a link to a permissions page. This gives you access to your Earth Engine account. This should be the same account you used to login to Cloud previously. Copy the code from the Earth Engine permissions page back into the notebook and press return to complete the process. End of explanation """ import tensorflow as tf print(tf.__version__) """ Explanation: Test the TensorFlow installation Import TensorFlow and check the version. End of explanation """ import folium print(folium.__version__) """ Explanation: Test the Folium installation We will use the Folium library for visualization. Import the library and check the version. End of explanation """ # REPLACE WITH YOUR CLOUD PROJECT! PROJECT = 'your-project' # Cloud Storage bucket with training and testing datasets. DATA_BUCKET = 'ee-docs-demos' # Output bucket for trained models. You must be able to write into this bucket. OUTPUT_BUCKET = 'your-bucket' # This is a good region for hosting AI models. REGION = 'us-central1' # Training and testing dataset file names in the Cloud Storage bucket. TRAIN_FILE_PREFIX = 'Training_demo' TEST_FILE_PREFIX = 'Testing_demo' file_extension = '.tfrecord.gz' TRAIN_FILE_PATH = 'gs://' + DATA_BUCKET + '/' + TRAIN_FILE_PREFIX + file_extension TEST_FILE_PATH = 'gs://' + DATA_BUCKET + '/' + TEST_FILE_PREFIX + file_extension # The labels, consecutive integer indices starting from zero, are stored in # this property, set on each point. LABEL = 'landcover' # Number of label values, i.e. number of classes in the classification. N_CLASSES = 3 # Use Landsat 8 surface reflectance data for predictors. L8SR = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR') # Use these bands for prediction. 
BANDS = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'] # These names are used to specify properties in the export of # training/testing data and to define the mapping between names and data # when reading into TensorFlow datasets. FEATURE_NAMES = list(BANDS) FEATURE_NAMES.append(LABEL) # List of fixed-length features, all of which are float32. columns = [ tf.io.FixedLenFeature(shape=[1], dtype=tf.float32) for k in FEATURE_NAMES ] # Dictionary with feature names as keys, fixed-length features as values. FEATURES_DICT = dict(zip(FEATURE_NAMES, columns)) """ Explanation: Define variables The training data are land cover labels with a single vector of Landsat 8 pixel values (BANDS) as predictors. See this example notebook for details on how to generate these training data. End of explanation """ print('Found training file.' if tf.io.gfile.exists(TRAIN_FILE_PATH) else 'No training file found.') print('Found testing file.' if tf.io.gfile.exists(TEST_FILE_PATH) else 'No testing file found.') """ Explanation: Read data Check existence of the data files Check that you have permission to read the files in the output Cloud Storage bucket. End of explanation """ # Create a dataset from the TFRecord file in Cloud Storage. train_dataset = tf.data.TFRecordDataset([TRAIN_FILE_PATH, TEST_FILE_PATH], compression_type='GZIP') # Print the first record to check. print(iter(train_dataset).next()) """ Explanation: Read into a tf.data.Dataset Here we are going to read a file in Cloud Storage into a tf.data.Dataset. (these TensorFlow docs explain more about reading data into a tf.data.Dataset). Check that you can read examples from the file. The purpose here is to ensure that we can read from the file without an error. The actual content is not necessarily human readable. Note that we will use all data for training. End of explanation """ def parse_tfrecord(example_proto): """The parsing function. Read a serialized example into the structure defined by FEATURES_DICT. 
Args: example_proto: a serialized Example. Returns: A tuple of the predictors dictionary and the LABEL, cast to an `int32`. """ parsed_features = tf.io.parse_single_example(example_proto, FEATURES_DICT) labels = parsed_features.pop(LABEL) return parsed_features, tf.cast(labels, tf.int32) # Map the function over the dataset. parsed_dataset = train_dataset.map(parse_tfrecord, num_parallel_calls=4) from pprint import pprint # Print the first parsed record to check. pprint(iter(parsed_dataset).next()) """ Explanation: Parse the dataset Now we need to make a parsing function for the data in the TFRecord files. The data comes in flattened 2D arrays per record and we want to use the first part of the array for input to the model and the last element of the array as the class label. The parsing function reads data from a serialized Example proto (i.e. example.proto) into a dictionary in which the keys are the feature names and the values are the tensors storing the value of the features for that example. (Learn more about parsing Example protocol buffer messages). End of explanation """ # Inputs as a tuple. Make predictors 1x1xP and labels 1x1xN_CLASSES. def to_tuple(inputs, label): return (tf.expand_dims(tf.transpose(list(inputs.values())), 1), tf.expand_dims(tf.one_hot(indices=label, depth=N_CLASSES), 1)) input_dataset = parsed_dataset.map(to_tuple) # Check the first one. pprint(iter(input_dataset).next()) input_dataset = input_dataset.shuffle(128).batch(8) """ Explanation: Note that each record of the parsed dataset contains a tuple. The first element of the tuple is a dictionary with bands names for keys and tensors storing the pixel data for values. The second element of the tuple is tensor storing the class label. Adjust dimension and shape Turn the dictionary of {name: tensor,...} into a 1x1xP array of values, where P is the number of predictors. Turn the label into a 1x1xN_CLASSES array of indicators (i.e. 
one-hot vector), in order to use a categorical crossentropy-loss function. Return a tuple of (predictors, indicators where each is a three dimensional array; the first two dimensions are spatial x, y (i.e. 1x1 kernel). End of explanation """ from tensorflow import keras # Define the layers in the model. Note the 1x1 kernels. model = tf.keras.models.Sequential([ tf.keras.layers.Input((None, None, len(BANDS),)), tf.keras.layers.Conv2D(64, (1,1), activation=tf.nn.relu), tf.keras.layers.Dropout(0.1), tf.keras.layers.Conv2D(N_CLASSES, (1,1), activation=tf.nn.softmax) ]) # Compile the model with the specified loss and optimizer functions. model.compile(optimizer=tf.keras.optimizers.Adam(), loss='categorical_crossentropy', metrics=['accuracy']) # Fit the model to the training data. Lucky number 7. model.fit(x=input_dataset, epochs=7) """ Explanation: Model setup Make a densely-connected convolutional model, where the convolution occurs in a 1x1 kernel. This is exactly analogous to the model generated in this example notebook, but operates in a convolutional manner in a 1x1 kernel. This allows Earth Engine to apply the model spatially, as demonstrated below. Note that the model used here is purely for demonstration purposes and hasn't gone through any performance tuning. Create the Keras model Before we create the model, there's still a wee bit of pre-processing to get the data into the right input shape and a format that can be used with cross-entropy loss. Specifically, Keras expects a list of inputs and a one-hot vector for the class. (See the Keras loss function docs, the TensorFlow categorical identity docs and the tf.one_hot docs for details). Here we will use a simple neural network model with a 64 node hidden layer. Once the dataset has been prepared, define the model, compile it, fit it to the training data. See the Keras Sequential model guide for more details. 
End of explanation """ MODEL_DIR = 'gs://' + OUTPUT_BUCKET + '/demo_pixel_model' model.save(MODEL_DIR, save_format='tf') """ Explanation: Save the trained model Export the trained model to TensorFlow SavedModel format in your cloud storage bucket. The Cloud Platform storage browser is useful for checking on these saved models. End of explanation """ from tensorflow.python.tools import saved_model_utils meta_graph_def = saved_model_utils.get_meta_graph_def(MODEL_DIR, 'serve') inputs = meta_graph_def.signature_def['serving_default'].inputs outputs = meta_graph_def.signature_def['serving_default'].outputs # Just get the first thing(s) from the serving signature def. i.e. this # model only has a single input and a single output. input_name = None for k,v in inputs.items(): input_name = v.name break output_name = None for k,v in outputs.items(): output_name = v.name break # Make a dictionary that maps Earth Engine outputs and inputs to # AI Platform inputs and outputs, respectively. import json input_dict = "'" + json.dumps({input_name: "array"}) + "'" output_dict = "'" + json.dumps({output_name: "output"}) + "'" print(input_dict) print(output_dict) """ Explanation: EEification EEIfication prepares the model for hosting on Google AI Platform. Learn more about EEification from this doc. First, get (and SET) input and output names of the nodes. CHANGE THE OUTPUT NAME TO SOMETHING THAT MAKES SENSE FOR YOUR MODEL! Keep the input name of 'array', which is how you'll pass data into the model (as an array image). End of explanation """ # Put the EEified model next to the trained model directory. EEIFIED_DIR = 'gs://' + OUTPUT_BUCKET + '/eeified_pixel_model' # You need to set the project before using the model prepare command. 
!earthengine set_project {PROJECT} !earthengine model prepare --source_dir {MODEL_DIR} --dest_dir {EEIFIED_DIR} --input {input_dict} --output {output_dict} """ Explanation: Run the EEifier The actual EEification is handled by the earthengine model prepare command. Note that you will need to set your Cloud Project prior to running the command. End of explanation """ MODEL_NAME = 'pixel_demo_model' VERSION_NAME = 'v0' !gcloud ai-platform models create {MODEL_NAME} \ --project {PROJECT} \ --region {REGION} !gcloud ai-platform versions create {VERSION_NAME} \ --project {PROJECT} \ --region {REGION} \ --model {MODEL_NAME} \ --origin {EEIFIED_DIR} \ --framework "TENSORFLOW" \ --runtime-version=2.3 \ --python-version=3.7 """ Explanation: Deploy and host the EEified model on AI Platform Now there is another TensorFlow SavedModel stored in EEIFIED_DIR ready for hosting by AI Platform. Do that from the gcloud command line tool, installed in the Colab runtime by default. Be sure to specify a regional model with the REGION parameter. Note that the MODEL_NAME must be unique. If you already have a model by that name, either name a new model or a new version of the old model. The Cloud Console AI Platform models page is useful for monitoring your models. If you change anything about the trained model, you'll need to re-EEify it and create a new version! End of explanation """ # Cloud masking function. def maskL8sr(image): cloudShadowBitMask = ee.Number(2).pow(3).int() cloudsBitMask = ee.Number(2).pow(5).int() qa = image.select('pixel_qa') mask = qa.bitwiseAnd(cloudShadowBitMask).eq(0).And( qa.bitwiseAnd(cloudsBitMask).eq(0)) return image.updateMask(mask).select(BANDS).divide(10000) # The image input data is a 2018 cloud-masked median composite. image = L8SR.filterDate('2018-01-01', '2018-12-31').map(maskL8sr).median() # Get a map ID for display in folium. 
rgb_vis = {'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 0.3, 'format': 'png'} mapid = image.getMapId(rgb_vis) # Turn into an array image for input to the model. array_image = image.float().toArray() # Point to the model hosted on AI Platform. If you specified a region other # than the default (us-central1) at model creation, specify it here. model = ee.Model.fromAiPlatformPredictor( projectName=PROJECT, modelName=MODEL_NAME, version=VERSION_NAME, # Can be anything, but don't make it too big. inputTileSize=[8, 8], # Keep this the same as your training data. proj=ee.Projection('EPSG:4326').atScale(30), fixInputProj=True, # Note the names here need to match what you specified in the # output dictionary you passed to the EEifier. outputBands={'output': { 'type': ee.PixelType.float(), 'dimensions': 1 } }, ) # model.predictImage outputs a one dimensional array image that # packs the output nodes of your model into an array. These # are class probabilities that you need to unpack into a # multiband image with arrayFlatten(). If you want class # labels, use arrayArgmax() as follows. predictions = model.predictImage(array_image) probabilities = predictions.arrayFlatten([['bare', 'veg', 'water']]) label = predictions.arrayArgmax().arrayGet([0]).rename('label') # Get map IDs for display in folium. probability_vis = { 'bands': ['bare', 'veg', 'water'], 'max': 0.5, 'format': 'png' } label_vis = { 'palette': ['red', 'green', 'blue'], 'min': 0, 'max': 2, 'format': 'png' } probability_mapid = probabilities.getMapId(probability_vis) label_mapid = label.getMapId(label_vis) # Visualize the input imagery and the predictions. 
map = folium.Map(location=[37.6413, -122.2582], zoom_start=11) folium.TileLayer( tiles=mapid['tile_fetcher'].url_format, attr='Map Data &copy; <a href="https://earthengine.google.com/">Google Earth Engine</a>', overlay=True, name='median composite', ).add_to(map) folium.TileLayer( tiles=label_mapid['tile_fetcher'].url_format, attr='Map Data &copy; <a href="https://earthengine.google.com/">Google Earth Engine</a>', overlay=True, name='predicted label', ).add_to(map) folium.TileLayer( tiles=probability_mapid['tile_fetcher'].url_format, attr='Map Data &copy; <a href="https://earthengine.google.com/">Google Earth Engine</a>', overlay=True, name='probability', ).add_to(map) map.add_child(folium.LayerControl()) map """ Explanation: Connect to the hosted model from Earth Engine Generate the input imagery. This should be done in exactly the same way as the training data were generated. See this example notebook for details. Connect to the hosted model. Use the model to make predictions. Display the results. Note that it takes the model a couple minutes to spin up and make predictions. End of explanation """
deepmind/deepmind-research
nowcasting/Open_sourced_dataset_and_model_snapshot_for_precipitation_nowcasting.ipynb
apache-2.0
!pip -q install tensorflow~=2.5.0 numpy~=1.19.5 matplotlib~=3.2.2 tensorflow_hub~=0.12.0 cartopy~=0.19.0 # Workaround for cartopy crashes due to the shapely installed by default in # google colab kernel (https://github.com/anitagraser/movingpandas/issues/81): !pip uninstall -y shapely !pip install shapely --no-binary shapely """ Explanation: Open-sourced dataset and model snapshot for precipitation nowcasting, accompanying the paper Skillful Precipitation Nowcasting using Deep Generative Models of Radar, Ravuri et al. 2021. This colab contains: * Code to read the dataset using Tensorflow 2, with documentation of the available splits, variants and fields * Example plots and animations of the data using matplotlib and cartopy * A TF-Hub snapshot of the model from the paper * Example code to load this model and use it to make predictions. It has been tested in a public Google colab kernel. How to run this notebook All sections with the exception of 'Making predictions on a row from the full-frame test set (1536x1280)' can be evaluated on a free public Colab kernel. The final section requires more RAM than is available with a free kernel. To evaluate these cells you can either run your own local kernel (with >= 24GB of RAM), or upgrade to Colab Pro. To launch a local colab kernel, please follow these instructions. License and attribution Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
The datasets and the model snapshots associated with this colab are made available for use under the terms of the Creative Commons Attribution 4.0 International License. This colab and the associated model snapshots are Copyright 2021 DeepMind Technologies Limited. The associated datasets contain public sector information licensed by the Met Office under the UK Open Government Licence v3.0. Library dependency installs and imports The following libraries are required. You can skip these pip install cells if your kernel already has them installed. End of explanation """ import datetime import os import cartopy import matplotlib import matplotlib.pyplot as plt from matplotlib import animation import numpy as np import shapely.geometry as sgeom import tensorflow as tf import tensorflow_hub from google.colab import auth """ Explanation: Imports: End of explanation """ # This Google Cloud Storage (GCS) bucket is free to access and contains an # example subset of the full dataset (just the first shard of each # split/variant): EXAMPLE_DATASET_BUCKET_PATH = "gs://dm-nowcasting-example-data/datasets/nowcasting_open_source_osgb/nimrod_osgb_1000m_yearly_splits/radar/20200718" # This bucket is requester-pays and will require authentication. It contains the # full dataset. We recommend downloading a local copy first and updating # ROOT_DATASET_DIR below to the local path. This should save on transfer costs # and speed up training. 
FULL_DATASET_BUCKET_PATH = "gs://dm-nowcasting/datasets/nowcasting_open_source_osgb/nimrod_osgb_1000m_yearly_splits/radar/20200718" # Update this as required: DATASET_ROOT_DIR = EXAMPLE_DATASET_BUCKET_PATH """ Explanation: Dataset location End of explanation """ auth.authenticate_user() """ Explanation: Use this to authenticate as required for access to GCS buckets: End of explanation """ _FEATURES = {name: tf.io.FixedLenFeature([], dtype) for name, dtype in [ ("radar", tf.string), ("sample_prob", tf.float32), ("osgb_extent_top", tf.int64), ("osgb_extent_left", tf.int64), ("osgb_extent_bottom", tf.int64), ("osgb_extent_right", tf.int64), ("end_time_timestamp", tf.int64), ]} _SHAPE_BY_SPLIT_VARIANT = { ("train", "random_crops_256"): (24, 256, 256, 1), ("valid", "subsampled_tiles_256_20min_stride"): (24, 256, 256, 1), ("test", "full_frame_20min_stride"): (24, 1536, 1280, 1), ("test", "subsampled_overlapping_padded_tiles_512_20min_stride"): (24, 512, 512, 1), } _MM_PER_HOUR_INCREMENT = 1/32. _MAX_MM_PER_HOUR = 128. _INT16_MASK_VALUE = -1 def parse_and_preprocess_row(row, split, variant): result = tf.io.parse_example(row, _FEATURES) shape = _SHAPE_BY_SPLIT_VARIANT[(split, variant)] radar_bytes = result.pop("radar") radar_int16 = tf.reshape(tf.io.decode_raw(radar_bytes, tf.int16), shape) mask = tf.not_equal(radar_int16, _INT16_MASK_VALUE) radar = tf.cast(radar_int16, tf.float32) * _MM_PER_HOUR_INCREMENT radar = tf.clip_by_value( radar, _INT16_MASK_VALUE * _MM_PER_HOUR_INCREMENT, _MAX_MM_PER_HOUR) result["radar_frames"] = radar result["radar_mask"] = mask return result def reader(split="train", variant="random_crops_256", shuffle_files=False): """Reader for open-source nowcasting datasets. Args: split: Which yearly split of the dataset to use: "train": Data from 2016 - 2018, excluding the first day of each month. "valid": Data from 2016 - 2018, only the first day of the month. "test": Data from 2019. variant: Which variant to use. 
The available variants depend on the split: "random_crops_256": Available for the training split. 24x256x256 pixel crops, sampled with a bias towards crops containing rainfall. Crops at all spatial and temporal offsets were able to be sampled, some crops may overlap. "subsampled_tiles_256_20min_stride": Available for the validation set. Non-spatially-overlapping 24x256x256 pixel crops, subsampled from a regular spatial grid with stride 256x256 pixels, and a temporal stride of 20mins (4 timesteps at 5 minute resolution). Sampling favours crops containing rainfall. "subsampled_overlapping_padded_tiles_512_20min_stride": Available for the test set. Overlapping 24x512x512 pixel crops, subsampled from a regular spatial grid with stride 64x64 pixels, and a temporal stride of 20mins (4 timesteps at 5 minute resolution). Subsampling favours crops containing rainfall. These crops include extra spatial context for a fairer evaluation of the PySTEPS baseline, which benefits from this extra context. Our other models only use the central 256x256 pixels of these crops. "full_frame_20min_stride": Available for the test set. Includes full frames at 24x1536x1280 pixels, every 20 minutes with no additional subsampling. shuffle_files: Whether to shuffle the shard files of the dataset non-deterministically before interleaving them. Recommended for the training set to improve mixing and read performance (since non-deterministic parallel interleave is then enabled). Returns: A tf.data.Dataset whose rows are dicts with the following keys: "radar_frames": Shape TxHxWx1, float32. Radar-based estimates of ground-level precipitation, in units of mm/hr. Pixels which are masked will take on a value of -1/32 and should be excluded from use as evaluation targets. The coordinate reference system used is OSGB36, with a spatial resolution of 1000 OSGB36 coordinate units (approximately equal to 1km). The temporal resolution is 5 minutes. "radar_mask": Shape TxHxWx1, bool. 
A binary mask which is False for pixels that are unobserved / unable to be inferred from radar measurements (e.g. due to being too far from a radar site). This mask is usually static over time, but occasionally a whole radar site will drop in or out resulting in large changes to the mask, and more localised changes can happen too. "sample_prob": Scalar float. The probability with which the row was sampled from the overall pool available for sampling, as described above under 'variants'. We use importance weights proportional to 1/sample_prob when computing metrics on the validation and test set, to reduce bias due to the subsampling. "end_time_timestamp": Scalar int64. A timestamp for the final frame in the example, in seconds since the UNIX epoch (1970-01-01 00:00:00 UTC). "osgb_extent_left", "osgb_extent_right", "osgb_extent_top", "osgb_extent_bottom": Scalar int64s. Spatial extent for the crop in the OSGB36 coordinate reference system. """ shards_glob = os.path.join(DATASET_ROOT_DIR, split, variant, "*.tfrecord.gz") shard_paths = tf.io.gfile.glob(shards_glob) shards_dataset = tf.data.Dataset.from_tensor_slices(shard_paths) if shuffle_files: shards_dataset = shards_dataset.shuffle(buffer_size=len(shard_paths)) return ( shards_dataset .interleave(lambda x: tf.data.TFRecordDataset(x, compression_type="GZIP"), num_parallel_calls=tf.data.AUTOTUNE, deterministic=not shuffle_files) .map(lambda row: parse_and_preprocess_row(row, split, variant), num_parallel_calls=tf.data.AUTOTUNE) # Do your own subsequent repeat, shuffle, batch, prefetch etc as required. 
) """ Explanation: Dataset reader code End of explanation """ help(reader) """ Explanation: Dataset reader documentation End of explanation """ row = next(iter(reader(split="train", variant="random_crops_256"))) {k: (v.dtype, v.shape) for k, v in row.items()} """ Explanation: Reading a row from the training set and inspecting types/shapes/values End of explanation """ {k: v.numpy() for k, v in row.items() if v.shape.ndims == 0} """ Explanation: Values for scalar features: End of explanation """ datetime.datetime.utcfromtimestamp(row["end_time_timestamp"]).isoformat() """ Explanation: Decoding the end_time_timestamp: End of explanation """ matplotlib.rc('animation', html='jshtml') def plot_animation(field, figsize=None, vmin=0, vmax=10, cmap="jet", **imshow_args): fig = plt.figure(figsize=figsize) ax = plt.axes() ax.set_axis_off() plt.close() # Prevents extra axes being plotted below animation img = ax.imshow(field[0, ..., 0], vmin=vmin, vmax=vmax, cmap=cmap, **imshow_args) def animate(i): img.set_data(field[i, ..., 0]) return (img,) return animation.FuncAnimation( fig, animate, frames=field.shape[0], interval=24, blit=False) class ExtendedOSGB(cartopy.crs.OSGB): """MET office radar data uses OSGB36 with an extended bounding box.""" def __init__(self): super().__init__(approx=False) @property def x_limits(self): return (-405000, 1320000) @property def y_limits(self): return (-625000, 1550000) @property def boundary(self): x0, x1 = self.x_limits y0, y1 = self.y_limits return sgeom.LinearRing([(x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)]) def plot_rows_on_map(rows, field_name="radar_frames", timestep=0, num_rows=None, cbar_label=None, **imshow_kwargs): fig = plt.figure(figsize=(10, 10)) axes = fig.add_subplot(1, 1, 1, projection=ExtendedOSGB()) if num_rows is None: num_rows = next(iter(rows.values())).shape[0] for b in range(num_rows): extent = (rows["osgb_extent_left"][b].numpy(), rows["osgb_extent_right"][b].numpy(), rows["osgb_extent_bottom"][b].numpy(), 
rows["osgb_extent_top"][b].numpy()) im = axes.imshow(rows[field_name][b, timestep, ..., 0].numpy(), extent=extent, **imshow_kwargs) axes.set_xlim(*axes.projection.x_limits) axes.set_ylim(*axes.projection.y_limits) axes.set_facecolor("black") axes.gridlines(alpha=0.5) axes.coastlines(resolution="50m", color="white") if cbar_label: cbar = fig.colorbar(im) cbar.set_label(cbar_label) return fig def plot_animation_on_map(row): fig = plt.figure(figsize=(10, 10)) axes = fig.add_subplot(1, 1, 1, projection=ExtendedOSGB()) plt.close() # Prevents extra axes being plotted below animation axes.gridlines(alpha=0.5) axes.coastlines(resolution="50m", color="white") extent = (row["osgb_extent_left"].numpy(), row["osgb_extent_right"].numpy(), row["osgb_extent_bottom"].numpy(), row["osgb_extent_top"].numpy()) img = axes.imshow( row["radar_frames"][0, ..., 0].numpy(), extent=extent, vmin=0, vmax=15., cmap="jet") cbar = fig.colorbar(img) cbar.set_label("Precipitation, mm/hr") def animate(i): return img.set_data(row["radar_frames"][i, ..., 0].numpy()), return animation.FuncAnimation( fig, animate, frames=row["radar_frames"].shape[0], interval=24, blit=False) def plot_mask_on_map(row): fig = plt.figure(figsize=(10, 10)) axes = fig.add_subplot(1, 1, 1, projection=ExtendedOSGB()) axes.gridlines(alpha=0.5) axes.coastlines(resolution="50m", color="black") extent = (row["osgb_extent_left"].numpy(), row["osgb_extent_right"].numpy(), row["osgb_extent_bottom"].numpy(), row["osgb_extent_top"].numpy()) img = axes.imshow( row["radar_mask"][0, ..., 0].numpy(), extent=extent, vmin=0, vmax=1, cmap="viridis") """ Explanation: Visualization helpers End of explanation """ plot_animation(row["radar_frames"].numpy()) """ Explanation: Visualizing rows Animation of a single row from the random_crops_256 training set (sequence of 24 frames at 256x256) End of explanation """ plt.imshow(row["radar_mask"][0, ..., 0].numpy(), vmin=0, vmax=1); """ Explanation: And its mask. 
This may not always be interesting, sometimes it will be all ones. I only plot the first frame as this is usually static over time. End of explanation """ dataset = reader(split="test", variant="full_frame_20min_stride") full_frame_test_set_row = next(iter(dataset)) plot_animation_on_map(full_frame_test_set_row) """ Explanation: Plotting an animation of a row from the full-frame test set End of explanation """ plot_mask_on_map(full_frame_test_set_row) """ Explanation: And just its mask: End of explanation """ BATCH_SIZE = 60 dataset = reader(split="train", variant="random_crops_256") rows = next(iter(dataset.batch(BATCH_SIZE))) plot_rows_on_map(rows, field_name="radar_frames", num_rows=10, vmin=0, vmax=15., cmap="jet", cbar_label="Precipitation, mm/hr"); """ Explanation: Plotting a few different crops from the training set on the same map, using their OSGB extents. Note these will have been sampled at different timestamps so won't be consistent with each other. End of explanation """ plot_rows_on_map(rows, field_name="radar_mask", vmin=0, vmax=1, alpha=0.5, cmap="spring"); """ Explanation: And plotting their masks, which will be more consistent with each other since they change less frequently. End of explanation """ TFHUB_BASE_PATH = "gs://dm-nowcasting-example-data/tfhub_snapshots" """ Explanation: Making predictions using model loaded from TF-Hub snapshots Location of snapshots: End of explanation """ def load_module(input_height, input_width): """Load a TF-Hub snapshot of the 'Generative Method' model.""" hub_module = tensorflow_hub.load( os.path.join(TFHUB_BASE_PATH, f"{input_height}x{input_width}")) # Note this has loaded a legacy TF1 model for running under TF2 eager mode. # This means we need to access the module via the "signatures" attribute. See # https://github.com/tensorflow/hub/blob/master/docs/migration_tf2.md#using-lower-level-apis # for more information. 
return hub_module.signatures['default'] def predict(module, input_frames, num_samples=1, include_input_frames_in_result=False): """Make predictions from a TF-Hub snapshot of the 'Generative Method' model. Args: module: One of the raw TF-Hub modules returned by load_module above. input_frames: Shape (T_in,H,W,C), where T_in = 4. Input frames to condition the predictions on. num_samples: The number of different samples to draw. include_input_frames_in_result: If True, will return a total of 22 frames along the time axis, the 4 input frames followed by 18 predicted frames. Otherwise will only return the 18 predicted frames. Returns: A tensor of shape (num_samples,T_out,H,W,C), where T_out is either 18 or 22 as described above. """ input_frames = tf.math.maximum(input_frames, 0.) # Add a batch dimension and tile along it to create a copy of the input for # each sample: input_frames = tf.expand_dims(input_frames, 0) input_frames = tf.tile(input_frames, multiples=[num_samples, 1, 1, 1, 1]) # Sample the latent vector z for each sample: _, input_signature = module.structured_input_signature z_size = input_signature['z'].shape[1] z_samples = tf.random.normal(shape=(num_samples, z_size)) inputs = { "z": z_samples, "labels$onehot" : tf.ones(shape=(num_samples, 1)), "labels$cond_frames" : input_frames } samples = module(**inputs)['default'] if not include_input_frames_in_result: # The module returns the input frames alongside its sampled predictions, we # slice out just the predictions: samples = samples[:, NUM_INPUT_FRAMES:, ...] # Take positive values of rainfall only. samples = tf.math.maximum(samples, 0.) return samples # Fixed values supported by the snapshotted model. NUM_INPUT_FRAMES = 4 NUM_TARGET_FRAMES = 18 def extract_input_and_target_frames(radar_frames): """Extract input and target frames from a dataset row's radar_frames.""" # We align our targets to the end of the window, and inputs precede targets. 
input_frames = radar_frames[-NUM_TARGET_FRAMES-NUM_INPUT_FRAMES : -NUM_TARGET_FRAMES] target_frames = radar_frames[-NUM_TARGET_FRAMES : ] return input_frames, target_frames def horizontally_concatenate_batch(samples): n, t, h, w, c = samples.shape # N,T,H,W,C => T,H,N,W,C => T,H,N*W,C return tf.reshape(tf.transpose(samples, [1, 2, 0, 3, 4]), [t, h, n*w, c]) """ Explanation: Helper code for loading snapshots and making predictions with them End of explanation """ module = load_module(256, 256) row = next(iter(reader(split="valid", variant="subsampled_tiles_256_20min_stride"))) num_samples = 5 input_frames, target_frames = extract_input_and_target_frames(row["radar_frames"]) samples = predict(module, input_frames, num_samples=num_samples, include_input_frames_in_result=True) """ Explanation: Making predictions for a row from the validation set (256x256 crops) End of explanation """ plot_animation(horizontally_concatenate_batch(samples), figsize=(4*num_samples, 4)) """ Explanation: We will plot an animation of 5 different samples, including the input frames first (so all 5 will start the same). You can see they end up in different places. End of explanation """ # This is the same model with same parameters as above; we have had to export # separate copies of the graph for each input size as the input size is # unfortunately hardcoded into the graph as static shapes. module = load_module(1536, 1280) full_frame_test_set_row = next(iter( reader(split="test", variant="full_frame_20min_stride"))) num_samples = 2 input_frames, target_frames = extract_input_and_target_frames( full_frame_test_set_row["radar_frames"]) samples = predict(module, input_frames, num_samples=num_samples, include_input_frames_in_result=True) """ Explanation: Making predictions on a row from the full-frame test set (1536x1280) Warning: this will require more RAM than is available in a free public colab kernel, even if you reduce num_samples to 1. 
End of explanation """ row_with_predictions = full_frame_test_set_row.copy() row_with_predictions["radar_frames"] = samples[0] plot_animation_on_map(row_with_predictions) """ Explanation: Plotting two different predicted samples following on from the input frames. The first sample: End of explanation """ row_with_predictions["radar_frames"] = samples[1] plot_animation_on_map(row_with_predictions) """ Explanation: And the second sample: End of explanation """
JakeColtman/BayesianSurvivalAnalysis
PyMC Part 1 Done.ipynb
mit
running_id = 0
# Seed row: the [0] placeholder lets the first `cols[0] == output[-1][0]`
# comparison run without an IndexError; it is sliced off below.
output = [[0]]
with open("E:/output.txt") as file_open:
    for row in file_open.read().split("\n"):
        cols = row.split(",")
        # NOTE(review): on the first iteration this compares a string id with
        # the int 0 seed — always False, so the seed is never extended. Confirm
        # this is the intended way to skip the placeholder.
        if cols[0] == output[-1][0]:
            # Same user seen again: record the conversion timestamp and mark
            # the row as an observed event.
            output[-1].append(cols[1])
            output[-1].append(True)
        else:
            output.append(cols)
# Drop the [0] seed row.
output = output[1:]
for row in output:
    # Rows with no conversion (6 fields) are right-censored at the study end
    # date; pad with the end timestamp and event=False.
    if len(row) == 6:
        row += [datetime(2016, 5, 3, 20, 36, 8, 92165), False]
# NOTE(review): this slices off the first *data* row and the trailing row left
# by splitting on "\n" (usually empty) — the second [1:] looks suspicious on
# top of the seed removal above; verify against the raw file.
output = output[1:-1]
def convert_to_days(dt) -> float:
    """Convert a timedelta into a float number of days.

    A zero lifetime (same-day rows) is mapped to 23.0 days — presumably a
    sentinel for 23 hours / censored rows; TODO confirm intent.
    """
    day_diff = dt / np.timedelta64(1, 'D')
    if day_diff == 0:
        return 23.0
    else:
        return day_diff
df = pd.DataFrame(output, columns=["id", "advert_time", "male","age","search","brand","conversion_time","event"])
# Lifetime = time between seeing the advert and converting (or the end date).
df["lifetime"] = pd.to_datetime(df["conversion_time"]) - pd.to_datetime(df["advert_time"])
df["lifetime"] = df["lifetime"].apply(convert_to_days)
df["male"] = df["male"].astype(int)
df["search"] = df["search"].astype(int)
df["brand"] = df["brand"].astype(int)
df["age"] = df["age"].astype(int)
df["event"] = df["event"].astype(int)
# NOTE(review): positional `axis` argument to drop() is deprecated in newer
# pandas — axis=1 drops columns here.
df = df.drop('advert_time', 1)
df = df.drop('conversion_time', 1)
df = df.set_index("id")
df = df.dropna(thresh=2)
df.median()
###Parametric Bayes
#Shout out to Cam Davidson-Pilon
## Example fully worked model using toy data
## Adapted from http://blog.yhat.com/posts/estimating-user-lifetimes-with-pymc.html
## Note that we've made some corrections
N = 2500
##Generate some random data
# True lifetimes ~ Weibull(alpha=2, beta=5); births uniform on [0, 10).
lifetime = pm.rweibull( 2, 5, size = N )
birth = pm.runiform(0, 10, N)
# Anyone whose lifetime runs past t=10 is right-censored at the study end.
censor = ((birth + lifetime) >= 10)
lifetime_ = lifetime.copy()
lifetime_[censor] = 10 - birth[censor]
alpha = pm.Uniform('alpha', 0, 20)
beta = pm.Uniform('beta', 0, 20)
@pm.observed
def survival(value=lifetime_, alpha = alpha, beta = beta ):
    # Censored Weibull log-likelihood: uncensored rows contribute the log-pdf
    # (density term gated by (1-censor)); every row contributes the -(t/beta)^alpha
    # log-survival term.
    return sum( (1-censor)*(log( alpha/beta) + (alpha-1)*log(value/beta)) - (value/beta)**(alpha))
# 50000 iterations with the first 30000 discarded as burn-in.
mcmc = pm.MCMC([alpha, beta, survival ] )
mcmc.sample(50000, 30000)
pm.Matplot.plot(mcmc)
mcmc.trace("alpha")[:]
"""
Explanation: The first step in any data analysis is acquiring and munging the data Our starting data set can be found here: http://jakecoltman.com in
the pyData post It is designed to be roughly similar to the output from DCM's path to conversion
Download the file and transform it into something with the columns:
id,lifetime,age,male,event,search,brand
where lifetime is the total time that we observed someone not convert for and event should be 1 if we see a conversion and 0 if we don't. Note that all values should be converted into ints
It is useful to note that end_date = datetime.datetime(2016, 5, 3, 20, 36, 8, 92165)
End of explanation
"""
# censor is 1 for rows where we never saw a conversion (event == 0).
censor = np.array(df["event"].apply(lambda x: 0 if x else 1).tolist())
alpha = pm.Uniform("alpha", 0,50)
beta = pm.Uniform("beta", 0,50)
@pm.observed
def survival(value=df["lifetime"], alpha = alpha, beta = beta ):
    # Same censored Weibull log-likelihood as the toy example above, applied
    # to the real lifetimes.
    return sum( (1-censor)*(np.log( alpha/beta) + (alpha-1)*np.log(value/beta)) - (value/beta)**(alpha))
mcmc = pm.MCMC([alpha, beta, survival ] )
mcmc.sample(10000)
def weibull_median(alpha, beta) -> float:
    """Median of a Weibull(alpha, beta): beta * (ln 2)^(1/alpha)."""
    # NOTE(review): relies on a bare `log` being in scope (e.g. via a pylab
    # import) — np.log is used just above; confirm which is intended.
    return beta * ((log(2)) ** ( 1 / alpha))
# Posterior distribution of the median, one value per MCMC sample.
plt.hist([weibull_median(x[0], x[1]) for x in zip(mcmc.trace("alpha"), mcmc.trace("beta"))])
"""
Explanation: Problems:
1 - Try to fit your data from section 1
2 - Use the results to plot the distribution of the median
Note that the media of a Weibull distribution is:
$$β(log 2)^{1/α}$$
End of explanation
"""
#### Adjust burn and thin, both paramters of the mcmc sample function
#### Narrow and broaden prior
"""
Explanation: Problems:
4 - Try adjusting the number of samples for burning and thinnning
5 - Try adjusting the prior and see how it affects the estimate
End of explanation
"""
#### Hypothesis testing
"""
Explanation: Problems:
7 - Try testing whether the median is greater than a different values
End of explanation
"""
### Fit a cox proprtional hazards model
"""
Explanation: If we want to look at covariates, we need a new approach.  We'll use Cox proprtional hazards, a very popular regression model.
To fit in python we use the module lifelines: http://lifelines.readthedocs.io/en/latest/ End of explanation """ #### Plot baseline hazard function #### Predict #### Plot survival functions for different covariates #### Plot some odds """ Explanation: Once we've fit the data, we need to do something useful with it. Try to do the following things: 1 - Plot the baseline survival function 2 - Predict the functions for a particular set of features 3 - Plot the survival function for two different set of features 4 - For your results in part 3 caculate how much more likely a death event is for one than the other for a given period of time End of explanation """ #### BMA Coefficient values #### Different priors """ Explanation: Model selection Difficult to do with classic tools (here) Problem: 1 - Calculate the BMA coefficient values 2 - Try running with different priors End of explanation """
angelmtenor/deep-learning
dcgan-svhn/DCGAN.ipynb
mit
%matplotlib inline import pickle as pkl import matplotlib.pyplot as plt import numpy as np from scipy.io import loadmat import tensorflow as tf !mkdir data """ Explanation: Deep Convolutional GANs In this notebook, you'll build a GAN using convolutional layers in the generator and discriminator. This is called a Deep Convolutional GAN, or DCGAN for short. The DCGAN architecture was first explored last year and has seen impressive results in generating new images, you can read the original paper here. You'll be training DCGAN on the Street View House Numbers (SVHN) dataset. These are color images of house numbers collected from Google street view. SVHN images are in color and much more variable than MNIST. So, we'll need a deeper and more powerful network. This is accomplished through using convolutional layers in the discriminator and generator. It's also necessary to use batch normalization to get the convolutional networks to train. The only real changes compared to what you saw previously are in the generator and discriminator, otherwise the rest of the implementation is the same. 
End of explanation """ from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm data_dir = 'data/' if not isdir(data_dir): raise Exception("Data directory doesn't exist!") class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile(data_dir + "train_32x32.mat"): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='SVHN Training Set') as pbar: urlretrieve( 'http://ufldl.stanford.edu/housenumbers/train_32x32.mat', data_dir + 'train_32x32.mat', pbar.hook) if not isfile(data_dir + "test_32x32.mat"): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='SVHN Training Set') as pbar: urlretrieve( 'http://ufldl.stanford.edu/housenumbers/test_32x32.mat', data_dir + 'test_32x32.mat', pbar.hook) """ Explanation: Getting the data Here you can download the SVHN dataset. Run the cell above and it'll download to your machine. End of explanation """ trainset = loadmat(data_dir + 'train_32x32.mat') testset = loadmat(data_dir + 'test_32x32.mat') """ Explanation: These SVHN files are .mat files typically used with Matlab. However, we can load them in with scipy.io.loadmat which we imported above. End of explanation """ idx = np.random.randint(0, trainset['X'].shape[3], size=36) fig, axes = plt.subplots(6, 6, sharex=True, sharey=True, figsize=(5,5),) for ii, ax in zip(idx, axes.flatten()): ax.imshow(trainset['X'][:,:,:,ii], aspect='equal') ax.xaxis.set_visible(False) ax.yaxis.set_visible(False) plt.subplots_adjust(wspace=0, hspace=0) """ Explanation: Here I'm showing a small sample of the images. Each of these is 32x32 with 3 color channels (RGB). These are the real images we'll pass to the discriminator and what the generator will eventually fake. 
End of explanation """ def scale(x, feature_range=(-1, 1)): # scale to (0, 1) x = ((x - x.min())/(255 - x.min())) # scale to feature_range min, max = feature_range x = x * (max - min) + min return x class Dataset: def __init__(self, train, test, val_frac=0.5, shuffle=False, scale_func=None): split_idx = int(len(test['y'])*(1 - val_frac)) self.test_x, self.valid_x = test['X'][:,:,:,:split_idx], test['X'][:,:,:,split_idx:] self.test_y, self.valid_y = test['y'][:split_idx], test['y'][split_idx:] self.train_x, self.train_y = train['X'], train['y'] self.train_x = np.rollaxis(self.train_x, 3) self.valid_x = np.rollaxis(self.valid_x, 3) self.test_x = np.rollaxis(self.test_x, 3) if scale_func is None: self.scaler = scale else: self.scaler = scale_func self.shuffle = shuffle def batches(self, batch_size): if self.shuffle: idx = np.arange(len(dataset.train_x)) np.random.shuffle(idx) self.train_x = self.train_x[idx] self.train_y = self.train_y[idx] n_batches = len(self.train_y)//batch_size for ii in range(0, len(self.train_y), batch_size): x = self.train_x[ii:ii+batch_size] y = self.train_y[ii:ii+batch_size] yield self.scaler(x), y """ Explanation: Here we need to do a bit of preprocessing and getting the images into a form where we can pass batches to the network. First off, we need to rescale the images to a range of -1 to 1, since the output of our generator is also in that range. We also have a set of test and validation images which could be used if we're trying to identify the numbers in the images. End of explanation """ def model_inputs(real_dim, z_dim): inputs_real = tf.placeholder(tf.float32, (None, *real_dim), name='input_real') inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z') return inputs_real, inputs_z """ Explanation: Network Inputs Here, just creating some placeholders like normal. 
End of explanation
"""
def generator(z, output_dim, reuse=False, alpha=0.2, training=True):
    """Build the DCGAN generator: latent z -> 32x32x`output_dim` image in (-1, 1).

    `alpha` is the leaky-ReLU slope; `training` controls whether batch norm
    uses batch statistics (True) or population statistics (False).
    """
    with tf.variable_scope('generator', reuse=reuse):
        # First fully connected layer
        x1 = tf.layers.dense(z, 4*4*512)
        # Reshape it to start the convolutional stack
        x1 = tf.reshape(x1, (-1, 4, 4, 512))
        x1 = tf.layers.batch_normalization(x1, training=training)
        # tf.maximum(alpha*x, x) is the leaky-ReLU activation.
        x1 = tf.maximum(alpha * x1, x1)
        # 4x4x512 now

        x2 = tf.layers.conv2d_transpose(x1, 256, 5, strides=2, padding='same')
        x2 = tf.layers.batch_normalization(x2, training=training)
        x2 = tf.maximum(alpha * x2, x2)
        # 8x8x256 now

        x3 = tf.layers.conv2d_transpose(x2, 128, 5, strides=2, padding='same')
        x3 = tf.layers.batch_normalization(x3, training=training)
        x3 = tf.maximum(alpha * x3, x3)
        # 16x16x128 now

        # Output layer (no batch norm; tanh maps into the (-1, 1) data range)
        logits = tf.layers.conv2d_transpose(x3, output_dim, 5, strides=2, padding='same')
        # 32x32x3 now

        out = tf.tanh(logits)

        return out
"""
Explanation: Generator
Here you'll build the generator network. The input will be our noise vector z as before. Also as before, the output will be a $tanh$ output, but this time with size 32x32 which is the size of our SVHN images.
What's new here is we'll use convolutional layers to create our new images. The first layer is a fully connected layer which is reshaped into a deep and narrow layer, something like 4x4x1024 as in the original DCGAN paper. Then we use batch normalization and a leaky ReLU activation. Next is a transposed convolution where typically you'd halve the depth and double the width and height of the previous layer. Again, we use batch normalization and leaky ReLU. For each of these layers, the general scheme is convolution > batch norm > leaky ReLU.
You keep stack layers up like this until you get the final transposed convolution layer with shape 32x32x3. Below is the archicture used in the original DCGAN paper:
Note that the final layer here is 64x64x3, while for our SVHN dataset, we only want it to be 32x32x3.
End of explanation
"""
def discriminator(x, reuse=False, alpha=0.2):
    """Build the DCGAN discriminator: image -> (sigmoid probability, logits).

    Batch norm here always uses batch statistics (training=True) because the
    discriminator is only used to train the generator, never for inference.
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        # Input layer is 32x32x3
        # (no batch norm on the first layer, per the DCGAN recipe)
        x1 = tf.layers.conv2d(x, 64, 5, strides=2, padding='same')
        relu1 = tf.maximum(alpha * x1, x1)
        # 16x16x64

        x2 = tf.layers.conv2d(relu1, 128, 5, strides=2, padding='same')
        bn2 = tf.layers.batch_normalization(x2, training=True)
        relu2 = tf.maximum(alpha * bn2, bn2)
        # 8x8x128

        x3 = tf.layers.conv2d(relu2, 256, 5, strides=2, padding='same')
        bn3 = tf.layers.batch_normalization(x3, training=True)
        relu3 = tf.maximum(alpha * bn3, bn3)
        # 4x4x256

        # Flatten it
        flat = tf.reshape(relu3, (-1, 4*4*256))
        logits = tf.layers.dense(flat, 1)
        out = tf.sigmoid(logits)

        return out, logits
"""
Explanation: Discriminator
Here you'll build the discriminator. This is basically just a convolutional classifier like you've build before. The input to the discriminator are 32x32x3 tensors/images. You'll want a few convolutional layers, then a fully connected layer for the output. As before, we want a sigmoid output, and you'll need to return the logits as well. For the depths of the convolutional layers I suggest starting with 16, 32, 64 filters in the first layer, then double the depth as you add layers. Note that in the DCGAN paper, they did all the downsampling using only strided convolutional layers with no maxpool layers.
You'll also want to use batch normalization with tf.layers.batch_normalization on each layer except the first convolutional and output layers. Again, each layer should look something like convolution > batch norm > leaky ReLU.
Note: in this project, your batch normalization layers will always use batch statistics. (That is, always set training to True.) That's because we are only interested in using the discriminator to help train the generator. However, if you wanted to use the discriminator for inference later, then you would need to set the training parameter appropriately.
End of explanation """ def model_loss(input_real, input_z, output_dim, alpha=0.2): """ Get the loss for the discriminator and generator :param input_real: Images from the real dataset :param input_z: Z input :param out_channel_dim: The number of channels in the output image :return: A tuple of (discriminator loss, generator loss) """ g_model = generator(input_z, output_dim, alpha=alpha) d_model_real, d_logits_real = discriminator(input_real, alpha=alpha) d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, alpha=alpha) d_loss_real = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_model_real))) d_loss_fake = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake))) g_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_model_fake))) d_loss = d_loss_real + d_loss_fake return d_loss, g_loss """ Explanation: Model Loss Calculating the loss like before, nothing new here. 
End of explanation
"""
def model_opt(d_loss, g_loss, learning_rate, beta1):
    """
    Get optimization operations
    :param d_loss: Discriminator loss Tensor
    :param g_loss: Generator loss Tensor
    :param learning_rate: Learning Rate Placeholder
    :param beta1: The exponential decay rate for the 1st moment in the optimizer
    :return: A tuple of (discriminator training operation, generator training operation)
    """
    # Get weights and bias to update
    # Variables are partitioned by the variable_scope names used in
    # generator()/discriminator(), so each optimizer only touches its own net.
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
    g_vars = [var for var in t_vars if var.name.startswith('generator')]

    # Optimize
    # The UPDATE_OPS dependency forces the batch-norm moving-average updates to
    # run on every training step, before the weight updates.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)
        g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)

    return d_train_opt, g_train_opt
"""
Explanation: Optimizers
Not much new here, but notice how the train operations are wrapped in a with tf.control_dependencies block so the batch normalization layers can update their population statistics.
End of explanation """ def view_samples(epoch, samples, nrows, ncols, figsize=(5,5)): fig, axes = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols, sharey=True, sharex=True) for ax, img in zip(axes.flatten(), samples[epoch]): ax.axis('off') img = ((img - img.min())*255 / (img.max() - img.min())).astype(np.uint8) ax.set_adjustable('box-forced') im = ax.imshow(img, aspect='equal') plt.subplots_adjust(wspace=0, hspace=0) return fig, axes """ Explanation: Here is a function for displaying generated images. End of explanation """ def train(net, dataset, epochs, batch_size, print_every=10, show_every=100, figsize=(5,5)): saver = tf.train.Saver() sample_z = np.random.uniform(-1, 1, size=(72, z_size)) samples, losses = [], [] steps = 0 with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for e in range(epochs): for x, y in dataset.batches(batch_size): steps += 1 # Sample random noise for G batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size)) # Run optimizers _ = sess.run(net.d_opt, feed_dict={net.input_real: x, net.input_z: batch_z}) _ = sess.run(net.g_opt, feed_dict={net.input_z: batch_z, net.input_real: x}) if steps % print_every == 0: # At the end of each epoch, get the losses and print them out train_loss_d = net.d_loss.eval({net.input_z: batch_z, net.input_real: x}) train_loss_g = net.g_loss.eval({net.input_z: batch_z}) print("Epoch {}/{}...".format(e+1, epochs), "Discriminator Loss: {:.4f}...".format(train_loss_d), "Generator Loss: {:.4f}".format(train_loss_g)) # Save losses to view after training losses.append((train_loss_d, train_loss_g)) if steps % show_every == 0: gen_samples = sess.run( generator(net.input_z, 3, reuse=True, training=False), feed_dict={net.input_z: sample_z}) samples.append(gen_samples) _ = view_samples(-1, samples, 6, 12, figsize=figsize) plt.show() saver.save(sess, './checkpoints/generator.ckpt') with open('samples.pkl', 'wb') as f: pkl.dump(samples, f) return losses, samples """ Explanation: And another 
function we can use to train our network. Notice when we call generator to create the samples to display, we set training to False. That's so the batch normalization layers will use the population statistics rather than the batch statistics. Also notice that we set the net.input_real placeholder when we run the generator's optimizer. The generator doesn't actually use it, but we'd get an errror without it because of the tf.control_dependencies block we created in model_opt. End of explanation """ real_size = (32,32,3) z_size = 100 learning_rate = 0.0002 batch_size = 128 epochs = 25 alpha = 0.2 beta1 = 0.5 # Create the network net = GAN(real_size, z_size, learning_rate, alpha=alpha, beta1=beta1) dataset = Dataset(trainset, testset) losses, samples = train(net, dataset, epochs, batch_size, figsize=(10,5)) fig, ax = plt.subplots() losses = np.array(losses) plt.plot(losses.T[0], label='Discriminator', alpha=0.5) plt.plot(losses.T[1], label='Generator', alpha=0.5) plt.title("Training Losses") plt.legend() fig, ax = plt.subplots() losses = np.array(losses) plt.plot(losses.T[0], label='Discriminator', alpha=0.5) plt.plot(losses.T[1], label='Generator', alpha=0.5) plt.title("Training Losses") plt.legend() _ = view_samples(-1, samples, 6, 12, figsize=(10,5)) _ = view_samples(-1, samples, 6, 12, figsize=(10,5)) """ Explanation: Hyperparameters GANs are very senstive to hyperparameters. A lot of experimentation goes into finding the best hyperparameters such that the generator and discriminator don't overpower each other. Try out your own hyperparameters or read the DCGAN paper to see what worked for them. End of explanation """
mlperf/training_results_v0.5
v0.5.0/google/cloud_v3.8/resnet-tpuv3-8/code/resnet/model/models/samples/outreach/blogs/segmentation_blogpost/image_segmentation.ipynb
apache-2.0
!pip install kaggle import os import glob import zipfile import functools import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['axes.grid'] = False mpl.rcParams['figure.figsize'] = (12,12) from sklearn.model_selection import train_test_split import matplotlib.image as mpimg import pandas as pd from PIL import Image import tensorflow as tf import tensorflow.contrib as tfcontrib from tensorflow.python.keras import layers from tensorflow.python.keras import losses from tensorflow.python.keras import models from tensorflow.python.keras import backend as K """ Explanation: Image Segmentation with tf.keras <table class="tfo-notebook-buttons" align="left"><td> <a target="_blank" href="http://colab.research.google.com/github/tensorflow/models/blob/master/samples/outreach/blogs/segmentation_blogpost/image_segmentation.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td><td> <a target="_blank" href="https://github.com/tensorflow/models/blob/master/samples/outreach/blogs/segmentation_blogpost/image_segmentation.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a></td></table> In this tutorial we will learn how to segment images. Segmentation is the process of generating pixel-wise segmentations giving the class of the object visible at each pixel. For example, we could be identifying the location and boundaries of people within an image or identifying cell nuclei from an image. Formally, image segmentation refers to the process of partitioning an image into a set of pixels that we desire to identify (our target) and the background. Specifically, in this tutorial we will be using the Kaggle Carvana Image Masking Challenge Dataset. This dataset contains a large number of car images, with each car taken from different angles. 
In addition, for each car image, we have an associated manually cutout mask; our task will be to automatically create these cutout masks for unseen data. Specific concepts that will be covered: In the process, we will build practical experience and develop intuition around the following concepts: * Functional API - we will be implementing UNet, a convolutional network model classically used for biomedical image segmentation with the Functional API. * This model has layers that require multiple input/outputs. This requires the use of the functional API * Check out the original paper, U-Net: Convolutional Networks for Biomedical Image Segmentation by Olaf Ronneberger! * Custom Loss Functions and Metrics - We'll implement a custom loss function using binary cross entropy and dice loss. We'll also implement dice coefficient (which is used for our loss) and mean intersection over union, that will help us monitor our training process and judge how well we are performing. * Saving and loading keras models - We'll save our best model to disk. When we want to perform inference/evaluate our model, we'll load in the model from disk. We will follow the general workflow: Visualize data/perform some exploratory data analysis Set up data pipeline and preprocessing Build model Train model Evaluate model Repeat Audience: This post is geared towards intermediate users who are comfortable with basic machine learning concepts. Note that if you wish to run this notebook, it is highly recommended that you do so with a GPU. Time Estimated: 60 min By: Raymond Yuan, Software Engineering Intern End of explanation """ import os # Upload the API token. 
def get_kaggle_credentials(): token_dir = os.path.join(os.path.expanduser("~"),".kaggle") token_file = os.path.join(token_dir, "kaggle.json") if not os.path.isdir(token_dir): os.mkdir(token_dir) try: with open(token_file,'r') as f: pass except IOError as no_file: try: from google.colab import files except ImportError: raise no_file uploaded = files.upload() if "kaggle.json" not in uploaded: raise ValueError("You need an API key! see: " "https://github.com/Kaggle/kaggle-api#api-credentials") with open(token_file, "wb") as f: f.write(uploaded["kaggle.json"]) os.chmod(token_file, 600) get_kaggle_credentials() """ Explanation: Get all the files Since this tutorial will be using a dataset from Kaggle, it requires creating an API Token for your Kaggle account, and uploading it. End of explanation """ import kaggle """ Explanation: Only import kaggle after adding the credentials. End of explanation """ competition_name = 'carvana-image-masking-challenge' # Download data from Kaggle and unzip the files of interest. def load_data_from_zip(competition, file): with zipfile.ZipFile(os.path.join(competition, file), "r") as zip_ref: unzipped_file = zip_ref.namelist()[0] zip_ref.extractall(competition) def get_data(competition): kaggle.api.competition_download_files(competition, competition) load_data_from_zip(competition, 'train.zip') load_data_from_zip(competition, 'train_masks.zip') load_data_from_zip(competition, 'train_masks.csv.zip') """ Explanation: We'll download the data from Kaggle Caution, large download ahead - downloading all files will require 14GB of diskspace. 
End of explanation """ get_data(competition_name) img_dir = os.path.join(competition_name, "train") label_dir = os.path.join(competition_name, "train_masks") df_train = pd.read_csv(os.path.join(competition_name, 'train_masks.csv')) ids_train = df_train['img'].map(lambda s: s.split('.')[0]) x_train_filenames = [] y_train_filenames = [] for img_id in ids_train: x_train_filenames.append(os.path.join(img_dir, "{}.jpg".format(img_id))) y_train_filenames.append(os.path.join(label_dir, "{}_mask.gif".format(img_id))) x_train_filenames, x_val_filenames, y_train_filenames, y_val_filenames = \ train_test_split(x_train_filenames, y_train_filenames, test_size=0.2, random_state=42) num_train_examples = len(x_train_filenames) num_val_examples = len(x_val_filenames) print("Number of training examples: {}".format(num_train_examples)) print("Number of validation examples: {}".format(num_val_examples)) """ Explanation: You must accept the competition rules before downloading the data. End of explanation """ x_train_filenames[:10] y_train_filenames[:10] """ Explanation: Here's what the paths look like End of explanation """ display_num = 5 r_choices = np.random.choice(num_train_examples, display_num) plt.figure(figsize=(10, 15)) for i in range(0, display_num * 2, 2): img_num = r_choices[i // 2] x_pathname = x_train_filenames[img_num] y_pathname = y_train_filenames[img_num] plt.subplot(display_num, 2, i + 1) plt.imshow(mpimg.imread(x_pathname)) plt.title("Original Image") example_labels = Image.open(y_pathname) label_vals = np.unique(example_labels) plt.subplot(display_num, 2, i + 2) plt.imshow(example_labels) plt.title("Masked Image") plt.suptitle("Examples of Images and their Masks") plt.show() """ Explanation: Visualize Let's take a look at some of the examples of different images in our dataset. End of explanation """ img_shape = (256, 256, 3) batch_size = 3 epochs = 5 """ Explanation: Set up Let’s begin by setting up some parameters. 
We’ll standardize and resize all the shapes of the images. We’ll also set up some training parameters:
End of explanation """

def _process_pathnames(fname, label_path):
  """Load one (image, mask) pair from disk as tensors.

  Args:
    fname: scalar string tensor -- path to the input JPEG image.
    label_path: scalar string tensor -- path to the GIF mask for that image.

  Returns:
    (img, label_img): img is an RGB image tensor of shape (h, w, 3);
    label_img is the single-channel mask tensor of shape (h, w, 1).
  """
  # We map this function onto each pathname pair
  img_str = tf.read_file(fname)
  img = tf.image.decode_jpeg(img_str, channels=3)

  label_img_str = tf.read_file(label_path)
  # These are gif images so they return as (num_frames, h, w, c)
  # -- we keep only the first (and, presumably, only) frame.
  label_img = tf.image.decode_gif(label_img_str)[0]
  # The label image should only have values of 1 or 0, indicating pixel wise
  # object (car) or not (background). We take the first channel only.
  label_img = label_img[:, :, 0]
  # Restore a trailing channel axis so the mask is (h, w, 1) like an image.
  label_img = tf.expand_dims(label_img, axis=-1)
  return img, label_img
""" Explanation: Using these exact same parameters may be too computationally intensive for your hardware, so tweak the parameters accordingly. Also, it is important to note that due to the architecture of our UNet version, the size of the image must be evenly divisible by a factor of 32, as we down sample the spatial resolution by a factor of 2 with each MaxPooling2Dlayer. If your machine can support it, you will achieve better performance using a higher resolution input image (e.g. 512 by 512) as this will allow more precise localization and less loss of information during encoding. In addition, you can also make the model deeper. Alternatively, if your machine cannot support it, lower the image resolution and/or batch size. Note that lowering the image resolution will decrease performance and lowering batch size will increase training time. Build our input pipeline with tf.data Since we begin with filenames, we will need to build a robust and scalable data pipeline that will play nicely with our model. If you are unfamiliar with tf.data you should check out my other tutorial introducing the concept! Our input pipeline will consist of the following steps: Read the bytes of the file in from the filename - for both the image and the label. 
Recall that our labels are actually images with each pixel annotated as car or background (1, 0). Decode the bytes into an image format Apply image transformations: (optional, according to input parameters) resize - Resize our images to a standard size (as determined by eda or computation/memory restrictions) The reason why this is optional is that U-Net is a fully convolutional network (e.g. with no fully connected units) and is thus not dependent on the input size. However, if you choose to not resize the images, you must use a batch size of 1, since you cannot batch variable image size together Alternatively, you could also bucket your images together and resize them per mini-batch to avoid resizing images as much, as resizing may affect your performance through interpolation, etc. hue_delta - Adjusts the hue of an RGB image by a random factor. This is only applied to the actual image (not our label image). The hue_delta must be in the interval [0, 0.5] horizontal_flip - flip the image horizontally along the central axis with a 0.5 probability. This transformation must be applied to both the label and the actual image. width_shift_range and height_shift_range are ranges (as a fraction of total width or height) within which to randomly translate the image either horizontally or vertically. This transformation must be applied to both the label and the actual image. rescale - rescale the image by a certain factor, e.g. 1/ 255. Shuffle the data, repeat the data (so we can iterate over it multiple times across epochs), batch the data, then prefetch a batch (for efficiency). It is important to note that these transformations that occur in your data pipeline must be symbolic transformations. Why do we do these image transformations? This is known as data augmentation. Data augmentation "increases" the amount of training data by augmenting them via a number of random transformations. During training time, our model would never see twice the exact same picture. 
This helps prevent overfitting and helps the model generalize better to unseen data. Processing each pathname
End of explanation """

def shift_img(output_img, label_img, width_shift_range, height_shift_range):
  """This fn will perform the horizontal or vertical shift

  The shift ranges are given as fractions of the image width/height; a random
  offset is drawn uniformly from [-range, +range] (in pixels) and the SAME
  translation is applied to the image and its mask so they stay aligned.
  """
  if width_shift_range or height_shift_range:
      if width_shift_range:
        # Convert the fractional range into a random pixel offset.
        width_shift_range = tf.random_uniform([],
                                              -width_shift_range * img_shape[1],
                                              width_shift_range * img_shape[1])
      if height_shift_range:
        height_shift_range = tf.random_uniform([],
                                               -height_shift_range * img_shape[0],
                                               height_shift_range * img_shape[0])
      # Translate both
      output_img = tfcontrib.image.translate(output_img,
                                             [width_shift_range, height_shift_range])
      label_img = tfcontrib.image.translate(label_img,
                                            [width_shift_range, height_shift_range])
  return output_img, label_img
""" Explanation: Shifting the image
End of explanation """

def flip_img(horizontal_flip, tr_img, label_img):
  """Randomly mirror image and mask together.

  When `horizontal_flip` is truthy, flips BOTH tensors left-right with
  probability 0.5 (a single coin flip governs both, keeping them aligned);
  otherwise returns them unchanged.
  """
  if horizontal_flip:
    flip_prob = tf.random_uniform([], 0.0, 1.0)
    tr_img, label_img = tf.cond(tf.less(flip_prob, 0.5),
                                lambda: (tf.image.flip_left_right(tr_img), tf.image.flip_left_right(label_img)),
                                lambda: (tr_img, label_img))
  return tr_img, label_img
""" Explanation: Flipping the image randomly
End of explanation """

def _augment(img,
             label_img,
             resize=None,  # Resize the image to some size e.g. [256, 256]
             scale=1,  # Scale image e.g. 1 / 255. 
hue_delta=0, # Adjust the hue of an RGB image by random factor horizontal_flip=False, # Random left right flip, width_shift_range=0, # Randomly translate the image horizontally height_shift_range=0): # Randomly translate the image vertically if resize is not None: # Resize both images label_img = tf.image.resize_images(label_img, resize) img = tf.image.resize_images(img, resize) if hue_delta: img = tf.image.random_hue(img, hue_delta) img, label_img = flip_img(horizontal_flip, img, label_img) img, label_img = shift_img(img, label_img, width_shift_range, height_shift_range) label_img = tf.to_float(label_img) * scale img = tf.to_float(img) * scale return img, label_img def get_baseline_dataset(filenames, labels, preproc_fn=functools.partial(_augment), threads=5, batch_size=batch_size, shuffle=True): num_x = len(filenames) # Create a dataset from the filenames and labels dataset = tf.data.Dataset.from_tensor_slices((filenames, labels)) # Map our preprocessing function to every element in our dataset, taking # advantage of multithreading dataset = dataset.map(_process_pathnames, num_parallel_calls=threads) if preproc_fn.keywords is not None and 'resize' not in preproc_fn.keywords: assert batch_size == 1, "Batching images must be of the same size" dataset = dataset.map(preproc_fn, num_parallel_calls=threads) if shuffle: dataset = dataset.shuffle(num_x) # It's necessary to repeat our data for all epochs dataset = dataset.repeat().batch(batch_size) return dataset """ Explanation: Assembling our transformations into our augment function End of explanation """ tr_cfg = { 'resize': [img_shape[0], img_shape[1]], 'scale': 1 / 255., 'hue_delta': 0.1, 'horizontal_flip': True, 'width_shift_range': 0.1, 'height_shift_range': 0.1 } tr_preprocessing_fn = functools.partial(_augment, **tr_cfg) val_cfg = { 'resize': [img_shape[0], img_shape[1]], 'scale': 1 / 255., } val_preprocessing_fn = functools.partial(_augment, **val_cfg) train_ds = get_baseline_dataset(x_train_filenames, 
y_train_filenames, preproc_fn=tr_preprocessing_fn, batch_size=batch_size) val_ds = get_baseline_dataset(x_val_filenames, y_val_filenames, preproc_fn=val_preprocessing_fn, batch_size=batch_size) """ Explanation: Set up train and validation datasets Note that we apply image augmentation to our training dataset but not our validation dataset. End of explanation """ temp_ds = get_baseline_dataset(x_train_filenames, y_train_filenames, preproc_fn=tr_preprocessing_fn, batch_size=1, shuffle=False) # Let's examine some of these augmented images data_aug_iter = temp_ds.make_one_shot_iterator() next_element = data_aug_iter.get_next() with tf.Session() as sess: batch_of_imgs, label = sess.run(next_element) # Running next element in our graph will produce a batch of images plt.figure(figsize=(10, 10)) img = batch_of_imgs[0] plt.subplot(1, 2, 1) plt.imshow(img) plt.subplot(1, 2, 2) plt.imshow(label[0, :, :, 0]) plt.show() """ Explanation: Let's see if our image augmentor data pipeline is producing expected results End of explanation """ def conv_block(input_tensor, num_filters): encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor) encoder = layers.BatchNormalization()(encoder) encoder = layers.Activation('relu')(encoder) encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(encoder) encoder = layers.BatchNormalization()(encoder) encoder = layers.Activation('relu')(encoder) return encoder def encoder_block(input_tensor, num_filters): encoder = conv_block(input_tensor, num_filters) encoder_pool = layers.MaxPooling2D((2, 2), strides=(2, 2))(encoder) return encoder_pool, encoder def decoder_block(input_tensor, concat_tensor, num_filters): decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(input_tensor) decoder = layers.concatenate([concat_tensor, decoder], axis=-1) decoder = layers.BatchNormalization()(decoder) decoder = layers.Activation('relu')(decoder) decoder = layers.Conv2D(num_filters, (3, 3), 
padding='same')(decoder) decoder = layers.BatchNormalization()(decoder) decoder = layers.Activation('relu')(decoder) decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder) decoder = layers.BatchNormalization()(decoder) decoder = layers.Activation('relu')(decoder) return decoder inputs = layers.Input(shape=img_shape) # 256 encoder0_pool, encoder0 = encoder_block(inputs, 32) # 128 encoder1_pool, encoder1 = encoder_block(encoder0_pool, 64) # 64 encoder2_pool, encoder2 = encoder_block(encoder1_pool, 128) # 32 encoder3_pool, encoder3 = encoder_block(encoder2_pool, 256) # 16 encoder4_pool, encoder4 = encoder_block(encoder3_pool, 512) # 8 center = conv_block(encoder4_pool, 1024) # center decoder4 = decoder_block(center, encoder4, 512) # 16 decoder3 = decoder_block(decoder4, encoder3, 256) # 32 decoder2 = decoder_block(decoder3, encoder2, 128) # 64 decoder1 = decoder_block(decoder2, encoder1, 64) # 128 decoder0 = decoder_block(decoder1, encoder0, 32) # 256 outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0) """ Explanation: Build the model We'll build the U-Net model. U-Net is especially good with segmentation tasks because it can localize well to provide high resolution segmentation masks. In addition, it works well with small datasets and is relatively robust against overfitting as the training data is in terms of the number of patches within an image, which is much larger than the number of training images itself. Unlike the original model, we will add batch normalization to each of our blocks. The Unet is built with an encoder portion and a decoder portion. The encoder portion is composed of a linear stack of Conv, BatchNorm, and Relu operations followed by a MaxPool. Each MaxPool will reduce the spatial resolution of our feature map by a factor of 2. We keep track of the outputs of each block as we feed these high resolution feature maps with the decoder portion. 
The Decoder portion is comprised of UpSampling2D, Conv, BatchNorm, and Relus. Note that we concatenate the feature map of the same size on the decoder side. Finally, we add a final Conv operation that performs a convolution along the channels for each individual pixel (kernel size of (1, 1)) that outputs our final segmentation mask in grayscale. The Keras Functional API The Keras functional API is used when you have multi-input/output models, shared layers, etc. It's a powerful API that allows you to manipulate tensors and build complex graphs with intertwined datastreams easily. In addition it makes layers and models both callable on tensors. * To see more examples check out the get started guide. We'll build these helper functions that will allow us to ensemble our model block operations easily and simply. End of explanation """ model = models.Model(inputs=[inputs], outputs=[outputs]) """ Explanation: Define your model Using functional API, you must define your model by specifying the inputs and outputs associated with the model. End of explanation """ def dice_coeff(y_true, y_pred): smooth = 1. # Flatten y_true_f = tf.reshape(y_true, [-1]) y_pred_f = tf.reshape(y_pred, [-1]) intersection = tf.reduce_sum(y_true_f * y_pred_f) score = (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth) return score def dice_loss(y_true, y_pred): loss = 1 - dice_coeff(y_true, y_pred) return loss """ Explanation: Defining custom metrics and loss functions Defining loss and metric functions are simple with Keras. Simply define a function that takes both the True labels for a given example and the Predicted labels for the same given example. Dice loss is a metric that measures overlap. More info on optimizing for Dice coefficient (our dice loss) can be found in the paper, where it was introduced. We use dice loss here because it performs better at class imbalanced problems by design. 
In addition, maximizing the dice coefficient and IoU metrics are the actual objectives and goals of our segmentation task. Using cross entropy is more of a proxy which is easier to maximize. Instead, we maximize our objective directly. End of explanation """ def bce_dice_loss(y_true, y_pred): loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred) return loss """ Explanation: Here, we'll use a specialized loss function that combines binary cross entropy and our dice loss. This is based on individuals who competed within this competition obtaining better results empirically. Try out your own custom losses to measure performance (e.g. bce + log(dice_loss), only bce, etc.)! End of explanation """ model.compile(optimizer='adam', loss=bce_dice_loss, metrics=[dice_loss]) model.summary() """ Explanation: Compile your model We use our custom loss function to minimize. In addition, we specify what metrics we want to keep track of as we train. Note that metrics are not actually used during the training process to tune the parameters, but are instead used to measure performance of the training process. End of explanation """ save_model_path = '/tmp/weights.hdf5' cp = tf.keras.callbacks.ModelCheckpoint(filepath=save_model_path, monitor='val_dice_loss', save_best_only=True, verbose=1) """ Explanation: Train your model Training your model with tf.data involves simply providing the model's fit function with your training/validation dataset, the number of steps, and epochs. We also include a Model callback, ModelCheckpoint that will save the model to disk after each epoch. We configure it such that it only saves our highest performing model. Note that saving the model capture more than just the weights of the model: by default, it saves the model architecture, weights, as well as information about the training process such as the state of the optimizer, etc. 
End of explanation """ history = model.fit(train_ds, steps_per_epoch=int(np.ceil(num_train_examples / float(batch_size))), epochs=epochs, validation_data=val_ds, validation_steps=int(np.ceil(num_val_examples / float(batch_size))), callbacks=[cp]) """ Explanation: Don't forget to specify our model callback in the fit function call. End of explanation """ dice = history.history['dice_loss'] val_dice = history.history['val_dice_loss'] loss = history.history['loss'] val_loss = history.history['val_loss'] epochs_range = range(epochs) plt.figure(figsize=(16, 8)) plt.subplot(1, 2, 1) plt.plot(epochs_range, dice, label='Training Dice Loss') plt.plot(epochs_range, val_dice, label='Validation Dice Loss') plt.legend(loc='upper right') plt.title('Training and Validation Dice Loss') plt.subplot(1, 2, 2) plt.plot(epochs_range, loss, label='Training Loss') plt.plot(epochs_range, val_loss, label='Validation Loss') plt.legend(loc='upper right') plt.title('Training and Validation Loss') plt.show() """ Explanation: Visualize training process End of explanation """ # Alternatively, load the weights directly: model.load_weights(save_model_path) model = models.load_model(save_model_path, custom_objects={'bce_dice_loss': bce_dice_loss, 'dice_loss': dice_loss}) # Let's visualize some of the outputs data_aug_iter = val_ds.make_one_shot_iterator() next_element = data_aug_iter.get_next() # Running next element in our graph will produce a batch of images plt.figure(figsize=(10, 20)) for i in range(5): batch_of_imgs, label = tf.keras.backend.get_session().run(next_element) img = batch_of_imgs[0] predicted_label = model.predict(batch_of_imgs)[0] plt.subplot(5, 3, 3 * i + 1) plt.imshow(img) plt.title("Input image") plt.subplot(5, 3, 3 * i + 2) plt.imshow(label[0, :, :, 0]) plt.title("Actual Mask") plt.subplot(5, 3, 3 * i + 3) plt.imshow(predicted_label[:, :, 0]) plt.title("Predicted Mask") plt.suptitle("Examples of Input Image, Label, and Prediction") plt.show() """ Explanation: Even with only 5 
epochs, we see strong performance. Visualize actual performance We'll visualize our performance on the validation set. Note that in an actual setting (competition, deployment, etc.) we'd evaluate on the test set with the full image resolution. To load our model we have two options: 1. Since our model architecture is already in memory, we can simply call load_weights(save_model_path) 2. If you wanted to load the model from scratch (in a different setting without already having the model architecture in memory) we simply call model = models.load_model(save_model_path, custom_objects={'bce_dice_loss': bce_dice_loss, 'dice_loss': dice_loss}), specificing the necessary custom objects, loss and metrics, that we used to train our model. If you want to see more examples, check our the keras guide! End of explanation """
ShubhamDebnath/Coursera-Machine-Learning
Course 1/Logistic Regression with a Neural Network mindset.ipynb
mit
import numpy as np import matplotlib.pyplot as plt import h5py import scipy from PIL import Image from scipy import ndimage from lr_utils import load_dataset %matplotlib inline """ Explanation: Logistic Regression with a Neural Network mindset Welcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning. Instructions: - Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so. You will learn to: - Build the general architecture of a learning algorithm, including: - Initializing parameters - Calculating the cost function and its gradient - Using an optimization algorithm (gradient descent) - Gather all three functions above into a main model function, in the right order. 1 - Packages First, let's run the cell below to import all the packages that you will need during this assignment. - numpy is the fundamental package for scientific computing with Python. - h5py is a common package to interact with a dataset that is stored on an H5 file. - matplotlib is a famous library to plot graphs in Python. - PIL and scipy are used here to test your model with your own picture at the end. End of explanation """ # Loading the data (cat/non-cat) train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset() """ Explanation: 2 - Overview of the Problem set Problem Statement: You are given a dataset ("data.h5") containing: - a training set of m_train images labeled as cat (y=1) or non-cat (y=0) - a test set of m_test images labeled as cat or non-cat - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px). You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat. 
Let's get more familiar with the dataset. Load the data by running the following code. End of explanation """ # Example of a picture index = 25 plt.imshow(train_set_x_orig[index]) print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.") """ Explanation: We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing). Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the index value and re-run to see other images. End of explanation """ ### START CODE HERE ### (≈ 3 lines of code) m_train = train_set_x_orig.shape[0] m_test = test_set_x_orig.shape[0] num_px = train_set_x_orig.shape[1] ### END CODE HERE ### print ("Number of training examples: m_train = " + str(m_train)) print ("Number of testing examples: m_test = " + str(m_test)) print ("Height/Width of each image: num_px = " + str(num_px)) print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)") print ("train_set_x shape: " + str(train_set_x_orig.shape)) print ("train_set_y shape: " + str(train_set_y.shape)) print ("test_set_x shape: " + str(test_set_x_orig.shape)) print ("test_set_y shape: " + str(test_set_y.shape)) """ Explanation: Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs. Exercise: Find the values for: - m_train (number of training examples) - m_test (number of test examples) - num_px (= height = width of a training image) Remember that train_set_x_orig is a numpy-array of shape (m_train, num_px, num_px, 3). 
For instance, you can access m_train by writing train_set_x_orig.shape[0]. End of explanation """ # Reshape the training and test examples ### START CODE HERE ### (≈ 2 lines of code) train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T ### END CODE HERE ### print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape)) print ("train_set_y shape: " + str(train_set_y.shape)) print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape)) print ("test_set_y shape: " + str(test_set_y.shape)) print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0])) """ Explanation: Expected Output for m_train, m_test and num_px: <table style="width:15%"> <tr> <td>**m_train**</td> <td> 209 </td> </tr> <tr> <td>**m_test**</td> <td> 50 </td> </tr> <tr> <td>**num_px**</td> <td> 64 </td> </tr> </table> For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $$ num_px $$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns. Exercise: Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num_px $$ num_px $$ 3, 1). A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$$c$$d, a) is to use: python X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X End of explanation """ train_set_x = train_set_x_flatten/255. test_set_x = test_set_x_flatten/255. 
""" Explanation: Expected Output: <table style="width:35%"> <tr> <td>**train_set_x_flatten shape**</td> <td> (12288, 209)</td> </tr> <tr> <td>**train_set_y shape**</td> <td>(1, 209)</td> </tr> <tr> <td>**test_set_x_flatten shape**</td> <td>(12288, 50)</td> </tr> <tr> <td>**test_set_y shape**</td> <td>(1, 50)</td> </tr> <tr> <td>**sanity check after reshaping**</td> <td>[17 31 56 22 33]</td> </tr> </table> To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255. One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel). <!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !--> Let's standardize our dataset. End of explanation """ # GRADED FUNCTION: sigmoid def sigmoid(z): """ Compute the sigmoid of z Arguments: z -- A scalar or numpy array of any size. Return: s -- sigmoid(z) """ ### START CODE HERE ### (≈ 1 line of code) s = 1/(1+ np.exp(-z)) ### END CODE HERE ### return s print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2])))) """ Explanation: <font color='blue'> What you need to remember: Common steps for pre-processing a new dataset are: - Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...) 
- Reshape the datasets such that each example is now a vector of size (num_px * num_px * 3, 1) - "Standardize" the data 3 - General Architecture of the learning algorithm It's time to design a simple algorithm to distinguish cat images from non-cat images. You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why Logistic Regression is actually a very simple Neural Network! <img src="images/LogReg_kiank.png" style="width:650px;height:400px;"> Mathematical expression of the algorithm: For one example $x^{(i)}$: $$z^{(i)} = w^T x^{(i)} + b \tag{1}$$ $$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$ $$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$ The cost is then computed by summing over all training examples: $$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$ Key steps: In this exercise, you will carry out the following steps: - Initialize the parameters of the model - Learn the parameters for the model by minimizing the cost - Use the learned parameters to make predictions (on the test set) - Analyse the results and conclude 4 - Building the parts of our algorithm ## The main steps for building a Neural Network are: 1. Define the model structure (such as number of input features) 2. Initialize the model's parameters 3. Loop: - Calculate current loss (forward propagation) - Calculate current gradient (backward propagation) - Update parameters (gradient descent) You often build 1-3 separately and integrate them into one function we call model(). 4.1 - Helper functions Exercise: Using your code from "Python Basics", implement sigmoid(). As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp(). 
End of explanation """

# GRADED FUNCTION: initialize_with_zeros

def initialize_with_zeros(dim):
    """
    This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.

    Argument:
    dim -- size of the w vector we want (or number of parameters in this case)

    Returns:
    w -- initialized vector of shape (dim, 1)
    b -- initialized scalar (corresponds to the bias)
    """

    ### START CODE HERE ### (≈ 1 line of code)
    w = np.zeros((dim, 1))  # column vector of zeros (float dtype by default)
    b = 0                   # scalar bias, also zero
    ### END CODE HERE ###

    # Sanity checks: w must be a (dim, 1) column vector and b a plain number.
    assert(w.shape == (dim, 1))
    assert(isinstance(b, float) or isinstance(b, int))

    return w, b

dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
""" Explanation: Expected Output: <table> <tr> <td>**sigmoid([0, 2])**</td> <td> [ 0.5 0.88079708]</td> </tr> </table> 4.2 - Initializing parameters Exercise: Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation. End of explanation """

# GRADED FUNCTION: propagate

def propagate(w, b, X, Y):
    """
    Implement the cost function and its gradient for the propagation explained above

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)

    Return:
    cost -- negative log-likelihood cost for logistic regression
    dw -- gradient of the loss with respect to w, thus same shape as w
    db -- gradient of the loss with respect to b, thus same shape as b

    Tips:
    - Write your code step by step for the propagation. 
np.log(), np.dot() """ m = X.shape[1] # FORWARD PROPAGATION (FROM X TO COST) ### START CODE HERE ### (≈ 2 lines of code) A = sigmoid(np.dot(w.T,X) +b) # compute activation cost = (-1)* (np.dot(Y, np.log(A).T) + np.dot((1-Y), np.log(1-A).T)) / m # compute cost ### END CODE HERE ### # BACKWARD PROPAGATION (TO FIND GRAD) ### START CODE HERE ### (≈ 2 lines of code) dw = (1/m) * np.dot(X, (A-Y).T) db = (1/m) * np.sum(A-Y) ### END CODE HERE ### assert(dw.shape == w.shape) assert(db.dtype == float) cost = np.squeeze(cost) assert(cost.shape == ()) grads = {"dw": dw, "db": db} return grads, cost w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]]) grads, cost = propagate(w, b, X, Y) print ("dw = " + str(grads["dw"])) print ("db = " + str(grads["db"])) print ("cost = " + str(cost)) """ Explanation: Expected Output: <table style="width:15%"> <tr> <td> ** w ** </td> <td> [[ 0.] [ 0.]] </td> </tr> <tr> <td> ** b ** </td> <td> 0 </td> </tr> </table> For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1). 4.3 - Forward and Backward propagation Now that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters. Exercise: Implement a function propagate() that computes the cost function and its gradient. 
Hints: Forward Propagation:
- You get X
- You compute $A = \sigma(w^T X + b) = (a^{(0)}, a^{(1)}, ..., a^{(m-1)}, a^{(m)})$
- You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$
Here are the two formulas you will be using:
$$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$
$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$
End of explanation
"""
# GRADED FUNCTION: optimize

def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    """
    This function optimizes w and b by running a gradient descent algorithm

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- True to print the loss every 100 steps

    Returns:
    params -- dictionary containing the weights w and bias b
    grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
    costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.

    Tips:
    You basically need to write down two steps and iterate through them:
        1) Calculate the cost and the gradient for the current parameters. Use propagate().
        2) Update the parameters using gradient descent rule for w and b.
    """

    costs = []  # cost sampled every 100 iterations, for the learning curve

    for i in range(num_iterations):

        # Cost and gradient calculation (≈ 1-4 lines of code)
        ### START CODE HERE ###
        grads, cost = propagate(w, b, X, Y)
        ### END CODE HERE ###

        # Retrieve derivatives from grads
        dw = grads["dw"]
        db = grads["db"]

        # update rule (≈ 2 lines of code)
        ### START CODE HERE ###
        w = w - learning_rate * dw
        b = b - learning_rate * db
        ### END CODE HERE ###

        # Record the costs
        if i % 100 == 0:
            costs.append(cost)

        # Print the cost every 100 training iterations
        if print_cost and i % 100 == 0:
            print ("Cost after iteration %i: %f" %(i, cost))

    # Package the final parameters and the last computed gradients for the caller.
    params = {"w": w,
              "b": b}

    grads = {"dw": dw,
             "db": db}

    return params, grads, costs

# Smoke test: reuses w, b, X, Y defined in the propagate() cell above.
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
"""
Explanation: Expected Output:
<table style="width:50%">
    <tr>
       <td> ** dw ** </td>
       <td> [[ 0.99845601] [ 2.39507239]]</td>
    </tr>
    <tr>
       <td> ** db ** </td>
       <td> 0.00145557813678 </td>
    </tr>
    <tr>
       <td> ** cost ** </td>
       <td> 5.801545319394553 </td>
    </tr>
</table>
d) Optimization
You have initialized your parameters.
You are also able to compute a cost function and its gradient.
Now, you want to update the parameters using gradient descent.
Exercise: Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate.
End of explanation """ # GRADED FUNCTION: predict def predict(w, b, X): ''' Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b) Arguments: w -- weights, a numpy array of size (num_px * num_px * 3, 1) b -- bias, a scalar X -- data of size (num_px * num_px * 3, number of examples) Returns: Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X ''' m = X.shape[1] Y_prediction = np.zeros((1,m)) w = w.reshape(X.shape[0], 1) # Compute vector "A" predicting the probabilities of a cat being present in the picture ### START CODE HERE ### (≈ 1 line of code) A = sigmoid(np.dot(w.T,X) +b) ### END CODE HERE ### for i in range(A.shape[1]): # Convert probabilities A[0,i] to actual predictions p[0,i] ### START CODE HERE ### (≈ 4 lines of code) Y_prediction[0, i] = 1 if A[0, i]>0.5 else 0 ### END CODE HERE ### assert(Y_prediction.shape == (1, m)) return Y_prediction w = np.array([[0.1124579],[0.23106775]]) b = -0.3 X = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]]) print ("predictions = " + str(predict(w, b, X))) """ Explanation: Expected Output: <table style="width:40%"> <tr> <td> **w** </td> <td>[[ 0.19033591] [ 0.12259159]] </td> </tr> <tr> <td> **b** </td> <td> 1.92535983008 </td> </tr> <tr> <td> **dw** </td> <td> [[ 0.67752042] [ 1.41625495]] </td> </tr> <tr> <td> **db** </td> <td> 0.219194504541 </td> </tr> </table> Exercise: The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the predict() function. There is two steps to computing predictions: Calculate $\hat{Y} = A = \sigma(w^T X + b)$ Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), stores the predictions in a vector Y_prediction. If you wish, you can use an if/else statement in a for loop (though there is also a way to vectorize this). 
End of explanation """ # GRADED FUNCTION: model def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False): """ Builds the logistic regression model by calling the function you've implemented previously Arguments: X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train) Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train) X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test) Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test) num_iterations -- hyperparameter representing the number of iterations to optimize the parameters learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize() print_cost -- Set to true to print the cost every 100 iterations Returns: d -- dictionary containing information about the model. """ ### START CODE HERE ### # initialize parameters with zeros (≈ 1 line of code) w, b = initialize_with_zeros(X_train.shape[0]) # Gradient descent (≈ 1 line of code) parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost) # Retrieve parameters w and b from dictionary "parameters" w = parameters["w"] b = parameters["b"] # Predict test/train set examples (≈ 2 lines of code) Y_prediction_test = predict(w, b, X_test) Y_prediction_train = predict(w, b, X_train) ### END CODE HERE ### # Print train/test Errors print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100)) print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100)) d = {"costs": costs, "Y_prediction_test": Y_prediction_test, "Y_prediction_train" : Y_prediction_train, "w" : w, "b" : b, "learning_rate" : learning_rate, "num_iterations": num_iterations} return d """ Explanation: Expected Output: <table style="width:30%"> <tr> <td> **predictions** </td> <td> [[ 1. 1. 
0.]] </td> </tr> </table> <font color='blue'> What to remember: You've implemented several functions that: - Initialize (w,b) - Optimize the loss iteratively to learn parameters (w,b): - computing the cost and its gradient - updating the parameters using gradient descent - Use the learned (w,b) to predict the labels for a given set of examples 5 - Merge all functions into a model You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order. Exercise: Implement the model function. Use the following notation: - Y_prediction for your predictions on the test set - Y_prediction_train for your predictions on the train set - w, costs, grads for the outputs of optimize() End of explanation """ d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True) """ Explanation: Run the following cell to train your model. End of explanation """ # Example of a picture that was wrongly classified. index = 1 plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3))) print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[d["Y_prediction_test"][0,index]].decode("utf-8") + "\" picture.") """ Explanation: Expected Output: <table style="width:40%"> <tr> <td> **Cost after iteration 0 ** </td> <td> 0.693147 </td> </tr> <tr> <td> <center> $\vdots$ </center> </td> <td> <center> $\vdots$ </center> </td> </tr> <tr> <td> **Train Accuracy** </td> <td> 99.04306220095694 % </td> </tr> <tr> <td>**Test Accuracy** </td> <td> 70.0 % </td> </tr> </table> Comment: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test error is 68%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. 
But no worries, you'll build an even better classifier next week! Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the index variable) you can look at predictions on pictures of the test set. End of explanation """ # Plot learning curve (with costs) costs = np.squeeze(d['costs']) plt.plot(costs) plt.ylabel('cost') plt.xlabel('iterations (per hundreds)') plt.title("Learning rate =" + str(d["learning_rate"])) plt.show() """ Explanation: Let's also plot the cost function and the gradients. End of explanation """ learning_rates = [0.01, 0.001, 0.0001] models = {} for i in learning_rates: print ("learning rate is: " + str(i)) models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False) print ('\n' + "-------------------------------------------------------" + '\n') for i in learning_rates: plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"])) plt.ylabel('cost') plt.xlabel('iterations') legend = plt.legend(loc='upper center', shadow=True) frame = legend.get_frame() frame.set_facecolor('0.90') plt.show() """ Explanation: Interpretation: You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting. 6 - Further analysis (optional/ungraded exercise) Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$. Choice of learning rate Reminder: In order for Gradient Descent to work you must choose the learning rate wisely. 
The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate. Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the learning_rates variable to contain, and see what happens. End of explanation """ ## START CODE HERE ## (PUT YOUR IMAGE NAME) my_image = "1.jpg" # change this to the name of your image file ## END CODE HERE ## # We preprocess the image to fit your algorithm. fname = "images/" + my_image image = np.array(ndimage.imread(fname, flatten=False)) my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T my_predicted_image = predict(d["w"], d["b"], my_image) plt.imshow(image) print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.") """ Explanation: Interpretation: - Different learning rates give different costs and thus different predictions results. - If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost). - A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy. - In deep learning, we usually recommend that you: - Choose the learning rate that better minimizes the cost function. - If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.) 
7 - Test with your own image (optional/ungraded exercise) Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Change your image's name in the following code 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)! End of explanation """
kkkddder/dmc
notebooks/week-6/01-training a RNN model in Keras.ipynb
apache-2.0
import numpy as np from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout from keras.layers import LSTM from keras.callbacks import ModelCheckpoint from keras.utils import np_utils from time import gmtime, strftime import os import re import pickle import random import sys """ Explanation: Lab 6.1 - Keras for RNN In this lab we will use the Keras deep learning library to construct a simple recurrent neural network (RNN) that can learn linguistic structure from a piece of text, and use that knowledge to generate new text passages. To review general RNN architecture, specific types of RNN networks such as the LSTM networks we'll be using here, and other concepts behind this type of machine learning, you should consult the following resources: http://www.wildml.com/2015/09/recurrent-neural-networks-tutorial-part-1-introduction-to-rnns/ http://ml4a.github.io/guides/recurrent_neural_networks/ http://colah.github.io/posts/2015-08-Understanding-LSTMs/ http://karpathy.github.io/2015/05/21/rnn-effectiveness/ This code is an adaptation of these two examples: http://machinelearningmastery.com/text-generation-lstm-recurrent-neural-networks-python-keras/ https://github.com/fchollet/keras/blob/master/examples/lstm_text_generation.py You can consult the original sites for more information and documentation. Let's start by importing some of the libraries we'll be using in this lab: End of explanation """ # load ascii text from file filename = "data/obama.txt" raw_text = open(filename).read() # get rid of any characters other than letters, numbers, # and a few special characters raw_text = re.sub('[^\nA-Za-z0-9 ,.:;?!-]+', '', raw_text) # convert all text to lowercase raw_text = raw_text.lower() n_chars = len(raw_text) print "length of text:", n_chars print "text preview:", raw_text[:500] """ Explanation: The first thing we need to do is generate our training data set. 
In this case we will use a recent article written by Barack Obama for The Economist newspaper. Make sure you have the obama.txt file in the /data folder within the /week-6 folder in your repository. End of explanation """ # extract all unique characters in the text chars = sorted(list(set(raw_text))) n_vocab = len(chars) print "number of unique characters found:", n_vocab # create mapping of characters to integers and back char_to_int = dict((c, i) for i, c in enumerate(chars)) int_to_char = dict((i, c) for i, c in enumerate(chars)) # test our mapping print 'a', "- maps to ->", char_to_int["a"] print 25, "- maps to ->", int_to_char[25] """ Explanation: Next, we use python's set() function to generate a list of all unique characters in the text. This will form our 'vocabulary' of characters, which is similar to the categories found in typical ML classification problems. Since neural networks work with numerical data, we also need to create a mapping between each character and a unique integer value. To do this we create two dictionaries: one which has characters as keys and the associated integers as the value, and one which has integers as keys and the associated characters as the value. These dictionaries will allow us to do translation both ways. End of explanation """ # prepare the dataset of input to output pairs encoded as integers seq_length = 100 inputs = [] outputs = [] for i in range(0, n_chars - seq_length, 1): inputs.append(raw_text[i:i + seq_length]) outputs.append(raw_text[i + seq_length]) n_sequences = len(inputs) print "Total sequences: ", n_sequences """ Explanation: Now we need to define the training data for our network. With RNN's, the training data usually takes the shape of a three-dimensional matrix, with the size of each dimension representing: [# of training sequences, # of training samples per sequence, # of features per sample] The training sequences are the sets of data subjected to the RNN at each training step. 
As with all neural networks, these training sequences are presented to the network in small batches during training. Each training sequence is composed of some number of training samples. The number of samples in each sequence dictates how far back in the data stream the algorithm will learn, and sets the depth of the RNN layer. Each training sample within a sequence is composed of some number of features. This is the data that the RNN layer is learning from at each time step. In our example, the training samples and targets will use one-hot encoding, so will have a feature for each possible character, with the actual character represented by 1, and all others by 0. To prepare the data, we first set the length of training sequences we want to use. In this case we will set the sequence length to 100, meaning the RNN layer will be able to predict future characters based on the 100 characters that came before. We will then slide this 100 character 'window' over the entire text to create input and output arrays. Each entry in the input array contains 100 characters from the text, and each entry in the output array contains the single character that came after. End of explanation """ indeces = range(len(inputs)) random.shuffle(indeces) inputs = [inputs[x] for x in indeces] outputs = [outputs[x] for x in indeces] """ Explanation: Now let's shuffle both the input and output data so that we can later have Keras split it automatically into a training and test set. To make sure the two lists are shuffled the same way (maintaining correspondance between inputs and outputs), we create a separate shuffled list of indeces, and use these indeces to reorder both lists. 
End of explanation """ print inputs[0], "-->", outputs[0] """ Explanation: Let's visualize one of these sequences to make sure we are getting what we expect: End of explanation """ # create two empty numpy array with the proper dimensions X = np.zeros((n_sequences, seq_length, n_vocab), dtype=np.bool) y = np.zeros((n_sequences, n_vocab), dtype=np.bool) # iterate over the data and build up the X and y data sets # by setting the appropriate indices to 1 in each one-hot vector for i, example in enumerate(inputs): for t, char in enumerate(example): X[i, t, char_to_int[char]] = 1 y[i, char_to_int[outputs[i]]] = 1 print 'X dims -->', X.shape print 'y dims -->', y.shape """ Explanation: Next we will prepare the actual numpy datasets which will be used to train our network. We first initialize two empty numpy arrays in the proper formatting: X --> [# of training sequences, # of training samples, # of features] y --> [# of training sequences, # of features] We then iterate over the arrays we generated in the previous step and fill the numpy arrays with the proper data. Since all character data is formatted using one-hot encoding, we initialize both data sets with zeros. As we iterate over the data, we use the char_to_int dictionary to map each character to its related position integer, and use that position to change the related value in the data set to 1. End of explanation """ # define the LSTM model model = Sequential() model.add(LSTM(128, return_sequences=False, input_shape=(X.shape[1], X.shape[2]))) model.add(Dropout(0.50)) model.add(Dense(y.shape[1], activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam') """ Explanation: Next, we define our RNN model in Keras. This is very similar to how we defined the CNN model, except now we use the LSTM() function to create an LSTM layer with an internal memory of 128 neurons. LSTM is a special type of RNN layer which solves the unstable gradients issue seen in basic RNN. 
Along with LSTM layers, Keras also supports basic RNN layers and GRU layers, which are similar to LSTM. You can find full documentation for recurrent layers in Keras' documentation As before, we need to explicitly define the input shape for the first layer. Also, we need to tell Keras whether the LSTM layer should pass its sequence of predictions or its internal memory as the output to the next layer. If you are connecting the LSTM layer to a fully connected layer as we do in this case, you should set the return_sequences parameter to False to have the layer pass the value of its hidden neurons. If you are connecting multiple LSTM layers, you should set the parameter to True in all but the last layer, so that subsequent layers can learn from the sequence of predictions of previous layers. We will use dropout with a probability of 50% to regularize the network and prevent overfitting on our training data. The output of the network will be a fully connected layer with one neuron for each character in the vocabulary. The softmax function will convert this output to a probability distribution across all characters. End of explanation """ def sample(preds, temperature=1.0): # helper function to sample an index from a probability array preds = np.asarray(preds).astype('float64') preds = np.log(preds) / temperature exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) """ Explanation: Next, we define two helper functions: one to select a character based on a probability distribution, and one to generate a sequence of predicted characters based on an input (or 'seed') list of characters. The sample() function will take in a probability distribution generated by the softmax() function, and select a character based on the 'temperature' input. The temperature (also often called the 'diversity') effects how strictly the probability distribution is sampled. 
Lower values (closer to zero) output more confident predictions, but are also more conservative. In our case, if the model has overfit the training data, lower values are likely to give back exactly what is found in the text Higher values (1 and above) introduce more diversity and randomness into the results. This can lead the model to generate novel information not found in the training data. However, you are also likely to see more errors such as grammatical or spelling mistakes. End of explanation """ def generate(sentence, prediction_length=50, diversity=0.35): print '----- diversity:', diversity generated = sentence sys.stdout.write(generated) # iterate over number of characters requested for i in range(prediction_length): # build up sequence data from current sentence x = np.zeros((1, X.shape[1], X.shape[2])) for t, char in enumerate(sentence): x[0, t, char_to_int[char]] = 1. # use trained model to return probability distribution # for next character based on input sequence preds = model.predict(x, verbose=0)[0] # use sample() function to sample next character # based on probability distribution and desired diversity next_index = sample(preds, diversity) # convert integer to character next_char = int_to_char[next_index] # add new character to generated text generated += next_char # delete the first character from beginning of sentance, # and add new caracter to the end. This will form the # input sequence for the next predicted character. sentence = sentence[1:] + next_char # print results to screen sys.stdout.write(next_char) sys.stdout.flush() print """ Explanation: The generate() function will take in: input sentance ('seed') number of characters to generate and target diversity or temperature and print the resulting sequence of characters to the screen. 
End of explanation """ filepath="-basic_LSTM.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=0, save_best_only=True, mode='min') callbacks_list = [checkpoint] """ Explanation: Next, we define a system for Keras to save our model's parameters to a local file after each epoch where it achieves an improvement in the overall loss. This will allow us to reuse the trained model at a later time without having to retrain it from scratch. This is useful for recovering models incase your computer crashes, or you want to stop the training early. End of explanation """ epochs = 50 prediction_length = 100 for iteration in range(epochs): print 'epoch:', iteration + 1, '/', epochs model.fit(X, y, validation_split=0.2, batch_size=256, nb_epoch=1, callbacks=callbacks_list) # get random starting point for seed start_index = random.randint(0, len(raw_text) - seq_length - 1) # extract seed sequence from raw text seed = raw_text[start_index: start_index + seq_length] print '----- generating with seed:', seed for diversity in [0.5, 1.2]: generate(seed, prediction_length, diversity) """ Explanation: Now we are finally ready to train the model. We want to train the model over 50 epochs, but we also want to output some generated text after each epoch to see how our model is doing. To do this we create our own loop to iterate over each epoch. Within the loop we first train the model for one epoch. Since all parameters are stored within the model, training one epoch at a time has the same exact effect as training over a longer series of epochs. We also use the model's validation_split parameter to tell Keras to automatically split the data into 80% training data and 20% test data for validation. Remember to always shuffle your data if you will be using validation! After each epoch is trained, we use the raw_text data to extract a new sequence of 100 characters as the 'seed' for our generated text. 
Finally, we use our generate() helper function to generate text using two different diversity settings. Warning: because of their large depth (remember that an RNN trained on a 100 long sequence effectively has 100 layers!), these networks typically take a much longer time to train than traditional multi-layer ANN's and CNN's. You shoud expect these models to train overnight on the virtual machine, but you should be able to see enough progress after the first few epochs to know if it is worth it to train a model to the end. For more complex RNN models with larger data sets in your own work, you should consider a native installation, along with a dedicated GPU if possible. End of explanation """ pickle_file = '-basic_data.pickle' try: f = open(pickle_file, 'wb') save = { 'X': X, 'y': y, 'int_to_char': int_to_char, 'char_to_int': char_to_int, } pickle.dump(save, f, pickle.HIGHEST_PROTOCOL) f.close() except Exception as e: print 'Unable to save data to', pickle_file, ':', e raise statinfo = os.stat(pickle_file) print 'Saved data to', pickle_file print 'Compressed pickle size:', statinfo.st_size """ Explanation: That looks pretty good! You can see that the RNN has learned alot of the linguistic structure of the original writing, including typical length for words, where to put spaces, and basic punctuation with commas and periods. Many words are still misspelled but seem almost reasonable, and it is pretty amazing that it is able to learn this much in only 50 epochs of training. You can see that the loss is still going down after 50 epochs, so the model can definitely benefit from longer training. If you're curious you can try to train for more epochs, but as the error decreases be careful to monitor the output to make sure that the model is not overfitting. As with other neural network models, you can monitor the difference between training and validation loss to see if overfitting might be occuring. 
In this case, since we're using the model to generate new information, we can also get a sense of overfitting from the material it generates. A good indication of overfitting is if the model outputs exactly what is in the original text given a seed from the text, but jibberish if given a seed that is not in the original text. Remember we don't want the model to learn how to reproduce exactly the original text, but to learn its style to be able to generate new text. As with other models, regularization methods such as dropout and limiting model complexity can be used to avoid the problem of overfitting. Finally, let's save our training data and character to integer mapping dictionaries to an external file so we can reuse it with the model at a later time. End of explanation """
lujinhong/lujinhong.github.io
_posts/tensorflow-keras的基本使用方式.ipynb
mit
fashion_mnist = keras.datasets.fashion_mnist
# Split the 60000 training images: first 5000 held out for validation.
(x_train_all,y_train_all),(x_test,y_test) = fashion_mnist.load_data()
x_valid,x_train = x_train_all[:5000],x_train_all[5000:]
y_valid,y_train = y_train_all[:5000],y_train_all[5000:]
print(x_train.shape,y_train.shape)
print(x_valid.shape,y_valid.shape)
print(x_test.shape,y_test.shape)
"""
Explanation: 1、基本模型构建
1.1 准备数据集
在这里我们使用了fashion_mnist数据集,里面是70000张28*28的图片,图片分为衣服、鞋子等10类。
End of explanation
"""
def show_single_image(img_arr):
    # Render a single 28x28 grayscale image.
    plt.imshow(img_arr, cmap='binary')
    plt.show()
show_single_image(x_train[0])

def show_imgs(n_rows, n_cols, x_data, y_data, class_names):
    # Render an n_rows x n_cols grid of images, titled with their class names.
    assert len(x_data) == len(y_data)
    assert n_rows * n_cols < len(x_data)
    plt.figure(figsize = (n_cols * 1.4, n_rows * 1.6))
    for row in range(n_rows):
        for col in range(n_cols):
            index = n_cols * row + col  # flattened position of this grid cell
            plt.subplot(n_rows, n_cols, index+1)
            plt.imshow(x_data[index], cmap="binary",
                       interpolation = 'nearest')
            plt.axis('off')
            plt.title(class_names[y_data[index]])
    plt.show()

class_names = ['T-shirt', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
show_imgs(3, 5, x_train, y_train, class_names)
"""
Explanation: 我们看一下图片是什么样子的:
End of explanation
"""
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28,28])) # flatten the 2-D 28x28 input array into a 1-D vector
model.add(keras.layers.Dense(300,activation='sigmoid'))
model.add(keras.layers.Dense(100,activation='sigmoid'))
这样我们的模型就构建完成了,我们看一下模型长什么样子的: End of explanation """ history = model.fit(x_train,y_train,epochs=10,validation_data=(x_valid,y_valid)) type(history) history.history """ Explanation: 1.3 训练模型 训练模型fit()返回的是一个History对象,用于保存中间计算过程的数据。 End of explanation """ def print_learning_curves(history): pd.DataFrame(history.history).plot(figsize=(10,5)) plt.grid(True) plt.gca().set_ylim(0,1) plt.show() print_learning_curves(history) """ Explanation: 我们把训练过程中的loss及accuracy打印出来: End of explanation """ model.evaluate(x_test,y_test) """ Explanation: 1.4 evaluate模型 End of explanation """ import numpy as tf import matplotlib.pyplot as plt import pandas as pd import tensorflow as tf from tensorflow import keras fashion_mnist = keras.datasets.fashion_mnist (x_train_all,y_train_all),(x_test,y_test) = fashion_mnist.load_data() x_valid,x_train = x_train_all[:5000],x_train_all[5000:] y_valid,y_train = y_train_all[:5000],y_train_all[5000:] print(x_train.shape,y_train.shape) print(x_valid.shape,y_valid.shape) print(x_test.shape,y_test.shape) def show_single_image(img_arr): plt.imshow(img_arr, cmap='binary') plt.show() show_single_image(x_train[0]) def show_imgs(n_rows, n_cols, x_data, y_data, class_names): assert len(x_data) == len(y_data) assert n_rows * n_cols < len(x_data) plt.figure(figsize = (n_cols * 1.4, n_rows * 1.6)) for row in range(n_rows): for col in range(n_cols): index = n_cols * row + col plt.subplot(n_rows, n_cols, index+1) plt.imshow(x_data[index], cmap="binary", interpolation = 'nearest') plt.axis('off') plt.title(class_names[y_data[index]]) plt.show() class_names = ['T-shirt', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] show_imgs(3, 5, x_train, y_train, class_names) model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28,28])) #将输入的二维数组展开成一维向量 model.add(keras.layers.Dense(300,activation='sigmoid')) model.add(keras.layers.Dense(100,activation='sigmoid')) 
model.add(keras.layers.Dense(10,activation='softmax')) model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) model.layers model.summary() history = model.fit(x_train,y_train,epochs=10,validation_data=(x_valid,y_valid)) type(history) history.history def print_learning_curves(history): pd.DataFrame(history.history).plot(figsize=(10,5)) plt.grid(True) plt.gca().set_ylim(0,1) plt.show() print_learning_curves(history) model.evaluate(x_test,y_test) """ Explanation: 1.5 完整代码 End of explanation """ print(np.max(x_train), np.min(x_train)) """ Explanation: 2、归一化 End of explanation """ #scaler = sklearn.preprocessing.StandardScaler() from sklearn.preprocessing import StandardScaler scaler = StandardScaler() x_train_scaler = scaler.fit_transform( x_train.astype(np.float32).reshape(-1,1)).reshape(-1,28,28) x_valid_scaler = scaler.transform( x_valid.astype(np.float32).reshape(-1,1)).reshape(-1,28,28) x_test_scaler = scaler.transform( x_test.astype(np.float32).reshape(-1,1)).reshape(-1,28,28) """ Explanation: 现有数据在0~255之间,下面我们对数据做归一化。 我们使用均值是0,方差为1的标准正则归一化(也叫Z-score归一化),即: x = (x-u)/std 还有一种常见的归一化方式:Min-max归一化:x*=(x-min)/(max-min),取值在[0,1]之间。 End of explanation """ history = model.fit(x_train_scaler,y_train,epochs=10,validation_data=(x_valid_scaler,y_valid)) model.evaluate(x_test_scaler,y_test) """ Explanation: 上述代码的几个说明: (1)fit_transform/transform接受的是一个二维浮点数向量作为参数,所以需要先转成2维向量再转回三维。 (2)fit_transform()和transform():fit_transform()除了transform的归一化功能外,还把数据集的均值和方差记录下来,供下面的验证集、测试集使用。 然后我们再训练时使用上面经过归一化的数据: End of explanation """ logdir = './callbacks' if not os.path.exists(logdir): os.mkdir(logdir) output_model_file = os.path.join(logdir,"fashion_mnist_model.h5") callbacks = [ keras.callbacks.TensorBoard(logdir), keras.callbacks.ModelCheckpoint(output_model_file, save_best_only = True), keras.callbacks.EarlyStopping(patience=5,min_delta=1e-3) ] history = model.fit(x_train_scaler, y_train, epochs=10, validation_data=(x_valid_scaler,y_valid), 
callbacks = callbacks) """ Explanation: 3、回调函数:TensorBoard EarlyStopping ModelCheckpoint Callbacks: utilities called at certain points during model training. 也就是说模型训练过程中在某些点会触发一些功能或者操作。 最常用的就是TensorBoard EarlyStopping ModelCheckpoint这3类,以下会分别介绍。完整的callback请参考官方文档的 tf.keras.callback:https://www.tensorflow.org/api_docs/python/tf/keras/callbacks?hl=zh-cn End of explanation """ model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28,28])) for _ in range(20): model.add(keras.layers.Dense(10,activation='relu')) model.add(keras.layers.Dense(10,activation='softmax')) """ Explanation: 启动tensorborad的方式很简单: tensorboard --logdir=callbacks 然后打开http://localhost:6006/ 即可。 4、深度神经网络 DNN也没什么特别,就是层数比较多: End of explanation """ model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28,28])) for _ in range(20): model.add(keras.layers.Dense(10,activation='relu')) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.Dense(10,activation='softmax')) """ Explanation: DNN在前几个ecpochs的训练时,loss降低的很慢,导致这个问题的原因主要有: (1)参数众多、训练不足 (2)梯度消失 多层符合函数的链式法则导致的。 5、批归一化、dropout、激活函数 5.1 批归一化 归一化是对训练、测试数据做了归一化,就是模型的输入数据做了归一化。 而批归一化是对每一层激活函数的输出(也就是下一层的输入)都做了归一化。 End of explanation """ model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28,28])) for _ in range(20): model.add(keras.layers.Dense(10,activation='selu')) model.add(keras.layers.Dense(10,activation='softmax')) """ Explanation: 5.2 selu 上述relu+批归一化也可以通过直接使用selu激活函数代替: End of explanation """ model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28,28])) for _ in range(20): model.add(keras.layers.Dense(10,activation='selu')) model.add(keras.layers.AlphaDropout(rate=0.5))# 只在最后一层添加了dropout # AlphaDropout: 1. 均值和方差不变 2. 
归一化性质也不变 # model.add(keras.layers.Dropout(rate=0.5)) model.add(keras.layers.Dense(10,activation='softmax')) """ Explanation: 5.3 dropout End of explanation """ import numpy as np import matplotlib.pyplot as plt import pandas as pd import tensorflow as tf from tensorflow import keras import sklearn import os #导入数据 fashion_mnist = keras.datasets.fashion_mnist (x_train_all,y_train_all),(x_test,y_test) = fashion_mnist.load_data() x_valid,x_train = x_train_all[:5000],x_train_all[5000:] y_valid,y_train = y_train_all[:5000],y_train_all[5000:] #训练数据归一化 from sklearn.preprocessing import StandardScaler scaler = StandardScaler() x_train_scaler = scaler.fit_transform( x_train.astype(np.float32).reshape(-1,1)).reshape(-1,28,28) x_valid_scaler = scaler.transform( x_valid.astype(np.float32).reshape(-1,1)).reshape(-1,28,28) x_test_scaler = scaler.transform( x_test.astype(np.float32).reshape(-1,1)).reshape(-1,28,28) #构建及compile模型 model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28,28])) for _ in range(20): model.add(keras.layers.Dense(100,activation='selu')) model.add(keras.layers.AlphaDropout(rate=0.5)) model.add(keras.layers.Dense(10,activation='softmax')) model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['accuracy']) #定义callback logdir = './callbacks' if not os.path.exists(logdir): os.mkdir(logdir) output_model_file = os.path.join(logdir,"fashion_mnist_model.h5") callbacks = [ keras.callbacks.TensorBoard(logdir), keras.callbacks.ModelCheckpoint(output_model_file, save_best_only = True), keras.callbacks.EarlyStopping(patience=5,min_delta=1e-3) ] #训练模型 history = model.fit(x_train_scaler, y_train, epochs=10, validation_data=(x_valid_scaler,y_valid), callbacks = callbacks) #检查模型效果 def print_learning_curves(history): pd.DataFrame(history.history).plot(figsize=(10,5)) plt.grid(True) plt.gca().set_ylim(0,1) plt.show() print_learning_curves(history) model.evaluate(x_test_scaler,y_test) """ Explanation: 6、完整代码 End of explanation 
"""
Weenkus/Machine-Learning-University-of-Washington
Regression/examples/week-3-polynomial-regression-assignment-blank.ipynb
mit
import graphlab """ Explanation: Regression Week 3: Assessing Fit (polynomial regression) In this notebook you will compare different regression models in order to assess which model fits best. We will be using polynomial regression as a means to examine this topic. In particular you will: * Write a function to take an SArray and a degree and return an SFrame where each column is the SArray to a polynomial value up to the total degree e.g. degree = 3 then column 1 is the SArray column 2 is the SArray squared and column 3 is the SArray cubed * Use matplotlib to visualize polynomial regressions * Use matplotlib to visualize the same polynomial degree on different subsets of the data * Use a validation set to select a polynomial degree * Assess the final fit using test data We will continue to use the House data from previous notebooks. Fire up graphlab create End of explanation """ tmp = graphlab.SArray([1., 2., 3.]) tmp_cubed = tmp.apply(lambda x: x**3) print tmp print tmp_cubed """ Explanation: Next we're going to write a polynomial function that takes an SArray and a maximal degree and returns an SFrame with columns containing the SArray to all the powers up to the maximal degree. The easiest way to apply a power to an SArray is to use the .apply() and lambda x: functions. For example to take the example array and compute the third power we can do as follows: (note running this cell the first time may take longer than expected since it loads graphlab) End of explanation """ ex_sframe = graphlab.SFrame() ex_sframe['power_1'] = tmp print ex_sframe """ Explanation: We can create an empty SFrame using graphlab.SFrame() and then add any columns to it with ex_sframe['column_name'] = value. For example we create an empty SFrame and make the column 'power_1' to be the first power of tmp (i.e. tmp itself). 
End of explanation """ def polynomial_sframe(feature, degree): # assume that degree >= 1 # initialize the SFrame: poly_sframe = graphlab.SFrame() # and set poly_sframe['power_1'] equal to the passed feature # first check if degree > 1 if degree > 1: # then loop over the remaining degrees: # range usually starts at 0 and stops at the endpoint-1. We want it to start at 2 and stop at degree for power in range(2, degree+1): # first we'll give the column a name: name = 'power_' + str(power) # then assign poly_sframe[name] to the appropriate power of feature return poly_sframe """ Explanation: Polynomial_sframe function Using the hints above complete the following function to create an SFrame consisting of the powers of an SArray up to a specific degree: End of explanation """ print polynomial_sframe(tmp, 3) """ Explanation: To test your function consider the smaller tmp variable and what you would expect the outcome of the following call: End of explanation """ sales = graphlab.SFrame('kc_house_data.gl/') """ Explanation: Visualizing polynomial regression Let's use matplotlib to visualize what a polynomial regression looks like on some real data. End of explanation """ sales = sales.sort(['sqft_living', 'price']) """ Explanation: As in Week 3, we will use the sqft_living variable. For plotting purposes (connecting the dots), you'll need to sort by the values of sqft_living. For houses with identical square footage, we break the tie by their prices. End of explanation """ poly1_data = polynomial_sframe(sales['sqft_living'], 1) poly1_data['price'] = sales['price'] # add price to the data since it's the target """ Explanation: Let's start with a degree 1 polynomial using 'sqft_living' (i.e. a line) to predict 'price' and plot what it looks like. 
End of explanation """ model1 = graphlab.linear_regression.create(poly1_data, target = 'price', features = ['power_1'], validation_set = None) #let's take a look at the weights before we plot model1.get("coefficients") import matplotlib.pyplot as plt %matplotlib inline plt.plot(poly1_data['power_1'],poly1_data['price'],'.', poly1_data['power_1'], model1.predict(poly1_data),'-') """ Explanation: NOTE: for all the models in this notebook use validation_set = None to ensure that all results are consistent across users. End of explanation """ poly2_data = polynomial_sframe(sales['sqft_living'], 2) my_features = poly2_data.column_names() # get the name of the features poly2_data['price'] = sales['price'] # add price to the data since it's the target model2 = graphlab.linear_regression.create(poly2_data, target = 'price', features = my_features, validation_set = None) model2.get("coefficients") plt.plot(poly2_data['power_1'],poly2_data['price'],'.', poly2_data['power_1'], model2.predict(poly2_data),'-') """ Explanation: Let's unpack that plt.plot() command. The first pair of SArrays we passed are the 1st power of sqft and the actual price we then ask it to print these as dots '.'. The next pair we pass is the 1st power of sqft and the predicted values from the linear model. We ask these to be plotted as a line '-'. We can see, not surprisingly, that the predicted values all fall on a line, specifically the one with slope 280 and intercept -43579. What if we wanted to plot a second degree polynomial? End of explanation """
google/android-management-api-samples
notebooks/codelab_kiosk.ipynb
apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # https://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2018 Google LLC. Licensed under the Apache License, Version 2.0 (the "License"); End of explanation """ from apiclient.discovery import build from google_auth_oauthlib.flow import InstalledAppFlow from random import randint # This is a public OAuth config, you can use it to run this guide but please use # different credentials when building your own solution. CLIENT_CONFIG = { 'installed': { 'client_id':'882252295571-uvkkfelq073vq73bbq9cmr0rn8bt80ee.apps.googleusercontent.com', 'client_secret': 'S2QcoBe0jxNLUoqnpeksCLxI', 'auth_uri':'https://accounts.google.com/o/oauth2/auth', 'token_uri':'https://accounts.google.com/o/oauth2/token' } } SCOPES = ['https://www.googleapis.com/auth/androidmanagement'] # Run the OAuth flow. flow = InstalledAppFlow.from_client_config(CLIENT_CONFIG, SCOPES) credentials = flow.run_console() # Create the API client. androidmanagement = build('androidmanagement', 'v1', credentials=credentials) print('\nAuthentication succeeded.') """ Explanation: Android Management API - Quickstart If you have not yet read the Android Management API Codelab we recommend that you do so before using this notebook. If you opened this notebook from the Codelab then follow the next instructions on the Codelab. In order to run this notebook, you need: An Android 6.0+ device. Setup The base resource of your Android Management solution is a Google Cloud Platform project. 
All other resources (Enterprises, Devices, Policies, etc) belong to the project and the project controls access to these resources. A solution is typically associated with a single project, but you can create multiple projects if you want to restrict access to resources. For this Codelab we have already created a project for you (project ID: android-management-io-codelab). To create and access resources, you need to authenticate with an account that has edit rights over the project. The account running this Codelab has been given rights over the project above. To start the authentication flow, run the cell below. To run a cell: Click anywhere in the code block. Click the &#9654; button in the top-left of the code block. When you build a server-based solution, you should create a service account so you don't need to authorize the access every time. End of explanation """ enterprise_name = 'enterprises/LC02de1hmx' """ Explanation: Select an enterprise An Enterprise resource binds an organization to your Android Management solution. Devices and Policies both belong to an enterprise. Typically, a single enterprise resource is associated with a single organization. However, you can create multiple enterprises for the same organization based on their needs. For example, an organization may want separate enterprises for its different departments or regions. For this Codelab we have already created an enterprise for you. Run the next cell to select it. 
End of explanation """ import json # Create a random policy name to avoid colision with other Codelabs if 'policy_name' not in locals(): policy_name = enterprise_name + '/policies/' + str(randint(1, 1000000000)) policy_json = ''' { "applications": [ { "packageName": "com.google.samples.apps.iosched", "installType": "FORCE_INSTALLED" } ], "debuggingFeaturesAllowed": true } ''' androidmanagement.enterprises().policies().patch( name=policy_name, body=json.loads(policy_json) ).execute() """ Explanation: Create a policy A Policy is a group of settings that determine the behavior of a managed device and the apps installed on it. Each Policy resource represents a unique group of device and app settings and can be applied to one or more devices. Once a device is linked to a policy, any updates to the policy are automatically applied to the device. To create a basic policy, run the cell below. You'll see how to create more advanced policies later in this guide. End of explanation """ enrollment_token = androidmanagement.enterprises().enrollmentTokens().create( parent=enterprise_name, body={"policyName": policy_name} ).execute() """ Explanation: Provision a device Provisioning refers to the process of enrolling a device with an enterprise, applying the appropriate policies to the device, and guiding the user to complete the set up of their device in accordance with those policies. Before attempting to provision a device, ensure that the device is running Android 6.0 or above. You need an enrollment token for each device that you want to provision (you can use the same token for multiple devices), when creating a token you can specify a policy that will be applied to the device. End of explanation """ from urllib.parse import urlencode image = { 'cht': 'qr', 'chs': '500x500', 'chl': enrollment_token['qrCode'] } qrcode_url = 'https://chart.googleapis.com/chart?' 
+ urlencode(image) print('Please visit this URL to scan the QR code:', qrcode_url) enrollment_link = 'https://enterprise.google.com/android/enroll?et=' + enrollment_token['value'] print('Please open this link on your device:', enrollment_link) """ Explanation: Embed your enrollment token in either an enrollment link or a QR code, and then follow the provisioning instructions below. End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/ec-earth-consortium/cmip6/models/sandbox-1/atmos.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'ec-earth-consortium', 'sandbox-1', 'atmos') """ Explanation: ES-DOC CMIP6 Model Properties - Atmos MIP Era: CMIP6 Institute: EC-EARTH-CONSORTIUM Source ID: SANDBOX-1 Topic: Atmos Sub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos. Properties: 156 (127 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:53:59 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties --&gt; Overview 2. Key Properties --&gt; Resolution 3. Key Properties --&gt; Timestepping 4. Key Properties --&gt; Orography 5. Grid --&gt; Discretisation 6. Grid --&gt; Discretisation --&gt; Horizontal 7. Grid --&gt; Discretisation --&gt; Vertical 8. Dynamical Core 9. Dynamical Core --&gt; Top Boundary 10. Dynamical Core --&gt; Lateral Boundary 11. Dynamical Core --&gt; Diffusion Horizontal 12. Dynamical Core --&gt; Advection Tracers 13. Dynamical Core --&gt; Advection Momentum 14. 
Radiation 15. Radiation --&gt; Shortwave Radiation 16. Radiation --&gt; Shortwave GHG 17. Radiation --&gt; Shortwave Cloud Ice 18. Radiation --&gt; Shortwave Cloud Liquid 19. Radiation --&gt; Shortwave Cloud Inhomogeneity 20. Radiation --&gt; Shortwave Aerosols 21. Radiation --&gt; Shortwave Gases 22. Radiation --&gt; Longwave Radiation 23. Radiation --&gt; Longwave GHG 24. Radiation --&gt; Longwave Cloud Ice 25. Radiation --&gt; Longwave Cloud Liquid 26. Radiation --&gt; Longwave Cloud Inhomogeneity 27. Radiation --&gt; Longwave Aerosols 28. Radiation --&gt; Longwave Gases 29. Turbulence Convection 30. Turbulence Convection --&gt; Boundary Layer Turbulence 31. Turbulence Convection --&gt; Deep Convection 32. Turbulence Convection --&gt; Shallow Convection 33. Microphysics Precipitation 34. Microphysics Precipitation --&gt; Large Scale Precipitation 35. Microphysics Precipitation --&gt; Large Scale Cloud Microphysics 36. Cloud Scheme 37. Cloud Scheme --&gt; Optical Cloud Properties 38. Cloud Scheme --&gt; Sub Grid Scale Water Distribution 39. Cloud Scheme --&gt; Sub Grid Scale Ice Distribution 40. Observation Simulation 41. Observation Simulation --&gt; Isscp Attributes 42. Observation Simulation --&gt; Cosp Attributes 43. Observation Simulation --&gt; Radar Inputs 44. Observation Simulation --&gt; Lidar Inputs 45. Gravity Waves 46. Gravity Waves --&gt; Orographic Gravity Waves 47. Gravity Waves --&gt; Non Orographic Gravity Waves 48. Solar 49. Solar --&gt; Solar Pathways 50. Solar --&gt; Solar Constant 51. Solar --&gt; Orbital Parameters 52. Solar --&gt; Insolation Ozone 53. Volcanos 54. Volcanos --&gt; Volcanoes Treatment 1. Key Properties --&gt; Overview Top level key properties 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.key_properties.overview.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of atmosphere model code (CAM 4.0, ARPEGE 3.2,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.model_family') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "AGCM" # "ARCM" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Model Family Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of atmospheric model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "primitive equations" # "non-hydrostatic" # "anelastic" # "Boussinesq" # "hydrostatic" # "quasi-hydrostatic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Basic approximations made in the atmosphere. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Resolution Characteristics of the model resolution 2.1. Horizontal Resolution Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Canonical Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Expression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Range Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Range of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 2.4. Number Of Vertical Levels Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of vertical levels resolved on the computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.resolution.high_top') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 2.5. High Top Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Timestepping Characteristics of the atmosphere model time stepping 3.1. Timestep Dynamics Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestep for the dynamics, e.g. 30 min. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.2. Timestep Shortwave Radiative Transfer Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for the shortwave radiative transfer, e.g. 1.5 hours. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. Timestep Longwave Radiative Transfer Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Timestep for the longwave radiative transfer, e.g. 3 hours. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.key_properties.orography.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "present day" # "modified" # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Orography Characteristics of the model orography 4.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time adaptation of the orography. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.key_properties.orography.changes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "related to ice sheets" # "related to tectonics" # "modified mean" # "modified variance if taken into account in model (cf gravity waves)" # TODO - please enter value(s) """ Explanation: 4.2. Changes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N If the orography type is modified describe the time adaptation changes. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Grid --&gt; Discretisation Atmosphere grid discretisation 5.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of grid discretisation in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "spectral" # "fixed grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6. Grid --&gt; Discretisation --&gt; Horizontal Atmosphere discretisation in the horizontal 6.1. Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal discretisation type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "finite elements" # "finite volumes" # "finite difference" # "centered finite difference" # TODO - please enter value(s) """ Explanation: 6.2. 
Scheme Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal discretisation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "second" # "third" # "fourth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6.3. Scheme Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal discretisation function order End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "filter" # "pole rotation" # "artificial island" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6.4. Horizontal Pole Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Horizontal discretisation pole singularity treatment End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Gaussian" # "Latitude-Longitude" # "Cubed-Sphere" # "Icosahedral" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 6.5. Grid Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal grid type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "isobaric" # "sigma" # "hybrid sigma-pressure" # "hybrid pressure" # "vertically lagrangian" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 7. 
Grid --&gt; Discretisation --&gt; Vertical Atmosphere discretisation in the vertical 7.1. Coordinate Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Type of vertical coordinate system End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Dynamical Core Characteristics of the dynamical core 8.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of atmosphere dynamical core End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.2. Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name for the dynamical core of the model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Adams-Bashforth" # "explicit" # "implicit" # "semi-implicit" # "leap frog" # "multi-step" # "Runge Kutta fifth order" # "Runge Kutta second order" # "Runge Kutta third order" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.3. Timestepping Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Timestepping framework type End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "surface pressure" # "wind components" # "divergence/curl" # "temperature" # "potential temperature" # "total water" # "water vapour" # "water liquid" # "water ice" # "total water moments" # "clouds" # "radiation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.4. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of the model prognostic variables End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "sponge layer" # "radiation boundary condition" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 9. Dynamical Core --&gt; Top Boundary Type of boundary layer at the top of the model 9.1. Top Boundary Condition Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Top boundary condition End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.2. Top Heat Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Top boundary heat treatment End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9.3. Top Wind Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Top boundary wind treatment End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "sponge layer" # "radiation boundary condition" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Dynamical Core --&gt; Lateral Boundary Type of lateral boundary condition (if the model is a regional model) 10.1. Condition Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Type of lateral boundary condition End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11. Dynamical Core --&gt; Diffusion Horizontal Horizontal diffusion scheme 11.1. Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Horizontal diffusion scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "iterated Laplacian" # "bi-harmonic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.2. Scheme Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal diffusion scheme method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Heun" # "Roe and VanLeer" # "Roe and Superbee" # "Prather" # "UTOPIA" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12. Dynamical Core --&gt; Advection Tracers Tracer advection scheme 12.1. 
Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Tracer advection scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Eulerian" # "modified Euler" # "Lagrangian" # "semi-Lagrangian" # "cubic semi-Lagrangian" # "quintic semi-Lagrangian" # "mass-conserving" # "finite volume" # "flux-corrected" # "linear" # "quadratic" # "quartic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.2. Scheme Characteristics Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Tracer advection scheme characteristics End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "dry mass" # "tracer mass" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.3. Conserved Quantities Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Tracer advection scheme conserved quantities End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "conservation fixer" # "Priestley algorithm" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.4. Conservation Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Tracer advection scheme conservation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "VanLeer" # "Janjic" # "SUPG (Streamline Upwind Petrov-Galerkin)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Dynamical Core --&gt; Advection Momentum Momentum advection scheme 13.1. Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Momentum advection schemes name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "2nd order" # "4th order" # "cell-centred" # "staggered grid" # "semi-staggered grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. Scheme Characteristics Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Momentum advection scheme characteristics End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Arakawa B-grid" # "Arakawa C-grid" # "Arakawa D-grid" # "Arakawa E-grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.3. Scheme Staggering Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Momentum advection scheme staggering type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Angular momentum" # "Horizontal momentum" # "Enstrophy" # "Mass" # "Total energy" # "Vorticity" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.4. 
Conserved Quantities Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Momentum advection scheme conserved quantities End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "conservation fixer" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.5. Conservation Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Momentum advection scheme conservation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.aerosols') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "sulphate" # "nitrate" # "sea salt" # "dust" # "ice" # "organic" # "BC (black carbon / soot)" # "SOA (secondary organic aerosols)" # "POM (particulate organic matter)" # "polar stratospheric ice" # "NAT (nitric acid trihydrate)" # "NAD (nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particle)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Radiation Characteristics of the atmosphere radiation process 14.1. Aerosols Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Aerosols whose radiative effect is taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Radiation --&gt; Shortwave Radiation Properties of the shortwave radiation scheme 15.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of shortwave radiation in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name for the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "wide-band model" # "correlated-k" # "exponential sum fitting" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.3. Spectral Integration Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Shortwave radiation scheme spectral integration End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "two-stream" # "layer interaction" # "bulk" # "adaptive" # "multi-stream" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15.4. Transport Calculation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Shortwave radiation transport calculation methods End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.5. Spectral Intervals Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Shortwave radiation scheme number of spectral intervals End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CO2" # "CH4" # "N2O" # "CFC-11 eq" # "CFC-12 eq" # "HFC-134a eq" # "Explicit ODSs" # "Explicit other fluorinated gases" # "O3" # "H2O" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16. Radiation --&gt; Shortwave GHG Representation of greenhouse gases in the shortwave radiation scheme 16.1. Greenhouse Gas Complexity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Complexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CFC-12" # "CFC-11" # "CFC-113" # "CFC-114" # "CFC-115" # "HCFC-22" # "HCFC-141b" # "HCFC-142b" # "Halon-1211" # "Halon-1301" # "Halon-2402" # "methyl chloroform" # "carbon tetrachloride" # "methyl chloride" # "methylene chloride" # "chloroform" # "methyl bromide" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.2. ODS Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Ozone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "HFC-134a" # "HFC-23" # "HFC-32" # "HFC-125" # "HFC-143a" # "HFC-152a" # "HFC-227ea" # "HFC-236fa" # "HFC-245fa" # "HFC-365mfc" # "HFC-43-10mee" # "CF4" # "C2F6" # "C3F8" # "C4F10" # "C5F12" # "C6F14" # "C7F16" # "C8F18" # "c-C4F8" # "NF3" # "SF6" # "SO2F2" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16.3. Other Fluorinated Gases Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Other fluorinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Radiation --&gt; Shortwave Cloud Ice Shortwave radiative properties of ice crystals in clouds 17.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General shortwave radiative interactions with cloud ice crystals End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "bi-modal size distribution" # "ensemble of ice crystals" # "mean projected area" # "ice water path" # "crystal asymmetry" # "crystal aspect ratio" # "effective crystal radius" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.2. 
Physical Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical representation of cloud ice crystals in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.3. Optical Methods Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Optical methods applicable to cloud ice crystals in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Radiation --&gt; Shortwave Cloud Liquid Shortwave radiative properties of liquid droplets in clouds 18.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General shortwave radiative interactions with cloud liquid droplets End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "cloud droplet number concentration" # "effective cloud droplet radii" # "droplet size distribution" # "liquid water path" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18.2. 
Physical Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical representation of cloud liquid droplets in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "geometric optics" # "Mie theory" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18.3. Optical Methods Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Optical methods applicable to cloud liquid droplets in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Monte Carlo Independent Column Approximation" # "Triplecloud" # "analytic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Radiation --&gt; Shortwave Cloud Inhomogeneity Cloud inhomogeneity in the shortwave radiation scheme 19.1. Cloud Inhomogeneity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method for taking into account horizontal cloud inhomogeneity End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Radiation --&gt; Shortwave Aerosols Shortwave radiative properties of aerosols 20.1. 
General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General shortwave radiative interactions with aerosols End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "number concentration" # "effective radii" # "size distribution" # "asymmetry" # "aspect ratio" # "mixing state" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20.2. Physical Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical representation of aerosols in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20.3. Optical Methods Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Optical methods applicable to aerosols in the shortwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21. Radiation --&gt; Shortwave Gases Shortwave radiative properties of gases 21.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General shortwave radiative interactions with gases End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22. Radiation --&gt; Longwave Radiation Properties of the longwave radiation scheme 22.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of longwave radiation in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.2. Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name for the longwave radiation scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "wide-band model" # "correlated-k" # "exponential sum fitting" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.3. Spectral Integration Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Longwave radiation scheme spectral integration End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "two-stream" # "layer interaction" # "bulk" # "adaptive" # "multi-stream" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.4. Transport Calculation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Longwave radiation transport calculation methods End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 22.5. Spectral Intervals Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Longwave radiation scheme number of spectral intervals End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CO2" # "CH4" # "N2O" # "CFC-11 eq" # "CFC-12 eq" # "HFC-134a eq" # "Explicit ODSs" # "Explicit other fluorinated gases" # "O3" # "H2O" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Radiation --&gt; Longwave GHG Representation of greenhouse gases in the longwave radiation scheme 23.1. Greenhouse Gas Complexity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Complexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CFC-12" # "CFC-11" # "CFC-113" # "CFC-114" # "CFC-115" # "HCFC-22" # "HCFC-141b" # "HCFC-142b" # "Halon-1211" # "Halon-1301" # "Halon-2402" # "methyl chloroform" # "carbon tetrachloride" # "methyl chloride" # "methylene chloride" # "chloroform" # "methyl bromide" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23.2. ODS Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Ozone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "HFC-134a" # "HFC-23" # "HFC-32" # "HFC-125" # "HFC-143a" # "HFC-152a" # "HFC-227ea" # "HFC-236fa" # "HFC-245fa" # "HFC-365mfc" # "HFC-43-10mee" # "CF4" # "C2F6" # "C3F8" # "C4F10" # "C5F12" # "C6F14" # "C7F16" # "C8F18" # "c-C4F8" # "NF3" # "SF6" # "SO2F2" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23.3. Other Fluorinated Gases Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Other fluorinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24. Radiation --&gt; Longwave Cloud Ice Longwave radiative properties of ice crystals in clouds 24.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General longwave radiative interactions with cloud ice crystals End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "bi-modal size distribution" # "ensemble of ice crystals" # "mean projected area" # "ice water path" # "crystal asymmetry" # "crystal aspect ratio" # "effective crystal radius" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24.2. 
Physical Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical representation of cloud ice crystals in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24.3. Optical Methods Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Optical methods applicable to cloud ice crystals in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25. Radiation --&gt; Longwave Cloud Liquid Longwave radiative properties of liquid droplets in clouds 25.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General longwave radiative interactions with cloud liquid droplets End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "cloud droplet number concentration" # "effective cloud droplet radii" # "droplet size distribution" # "liquid water path" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.2. 
Physical Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical representation of cloud liquid droplets in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "geometric optics" # "Mie theory" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.3. Optical Methods Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Optical methods applicable to cloud liquid droplets in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Monte Carlo Independent Column Approximation" # "Triplecloud" # "analytic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26. Radiation --&gt; Longwave Cloud Inhomogeneity Cloud inhomogeneity in the longwave radiation scheme 26.1. Cloud Inhomogeneity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method for taking into account horizontal cloud inhomogeneity End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27. Radiation --&gt; Longwave Aerosols Longwave radiative properties of aerosols 27.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General longwave radiative interactions with aerosols End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "number concentration" # "effective radii" # "size distribution" # "asymmetry" # "aspect ratio" # "mixing state" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27.2. Physical Representation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical representation of aerosols in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "T-matrix" # "geometric optics" # "finite difference time domain (FDTD)" # "Mie theory" # "anomalous diffraction approximation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27.3. Optical Methods Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Optical methods applicable to aerosols in the longwave radiation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "scattering" # "emission/absorption" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 28. Radiation --&gt; Longwave Gases Longwave radiative properties of gases 28.1. General Interactions Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N General longwave radiative interactions with gases End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 29. Turbulence Convection Atmosphere Convective Turbulence and Clouds 29.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of atmosphere convection and turbulence End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Mellor-Yamada" # "Holtslag-Boville" # "EDMF" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 30. Turbulence Convection --&gt; Boundary Layer Turbulence Properties of the boundary layer turbulence scheme 30.1. Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Boundary layer turbulence scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "TKE prognostic" # "TKE diagnostic" # "TKE coupled with water" # "vertical profile of Kz" # "non-local diffusion" # "Monin-Obukhov similarity" # "Coastal Buddy Scheme" # "Coupled with convection" # "Coupled with gravity waves" # "Depth capped at cloud base" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 30.2. Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Boundary layer turbulence scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 30.3. Closure Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Boundary layer turbulence scheme closure order End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 30.4. Counter Gradient Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Uses boundary layer turbulence scheme counter gradient End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 31. Turbulence Convection --&gt; Deep Convection Properties of the deep convection scheme 31.1. Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Deep convection scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "mass-flux" # "adjustment" # "plume ensemble" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31.2. Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Deep convection scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "CAPE" # "bulk" # "ensemble" # "CAPE/WFN based" # "TKE/CIN based" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31.3. Scheme Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Deep convection scheme method End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "vertical momentum transport" # "convective momentum transport" # "entrainment" # "detrainment" # "penetrative convection" # "updrafts" # "downdrafts" # "radiative effect of anvils" # "re-evaporation of convective precipitation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31.4. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical processes taken into account in the parameterisation of deep convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "tuning parameter based" # "single moment" # "two moment" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31.5. Microphysics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Microphysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeor and water vapor from updrafts End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 32. Turbulence Convection --&gt; Shallow Convection Properties of the shallow convection scheme 32.1. Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Shallow convection scheme name End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "mass-flux" # "cumulus-capped boundary layer" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 32.2. Scheme Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N shallow convection scheme type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "same as deep (unified)" # "included in boundary layer turbulence" # "separate diagnosis" # TODO - please enter value(s) """ Explanation: 32.3. Scheme Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 shallow convection scheme method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "convective momentum transport" # "entrainment" # "detrainment" # "penetrative convection" # "re-evaporation of convective precipitation" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 32.4. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Physical processes taken into account in the parameterisation of shallow convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "tuning parameter based" # "single moment" # "two moment" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 32.5. 
Microphysics Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Microphysics scheme for shallow convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 33. Microphysics Precipitation Large Scale Cloud Microphysics and Precipitation 33.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of large scale cloud microphysics and precipitation End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 34. Microphysics Precipitation --&gt; Large Scale Precipitation Properties of the large scale precipitation scheme 34.1. Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name of the large scale precipitation parameterisation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "liquid rain" # "snow" # "hail" # "graupel" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 34.2. Hydrometeors Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Precipitating hydrometeors taken into account in the large scale precipitation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 35. 
Microphysics Precipitation --&gt; Large Scale Cloud Microphysics Properties of the large scale cloud microphysics scheme 35.1. Scheme Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name of the microphysics parameterisation scheme used for large scale clouds. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "mixed phase" # "cloud droplets" # "cloud ice" # "ice nucleation" # "water vapour deposition" # "effect of raindrops" # "effect of snow" # "effect of graupel" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 35.2. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Large scale cloud microphysics processes End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 36. Cloud Scheme Characteristics of the cloud scheme 36.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of the atmosphere cloud scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 36.2. Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name for the cloud scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "atmosphere_radiation" # "atmosphere_microphysics_precipitation" # "atmosphere_turbulence_convection" # "atmosphere_gravity_waves" # "atmosphere_solar" # "atmosphere_volcano" # "atmosphere_cloud_simulator" # TODO - please enter value(s) """ Explanation: 36.3. Atmos Coupling Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Atmosphere components that are linked to the cloud scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 36.4. Uses Separate Treatment Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Different cloud schemes for the different types of clouds (convective, stratiform and boundary layer) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.processes') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "entrainment" # "detrainment" # "bulk cloud" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 36.5. Processes Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Processes included in the cloud scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 36.6. Prognostic Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the cloud scheme a prognostic scheme? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 36.7. Diagnostic Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the cloud scheme a diagnostic scheme? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "cloud amount" # "liquid" # "ice" # "rain" # "snow" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 36.8. Prognostic Variables Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List the prognostic variables used by the cloud scheme, if applicable. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "random" # "maximum" # "maximum-random" # "exponential" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 37. Cloud Scheme --&gt; Optical Cloud Properties Optical cloud properties 37.1. Cloud Overlap Method Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Method for taking into account overlapping of cloud layers End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.2. Cloud Inhomogeneity Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Method for taking into account cloud inhomogeneity End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # TODO - please enter value(s) """ Explanation: 38. Cloud Scheme --&gt; Sub Grid Scale Water Distribution Sub-grid scale water distribution 38.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Sub-grid scale water distribution type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 38.2. Function Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Sub-grid scale water distribution function name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 38.3. Function Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Sub-grid scale water distribution function type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "coupled with deep" # "coupled with shallow" # "not coupled with convection" # TODO - please enter value(s) """ Explanation: 38.4. Convection Coupling Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Sub-grid scale water distribution coupling with convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "prognostic" # "diagnostic" # TODO - please enter value(s) """ Explanation: 39. Cloud Scheme --&gt; Sub Grid Scale Ice Distribution Sub-grid scale ice distribution 39.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Sub-grid scale ice distribution type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 39.2. Function Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Sub-grid scale ice distribution function name End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 39.3. Function Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Sub-grid scale ice distribution function type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "coupled with deep" # "coupled with shallow" # "not coupled with convection" # TODO - please enter value(s) """ Explanation: 39.4. Convection Coupling Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Sub-grid scale ice distribution coupling with convection End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.observation_simulation.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 40. Observation Simulation Characteristics of observation simulation 40.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of observation simulator characteristics End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "no adjustment" # "IR brightness" # "visible optical depth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 41. Observation Simulation --&gt; Isscp Attributes ISSCP Characteristics 41.1. Top Height Estimation Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Cloud simulator ISSCP top height estimation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "lowest altitude level" # "highest altitude level" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 41.2. Top Height Direction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator ISSCP top height direction End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Inline" # "Offline" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 42. Observation Simulation --&gt; Cosp Attributes CFMIP Observational Simulator Package attributes 42.1. 
Run Configuration Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator COSP run configuration End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 42.2. Number Of Grid Points Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator COSP number of grid points End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 42.3. Number Of Sub Columns Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator COSP number of sub-columns used to simulate sub-grid variability End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 42.4. Number Of Levels Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator COSP number of levels End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 43. Observation Simulation --&gt; Radar Inputs Characteristics of the cloud radar simulator 43.1. Frequency Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator radar frequency (Hz) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "surface" # "space borne" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 43.2. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator radar type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 43.3. Gas Absorption Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator radar uses gas absorption End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 43.4. Effective Radius Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator radar uses effective radius End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "ice spheres" # "ice non-spherical" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 44. Observation Simulation --&gt; Lidar Inputs Characteristics of the cloud lidar simulator 44.1. Ice Types Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Cloud simulator lidar ice type End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "max" # "random" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 44.2. Overlap Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Cloud simulator lidar overlap End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 45. Gravity Waves Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources. 45.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of gravity wave parameterisation in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Rayleigh friction" # "Diffusive sponge layer" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 45.2. Sponge Layer Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Sponge layer in the upper levels in order to avoid gravity wave reflection at the top. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "continuous spectrum" # "discrete spectrum" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 45.3. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background wave distribution End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "effect on drag" # "effect on lifting" # "enhanced topography" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 45.4. Subgrid Scale Orography Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Subgrid scale orography effects taken into account. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 46. Gravity Waves --&gt; Orographic Gravity Waves Gravity waves generated due to the presence of orography 46.1. Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name for the orographic gravity wave scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "linear mountain waves" # "hydraulic jump" # "envelope orography" # "low level flow blocking" # "statistical sub-grid scale variance" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 46.2. Source Mechanisms Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Orographic gravity wave source mechanisms End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "non-linear calculation" # "more than two cardinal directions" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 46.3. 
Calculation Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Orographic gravity wave calculation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "linear theory" # "non-linear theory" # "includes boundary layer ducting" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 46.4. Propagation Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Orographic gravity wave propagation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "total wave" # "single wave" # "spectral" # "linear" # "wave saturation vs Richardson number" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 46.5. Dissipation Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Orographic gravity wave dissipation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 47. Gravity Waves --&gt; Non Orographic Gravity Waves Gravity waves generated by non-orographic processes. 47.1. Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Commonly used name for the non-orographic gravity wave scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "convection" # "precipitation" # "background spectrum" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 47.2. Source Mechanisms Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Non-orographic gravity wave source mechanisms End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "spatially dependent" # "temporally dependent" # TODO - please enter value(s) """ Explanation: 47.3. Calculation Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Non-orographic gravity wave calculation method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "linear theory" # "non-linear theory" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 47.4. Propagation Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Non-orographic gravity wave propagation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "total wave" # "single wave" # "spectral" # "linear" # "wave saturation vs Richardson number" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 47.5. 
Dissipation Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Non-orographic gravity wave dissipation scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 48. Solar Top of atmosphere solar insolation characteristics 48.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of solar insolation of the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "SW radiation" # "precipitating energetic particles" # "cosmic rays" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 49. Solar --&gt; Solar Pathways Pathways for solar forcing of the atmosphere 49.1. Pathways Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Pathways for the solar forcing of the atmosphere model domain End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_constant.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "fixed" # "transient" # TODO - please enter value(s) """ Explanation: 50. Solar --&gt; Solar Constant Solar constant and top of atmosphere insolation characteristics 50.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time adaptation of the solar constant. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 50.2. 
Fixed Value Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If the solar constant is fixed, enter the value of the solar constant (W m-2). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 50.3. Transient Characteristics Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 solar constant transient characteristics (W m-2) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "fixed" # "transient" # TODO - please enter value(s) """ Explanation: 51. Solar --&gt; Orbital Parameters Orbital parameters and top of atmosphere insolation characteristics 51.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time adaptation of orbital parameters End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 51.2. Fixed Reference Date Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Reference date for fixed orbital parameters (yyyy) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 51.3. Transient Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Description of transient orbital parameters End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Berger 1978" # "Laskar 2004" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 51.4. Computation Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Method used for computing orbital parameters. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 52. Solar --&gt; Insolation Ozone Impact of solar insolation on stratospheric ozone 52.1. Solar Ozone Impact Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Does top of atmosphere insolation impact on stratospheric ozone? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.volcanos.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 53. Volcanos Characteristics of the implementation of volcanoes 53.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview description of the implementation of volcanic effects in the atmosphere End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "high frequency solar constant anomaly" # "stratospheric aerosols optical thickness" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 54. Volcanos --&gt; Volcanoes Treatment Treatment of volcanoes in the atmosphere 54.1. 
Volcanoes Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How volcanic effects are modeled in the atmosphere. End of explanation """
robertoalotufo/ia898
2S2018/04 Gerando imagens sinteticas.ipynb
mit
import numpy as np """ Explanation: Criação de imagens sintéticas Imagens sintéticas são bastante utilizadas nos testes de algoritmos e na geração de padrões de imagens. Iremos aprender a gerar os valores dos pixels de uma imagem a partir de uma equação matemática de forma muito eficiente, sem a necessidade de se usar explicitamente a varredura dos pixels através do comando for. A forma preferencial de criação de imagens sintéticas, quando sua equação é fornecida, é através do uso das funções que geram uma matriz de coordenadas. As duas funções que iremos utilizar neste curso são indices e meshgrid. Estude o tutorial em: Indices e Meshgrid Ele é fundamental para entender os exemplos a seguir. End of explanation """ r,c = np.meshgrid(np.arange(-75,75), np.arange(-100,100), indexing='ij') f = r * c %matplotlib inline import matplotlib.pyplot as plt plt.title('Ponto de sela') plt.imshow(f, cmap='gray') """ Explanation: Imagem da função "sela" A função "sela" bidimensional é uma função dada pelo produto de suas coordenadas r e c. Observe que, implementando desta forma, é possível fazer com que o código Python/NumPy fique muito próximo à equação matemática, colocada a seguir. Vamos gerar uma função sela, onde os valores para as linhas serão valores inteiros entre -75 e 75 e os valores para as colunas, inteiros no intervalo [-100,100]: $$ f(r,c) = r \ c $$ $$ \text{para} \ r \in [-75,75] $$ $$ c \in [-100,100]$$ No exemplo a seguir é utilizado a função arange para gerar os vetores de coordenadas. Para melhorar a visualização foi utilizada a função ia636:iaisolines iaisolines que permite visualizar os pixels de mesmo valores (isolinhas) da imagem gerada com uma cor destacada. End of explanation """
google/data-driven-discretization-1d
notebooks/burgers-super-resolution.ipynb
apache-2.0
! pip install -q -U xarray matplotlib ! rm -rf data-driven-discretization-1d ! git clone https://github.com/google/data-driven-discretization-1d.git ! pip install -q -e data-driven-discretization-1d # install the seaborn bug-fix from https://github.com/mwaskom/seaborn/pull/1602 ! pip install -U -q git+git://github.com/stfnrpplngr/seaborn.git@309a9de383fac4db1c66dbf87815c4ba0c439c59 # Ensure we're using Tensorflow 1.x in Colab. If not using Colab, remove this magic. %tensorflow_version 1.x import tensorflow as tf assert tf.__version__[:2] == '1.' import seaborn assert seaborn.__version__ == '0.9.0', 'restart kernel after running previous cell' from matplotlib.colors import LogNorm import enum import numpy as np import matplotlib from matplotlib import pyplot as plt import sys, time, os, h5py import os import json import numpy as np import seaborn import pandas as pd import xarray import matplotlib.pyplot as plt import pde_superresolution.utils import pde_superresolution as pde """ Explanation: Super-resolution of Burgers' equation This notebook reconstructs Figure 1 from our paper Learning data-driven discretizations for partial differential equations, training a model from scratch. This is useful for gaining insight about these methods (and our code), but it's worth reiterating that this model not one that we used for actual time-integration. Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Setup environment End of explanation """ def _stack_all_rolls(inputs: tf.Tensor, max_offset: int) -> tf.Tensor: """Stack together all rolls of inputs, from 0 to max_offset.""" rolled = [tf.concat([inputs[i:, ...], inputs[:i, ...]], axis=0) for i in range(max_offset)] return tf.stack(rolled, axis=0) @enum.unique class Dataset(enum.Enum): TRAINING = 0 VALIDATION = 1 def _model_inputs(fine_inputs, resample_factor): inputs = fine_inputs[:, resample_factor-1::resample_factor] labels = tf.stack([fine_inputs[:, offset-1::resample_factor] for offset in range(1, resample_factor)], axis=-1) base_grid = pde.polynomials.regular_grid( pde.polynomials.GridOffset.STAGGERED, derivative_order=0, accuracy_order=hparams.coefficient_grid_min_size, dx=1) baselines = [] for offset in range(1, hparams.resample_factor): current_grid = base_grid + 0.5 - offset / hparams.resample_factor method = pde.polynomials.Method.FINITE_DIFFERENCES reconstruction = pde.polynomials.reconstruct( inputs, current_grid, method, derivative_order=0) baselines.append(reconstruction) baseline = tf.stack(baselines, axis=-1) results = {'inputs': inputs, 'labels': labels, 'baseline': baseline} for accuracy_order in [1, 3, 5]: base_grid = pde.polynomials.regular_grid( pde.polynomials.GridOffset.STAGGERED, derivative_order=0, accuracy_order=accuracy_order, dx=1) baselines = [] for offset in range(1, hparams.resample_factor): current_grid = base_grid + 0.5 - offset / hparams.resample_factor method = pde.polynomials.Method.FINITE_DIFFERENCES reconstruction = pde.polynomials.reconstruct( inputs, current_grid, method, derivative_order=0) baselines.append(reconstruction) results[f'baseline_{accuracy_order}'] = tf.stack(baselines, axis=-1) return results def make_dataset(snapshots, hparams, dataset_type: Dataset = Dataset.TRAINING, repeat: bool = True, evaluation: bool = False) -> tf.data.Dataset: snapshots = np.asarray(snapshots, dtype=np.float32) num_training = int(round(snapshots.shape[0] * hparams.frac_training)) if 
dataset_type is Dataset.TRAINING: indexer = slice(None, num_training) else: assert dataset_type is Dataset.VALIDATION indexer = slice(num_training, None) dataset = tf.data.Dataset.from_tensor_slices(snapshots[indexer]) # no need to do dataset augmentation with rolling for eval rolls_stop = 1 if evaluation else hparams.resample_factor dataset = dataset.map(lambda x: _stack_all_rolls(x, rolls_stop)) dataset = dataset.map(lambda x: _model_inputs(x, hparams.resample_factor)) dataset = dataset.apply(tf.data.experimental.unbatch()) dataset = dataset.cache() if repeat: dataset = dataset.apply( tf.data.experimental.shuffle_and_repeat(buffer_size=10000)) batch_size = hparams.base_batch_size * hparams.resample_factor dataset = dataset.batch(batch_size) dataset = dataset.prefetch(buffer_size=1) return dataset def stack_reconstruction(inputs, predictions): if isinstance(inputs, tf.Tensor): stacked = tf.concat([predictions, inputs[..., tf.newaxis], ], axis=-1) return tf.layers.flatten(stacked) else: stacked = np.concatenate([predictions, inputs[..., np.newaxis]], axis=-1) new_shape = stacked.shape[:-2] + (np.prod(stacked.shape[-2:]),) return stacked.reshape(new_shape) """ Explanation: Library code dataset generation End of explanation """ def predict_coefficients(inputs: tf.Tensor, hparams: tf.contrib.training.HParams, reuse: object = tf.AUTO_REUSE) -> tf.Tensor: _, equation = pde.equations.from_hparams(hparams) pde.model.assert_consistent_solution(equation, inputs) with tf.variable_scope('predict_coefficients', reuse=reuse): num_derivatives = len(equation.DERIVATIVE_ORDERS) base_grid = pde.polynomials.regular_grid( pde.polynomials.GridOffset.STAGGERED, derivative_order=0, accuracy_order=hparams.coefficient_grid_min_size, dx=1.0) net = inputs[:, :, tf.newaxis] net /= equation.standard_deviation activation = pde.model._NONLINEARITIES[hparams.nonlinearity] for _ in range(hparams.num_layers - 1): net = pde.layers.conv1d_periodic_layer(net, filters=hparams.filter_size, 
kernel_size=hparams.kernel_size, activation=activation, center=True) poly_accuracy_layers = [] for offset in range(1, hparams.resample_factor): current_grid = base_grid + 0.5 - offset / hparams.resample_factor method = pde.polynomials.Method.FINITE_DIFFERENCES poly_accuracy_layers.append( pde.polynomials.PolynomialAccuracyLayer( grid=current_grid, method=method, derivative_order=0, accuracy_order=hparams.polynomial_accuracy_order, out_scale=hparams.polynomial_accuracy_scale) ) input_sizes = [layer.input_size for layer in poly_accuracy_layers] if hparams.num_layers > 0: net = pde.layers.conv1d_periodic_layer(net, filters=sum(input_sizes), kernel_size=hparams.kernel_size, activation=None, center=True) else: initializer = tf.initializers.zeros() coefficients = tf.get_variable( 'coefficients', (sum(input_sizes),), initializer=initializer) net = tf.tile(coefficients[tf.newaxis, tf.newaxis, :], [tf.shape(inputs)[0], inputs.shape[1].value, 1]) cum_sizes = np.cumsum(input_sizes) starts = [0] + cum_sizes[:-1].tolist() stops = cum_sizes.tolist() zipped = zip(starts, stops, poly_accuracy_layers) outputs = tf.stack([layer.apply(net[..., start:stop]) for start, stop, layer in zipped], axis=-2) assert outputs.shape.as_list()[-1] == base_grid.size return outputs def predict(inputs, hparams): coefficients = predict_coefficients(inputs, hparams) return pde.model.apply_coefficients(coefficients, inputs) def setup_training(dataset, hparams, scale=1.0): tensors = dataset.make_one_shot_iterator().get_next() predictions = predict(tensors['inputs'], hparams) loss = tf.reduce_mean((tensors['labels'] - predictions) ** 2) / scale train_step = pde.training.create_training_step(loss, hparams) return loss, train_step def baseline_loss(snapshots, hparams): dataset = make_dataset(snapshots, hparams, repeat=False, evaluation=True) tensors = dataset.make_one_shot_iterator().get_next() loss = tf.reduce_mean((tensors['labels'] - tensors['baseline_1']) ** 2) sess = 
tf.Session(config=pde.training._session_config()) losses = [] while True: try: losses.append(sess.run(loss)) except tf.errors.OutOfRangeError: break return np.mean(losses) """ Explanation: model End of explanation """ ! gsutil cp gs://data-driven-discretization-public/training-data/burgers.h5 . with h5py.File('burgers.h5') as f: snapshots = f['v'][...] """ Explanation: one time step End of explanation """ hparams = pde.training.create_hparams( equation='burgers', conservative=False, coefficient_grid_min_size=6, resample_factor=16, equation_kwargs=json.dumps(dict(num_points=512)), base_batch_size=32, ) """ Explanation: visualize an example End of explanation """ demo_dataset = make_dataset(snapshots, hparams, repeat=False, evaluation=True) sess = tf.Session(config=pde.training._session_config()) tf_example = demo_dataset.make_one_shot_iterator().get_next() example = sess.run(tf_example) plt.figure(figsize=(16, 4)) example_id = 2 plt.scatter(np.arange(0, 512, hparams.resample_factor), np.roll(example['inputs'][example_id], 1, axis=-1), marker='s') plt.plot(stack_reconstruction(example['inputs'], example['baseline'])[example_id], label='baseline') plt.plot(stack_reconstruction(example['inputs'], example['labels'])[example_id], label='exact') plt.legend() """ Explanation: Baseline performance End of explanation """ demo_dataset = make_dataset(snapshots, hparams, Dataset.VALIDATION, repeat=False, evaluation=True) tensors = demo_dataset.make_one_shot_iterator().get_next() tensors['predictions'] = predict(tensors['inputs'], hparams) sess.run(tf.global_variables_initializer()) example = sess.run(tensors) for example_id in [0, 10, 20, 30, 40]: plt.figure(figsize=(16, 4)) plt.scatter(np.arange(0, 512, hparams.resample_factor), np.roll(example['inputs'][example_id], 1, axis=-1), marker='s') plt.plot(stack_reconstruction(example['inputs'], example['baseline'])[example_id], label='baseline') plt.plot(stack_reconstruction(example['inputs'], example['labels'])[example_id], 
label='exact') plt.plot(stack_reconstruction(example['inputs'], example['predictions'])[example_id], label='predictions') plt.legend() """ Explanation: Untrained model End of explanation """ hparams = pde.training.create_hparams( equation='burgers', conservative=False, coefficient_grid_min_size=6, resample_factor=8, equation_kwargs=json.dumps(dict(num_points=512)), eval_interval=500, learning_stops=[20000, 40000], learning_rates=[3e-3, 3e-4], ) loss_scale = baseline_loss(snapshots, hparams) %%time tf.reset_default_graph() dataset = make_dataset(snapshots, hparams) loss, train_step = setup_training(dataset, hparams, scale=loss_scale) sess = tf.Session(config=pde.training._session_config()) sess.run(tf.global_variables_initializer()) %%time for step in range(hparams.learning_stops[-1]): sess.run(train_step) if (step + 1) % hparams.eval_interval == 0: print(step, sess.run(loss)) demo_dataset = make_dataset(snapshots, hparams, Dataset.VALIDATION, repeat=False, evaluation=True) tensors = demo_dataset.make_one_shot_iterator().get_next() tensors['predictions'] = predict(tensors['inputs'], hparams) array_list = [] while True: try: array_list.append(sess.run(tensors)) except tf.errors.OutOfRangeError: break arrays = {k: np.concatenate([d[k] for d in array_list]) for k in array_list[0]} ds = xarray.Dataset({ 'inputs': (('sample', 'x'), arrays['inputs']), 'labels': (('sample', 'x', 'offset'), arrays['labels']), 'nn_predictions': (('sample', 'x', 'offset'), arrays['predictions']), 'poly_predictions': (('sample', 'x', 'accuracy_order', 'offset'), np.stack([arrays['baseline_1'],arrays['baseline_3'], arrays['baseline_5']], axis=-2)), }, coords={'accuracy_order': [1, 3, 5]}) ds """ Explanation: train a model (optional) Note that below we'll download the saved results from a previous run. End of explanation """ !gsutil cp gs://data-driven-discretization-public/reconstruction/burgers_results_8x.nc . 
hparams = pde.training.create_hparams( equation='burgers', conservative=False, coefficient_grid_min_size=6, resample_factor=8, equation_kwargs=json.dumps(dict(num_points=512)), eval_interval=500, learning_stops=[20000, 40000], learning_rates=[3e-3, 3e-4], ) ds = xarray.open_dataset('burgers_results_8x.nc').load() ds """ Explanation: Examine results from the trained model End of explanation """ plt.hist(abs(ds.labels - ds.poly_predictions.sel(accuracy_order=3)).data.ravel(), bins=np.geomspace(1e-6, 2, num=51), alpha=0.5, label='3rd order'); plt.hist(abs(ds.labels - ds.nn_predictions).data.ravel(), bins=np.geomspace(1e-6, 2, num=51), alpha=0.5, label='neural net'); plt.xscale('log') plt.legend() """ Explanation: Figures Overall histogram of absolut error End of explanation """ example_id = 0 fig, axes = plt.subplots(3, 1, figsize=(10, 15)) x = np.arange(512) * 2 * np.pi / 512 colors = seaborn.color_palette(n_colors=3) for ax, example_id in zip(axes.ravel(), [0, 10, 20]): ax.scatter(x[hparams.resample_factor-1::hparams.resample_factor], ds.inputs.data[example_id], marker='s', color=colors[0]) ax.plot(x, stack_reconstruction(ds.inputs.data, ds.labels.data)[example_id], label='exact', color=colors[0]) ax.plot(x, stack_reconstruction(ds.inputs.data, ds.poly_predictions.sel(accuracy_order=3).data)[example_id], label='baseline', color=colors[1]) ax.plot(x, stack_reconstruction(ds.inputs.data, ds.nn_predictions.data)[example_id], label='predictions', linestyle='--', color=colors[2]) """ Explanation: Some full examples -- we are much better than third-order polynomial interpolation! 
End of explanation """ disc = xarray.Dataset() disc['nn_error'] = abs(ds.nn_predictions - ds.labels).mean('offset') disc['poly_error'] = abs(ds.poly_predictions.sel(accuracy_order=3) - ds.labels).mean('offset') # https://en.wikipedia.org/wiki/Curvature#Curvature_of_the_graph_of_a_function use_slope = 0 dx = 2*np.pi/512 y_xx = ds.labels.diff('offset', 2) / dx ** 2 y_x = 0.5 * (ds.labels.diff('offset').sel(offset=slice(None, -1)) + ds.labels.diff('offset').sel(offset=slice(1, None))) / dx disc['bin_curvature'] = (abs(y_xx) / (1 + use_slope * y_x ** 2) ** (3/2)).max('offset') y = stack_reconstruction(ds.inputs.data, ds.labels.data).astype(np.float64) y_xx = (np.roll(y, -1, axis=-1) - 2 * y + np.roll(y, 1, axis=-1)) / dx ** 2 y_x = (0.5 * np.roll(y, -1, axis=-1) - 0.5 * np.roll(y, 1, axis=-1)) / dx curvature = abs(y_xx) / (1 + use_slope * y_x ** 2) ** (3/2) resample_factor = 8 curvature = np.stack([curvature[:, offset-1::resample_factor] for offset in range(1, resample_factor)], axis=-1) disc['curvature'] = ds.labels.copy(data=curvature) disc['nearest_curvature'] = 10 ** np.log10(disc.bin_curvature).round(1) df = disc.to_dataframe().reset_index() curvature_count = (df.groupby('nearest_curvature').count())['sample'] import pandas as pd # TODO(shoyer): upstream this into Seaborn class CustomLinePlotter(seaborn.relational._LinePlotter): def aggregate(self, vals, grouper, units=None): """Compute an estimate and confidence interval using grouper.""" func = self.estimator ci = self.ci n_boot = self.n_boot # Define a "null" CI for when we only have one value null_ci = pd.Series(index=["low", "high"], dtype=np.float) # Group and get the aggregation estimate grouped = vals.groupby(grouper, sort=self.sort) est = grouped.agg(func) lower = grouped.quantile(1 - ci) upper = grouped.quantile(ci) cis = pd.DataFrame(np.c_[lower, upper], index=est.index, columns=["low", "high"]).stack() # Unpack the CIs into "wide" format for plotting if cis.notnull().any(): cis = 
cis.unstack().reindex(est.index) else: cis = None return est.index, est, cis def custom_lineplot(x=None, y=None, hue=None, size=None, style=None, data=None, palette=None, hue_order=None, hue_norm=None, sizes=None, size_order=None, size_norm=None, dashes=True, markers=None, style_order=None, units=None, estimator="mean", ci=95, n_boot=1000, sort=True, err_style="band", err_kws=None, legend="brief", ax=None, **kwargs): p = CustomLinePlotter( x=x, y=y, hue=hue, size=size, style=style, data=data, palette=palette, hue_order=hue_order, hue_norm=hue_norm, sizes=sizes, size_order=size_order, size_norm=size_norm, dashes=dashes, markers=markers, style_order=style_order, units=units, estimator=estimator, ci=ci, n_boot=n_boot, sort=sort, err_style=err_style, err_kws=err_kws, legend=legend, ) if ax is None: ax = plt.gca() p.plot(ax, kwargs) return ax seaborn.set_context("notebook", font_scale=12/11) fig = plt.figure(figsize=(2*3.42, 2*2)) # LEFT example_id = 0 ax = fig.subplots(1, 1, gridspec_kw=dict(bottom=0.11, top=0.985, left=0.095, right=0.58)) x = np.arange(512) * 2 * np.pi / 512 colors = ['C0', 'C1', 'C2'] ax.scatter(x[hparams.resample_factor-1::hparams.resample_factor], ds.inputs.data[example_id], marker='D', label='Known points', color=colors[0]) ax.plot(x, stack_reconstruction(ds.inputs.data, ds.labels.data)[example_id], label='Exact solution', color=colors[0], linewidth=3, ) ax.plot(x, stack_reconstruction(ds.inputs.data, ds.poly_predictions.sel(accuracy_order=3).data)[example_id], label='Polynomial interp.', color=colors[1]) ax.plot(x, stack_reconstruction(ds.inputs.data, ds.nn_predictions.data)[example_id], label='Neural net interp.', linestyle='--', color=colors[-1]) ax.legend(frameon=False, loc='lower left') ax.set_xlim(1.13, 1.88) ax.set_ylim(-1.2, 1.2) ax.set_xlabel('$x$', labelpad=1) ax.set_ylabel('$v$', labelpad=-5) seaborn.despine() # TOP RIGHT axes = fig.subplots(1, 2, sharex=False, sharey=True, gridspec_kw=dict(bottom=0.65, top=0.945, left=0.58, right=0.92, 
wspace=0)) axes[0].set_aspect('equal') axes[1].set_aspect('equal') bins = np.linspace(-2, 2, num=51) im = axes[0].hist2d( ds.labels.data.ravel(), ds.poly_predictions.sel(accuracy_order=3).data.ravel(), bins=2*[bins], cmin=1, norm=LogNorm(vmin=1, vmax=1e4)) im[-1].set_edgecolor('none') im[-1].set_rasterized(True) im[-1].set_zorder(-1) im = axes[1].hist2d( ds.labels.data.ravel(), ds.nn_predictions.data.ravel(), bins=2*[bins], cmin=1, norm=LogNorm(vmin=1, vmax=1e4)) im[-1].set_edgecolor('none') im[-1].set_rasterized(True) im[-1].set_zorder(-1) cbaxes = fig.add_axes([0.93, 0.65, 0.01, 0.295]) cb = plt.colorbar(im[3], cax=cbaxes, extendfrac=0.05, extend='max') axes[0].set_title('Polynomial') axes[1].set_title('Neural net') axes[0].set_xticks([-2, 0, 2]) axes[0].set_xticklabels(['-2', '0', '2 ']) axes[0].get_xaxis().majorTicks[2].label1.set_horizontalalignment('right') axes[1].set_xticks([-2, 0, 2]) axes[1].get_xaxis().majorTicks[0].label1.set_horizontalalignment('left') axes[0].set_yticks([-2, 0, 2]) axes[0].set_xlabel(r'$v_\mathrm{exact}$', labelpad=1) axes[1].set_xlabel(r'$v_\mathrm{exact}$', labelpad=1) axes[0].set_ylabel(r'$v_\mathrm{predicted}$', labelpad=-3) # BOTTOM RIGHT xmin = 1e-1 xmax = 1e4 ax = fig.subplots( 1, 1, gridspec_kw=dict(bottom=0.115, top=0.46, left=0.73, right=1)) custom_lineplot(x='nearest_curvature', y='poly_error', data=df, ax=ax, color=colors[1], estimator=np.median, ci=0.95) custom_lineplot(x='nearest_curvature', y='nn_error', data=df, ax=ax, color=colors[-1], estimator=np.median, ci=0.95) plt.setp(ax.get_lines()[1], linestyle='--') ax.text(4e2, 2.5e0, 'Polynomial', va='center', ha='center') ax.text(1e3, 2e-4, 'Neural\nnet', va='center', ha='center') ax.set_xscale('log') ax.set_xlim(xmin, xmax) ax.set_yscale('log') ax.set_yticks([1e-4, 1e-2, 1]) ax.set_xlabel(r'Curvature', labelpad=1) ax.set_ylabel('Abs. 
error', labelpad=1) seaborn.despine(ax=ax) plt.figtext(0, 1, '(a)', ha='left', va='top') plt.figtext(.5, 1, '(b)', ha='left', va='top') plt.figtext(.62, 0.48, '(c)', ha='left', va='top') fig.dpi = 90 """ Explanation: Create Figure 1 from the paper: End of explanation """
Charleo85/ml_project
resource/scribe/sample.ipynb
mit
import numpy as np import numpy.matlib import matplotlib.pyplot as plt import matplotlib.cm as cm %matplotlib inline import math import random import time import os import pickle import tensorflow as tf #built with TensorFlow version 0.9 """ Explanation: Scribe: Realistic Handwriting with TensorFlow <img src="static/author.png" alt="A project by Sam Greydanus" style="width: 70%;"/> This model is trained on the IAM handwriting dataset and was inspired by the model described by the famous 2014 Alex Graves paper. It consists of a three-layer recurrent neural network (LSTM cells) with a Gaussian Mixture Density Network (MDN) cap on top. I have also implemented the attention mechanism from the paper which allows the network to 'focus' on character at a time in a sequence as it draws them. This iPython notebook (named sample) demonstrates how to 1. build the model 2. load saved parameters 3. generate handwriting Dependencies End of explanation """ # in the real project class, we use argparse (https://docs.python.org/3/library/argparse.html) class FakeArgParse(): def __init__(self): pass args = FakeArgParse() #general model params args.train = False args.rnn_size = 100 #400 hidden units args.tsteps = 256 if args.train else 1 args.batch_size = 32 if args.train else 1 args.nmixtures = 8 # number of Gaussian mixtures in MDN #window params args.kmixtures = 1 # number of Gaussian mixtures in attention mechanism (for soft convolution window) args.alphabet = ' abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' #later we'll add an <UNK> slot for unknown chars args.tsteps_per_ascii = 25 # an approximate estimate #book-keeping args.save_path = './saved/model.ckpt' args.data_dir = './data' args.log_dir = './logs/' args.text = 'call me ishmael some years ago' args.style = -1 # don't use a custom style args.bias = 1.0 args.eos_prob = 0.4 # threshold probability for ending a stroke """ Explanation: Define hyperparameters Note that tsteps and batch_size are both 1 when we're sampling 
End of explanation """ # in real life the model is a class. I used this hack to make the iPython notebook more readable class FakeModel(): def __init__(self): pass model = FakeModel() """ Explanation: Model overview Let's start with an overview. The backbone of the model is three LSTM cells (green). There is a a custom attention mechanism (yellow) which digests a one-hot encoding of the sentence we want the model to write. The Mixture Density Network (red) on top choses apropriate Gaussian distributions from which to sample the next pen point, adding some natural randomness to the model. <img src="static/model_rolled.png" alt="Handwriting model structure" style="width: 40%;"/> As we build the model, we'll start with the inputs and work our way upwards through the computational graph. The code is optimized for minibatch training but in this case we already set the args.batch_size to 1. End of explanation """ model.char_vec_len = len(args.alphabet) + 1 #plus one for <UNK> token model.ascii_steps = len(args.text) model.graves_initializer = tf.truncated_normal_initializer(mean=0., stddev=.075, seed=None, dtype=tf.float32) model.window_b_initializer = tf.truncated_normal_initializer(mean=-3.0, stddev=.25, seed=None, dtype=tf.float32) # ----- build the basic recurrent network architecture cell_func = tf.contrib.rnn.LSTMCell # could be GRUCell or RNNCell model.cell0 = cell_func(args.rnn_size, state_is_tuple=True, initializer=model.graves_initializer) model.cell1 = cell_func(args.rnn_size, state_is_tuple=True, initializer=model.graves_initializer) model.cell2 = cell_func(args.rnn_size, state_is_tuple=True, initializer=model.graves_initializer) model.input_data = tf.placeholder(dtype=tf.float32, shape=[None, args.tsteps, 3]) model.target_data = tf.placeholder(dtype=tf.float32, shape=[None, args.tsteps, 3]) model.istate_cell0 = model.cell0.zero_state(batch_size=args.batch_size, dtype=tf.float32) model.istate_cell1 = model.cell1.zero_state(batch_size=args.batch_size, 
dtype=tf.float32) model.istate_cell2 = model.cell2.zero_state(batch_size=args.batch_size, dtype=tf.float32) #slice the input volume into separate vols for each tstep inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(model.input_data, args.tsteps, 1)] #build model.cell0 computational graph outs_cell0, model.fstate_cell0 = tf.contrib.legacy_seq2seq.rnn_decoder(inputs, model.istate_cell0, \ model.cell0, loop_function=None, scope='cell0') """ Explanation: Initialize LSTMs and build LSTM 1 End of explanation """ # ----- build the gaussian character window def get_window(alpha, beta, kappa, c): # phi -> [? x 1 x ascii_steps] and is a tf matrix # c -> [? x ascii_steps x alphabet] and is a tf matrix ascii_steps = c.get_shape()[1].value #number of items in sequence phi = get_phi(ascii_steps, alpha, beta, kappa) window = tf.matmul(phi,c) window = tf.squeeze(window, [1]) # window ~ [?,alphabet] return window, phi #get phi for all t,u (returns a [1 x tsteps] matrix) that defines the window def get_phi(ascii_steps, alpha, beta, kappa): # alpha, beta, kappa -> [?,kmixtures,1] and each is a tf variable u = np.linspace(0,ascii_steps-1,ascii_steps) # weight all the U items in the sequence kappa_term = tf.square( tf.subtract(kappa,u)) exp_term = tf.multiply(-beta,kappa_term) phi_k = tf.multiply(alpha, tf.exp(exp_term)) phi = tf.reduce_sum(phi_k,1, keep_dims=True) return phi # phi ~ [?,1,ascii_steps] def get_window_params(i, out_cell0, kmixtures, prev_kappa, reuse=True): hidden = out_cell0.get_shape()[1] n_out = 3*kmixtures with tf.variable_scope('window',reuse=reuse): window_w = tf.get_variable("window_w", [hidden, n_out], initializer=model.graves_initializer) window_b = tf.get_variable("window_b", [n_out], initializer=model.window_b_initializer) abk_hats = tf.nn.xw_plus_b(out_cell0, window_w, window_b) # abk_hats ~ [?,n_out] = "alpha, beta, kappa hats" abk = tf.exp(tf.reshape(abk_hats, [-1, 3*kmixtures,1])) alpha, beta, kappa = tf.split(abk, 3, 1) # alpha_hat, etc ~ 
[?,kmixtures] kappa = kappa + prev_kappa return alpha, beta, kappa # each ~ [?,kmixtures,1] """ Explanation: In the cell above we use the TensorFlow seq2seq API to create three LSTM cells. Learn about how LSTMs work here. When we unroll the model in time, it will look like this (arrows denote flow of data) <img src="static/model_unrolled.png" alt="Handwriting model unrolled" style="width: 80%;"/> We've already built the computational graph for the first LSTM cell. Our next task is to build the attention mechanism Build the attention mechanism The attention mechanism performs a Gaussian convolution over a one-hot encoding of the input text using a mixture of Gaussians. Its final output is a soft window into the one-hot encoding of the character the model thinks it is drawing. When we stack these soft windows vertically over time, we get a heatmap like this: <img src="static/onehot_window.png" alt="One-hot window" style="width: 50%;"/> Don't worry about how the mixture of Gaussians works together - we'll go over this in more detail later. For now, focus on building an intuition for how the parameters $\alpha$, $\beta$, and $\kappa$ affect the window's behavior. 
These three parameters control the character window $w_t$ according to $$(\hat \alpha_t,\hat \beta_t, \hat \kappa_t)=W_{h^1 p}h_t^1+b_p$$ Each of these parameters are outputs from a dense layer on top of the first LSTM which we then transform according to: $$\alpha_t=\exp (\hat \alpha_t) \quad \quad \beta_t=\exp (\hat \beta_t) \quad \quad \kappa_t= \kappa_{t-1} + \exp (\hat \kappa_t)$$ From these parameters we can construct the window as a convolution: $$w_t=\sum_{u=1}^U \phi(t,u)c_u \quad \quad \phi(t,u)= \sum_{k=1}^K \alpha_t^k \exp \left( -\beta_t^k(\kappa_t^k-u)^2 \right)$$ End of explanation """ model.init_kappa = tf.placeholder(dtype=tf.float32, shape=[None, args.kmixtures, 1]) model.char_seq = tf.placeholder(dtype=tf.float32, shape=[None, model.ascii_steps, model.char_vec_len]) wavg_prev_kappa = model.init_kappa prev_window = model.char_seq[:,0,:] #add gaussian window result reuse = False for i in range(len(outs_cell0)): [alpha, beta, new_kappa] = get_window_params(i, outs_cell0[i], args.kmixtures, wavg_prev_kappa, reuse=reuse) window, phi = get_window(alpha, beta, new_kappa, model.char_seq) outs_cell0[i] = tf.concat((outs_cell0[i],window), 1) #concat outputs outs_cell0[i] = tf.concat((outs_cell0[i],inputs[i]), 1) #concat input data # prev_kappa = new_kappa #tf.ones_like(new_kappa, dtype=tf.float32, name="prev_kappa_ones") # wavg_prev_kappa = tf.reduce_mean( new_kappa, reduction_indices=1, keep_dims=True) # mean along kmixtures dimension reuse = True model.window = window #save the last window (for generation) model.phi = phi #save the last window (for generation) model.new_kappa = new_kappa #save the last window (for generation) model.alpha = alpha #save the last window (for generation) model.wavg_prev_kappa = wavg_prev_kappa """ Explanation: Alex Graves gives a really nice intuition for the roles of $\alpha$, $\beta$, and $\kappa$ in his paper: "Intuitively, the $\kappa_t$ parameters control the location of the window, the $\beta_t$ parameters control the 
width of the window and the $\alpha_t$ parameters control the importance of the window within the mixture." Now that we've defined how the attention mechanism works, we need to loop through each of LSTM 1's outputs states and use them as inputs to the attention mechanism. Then, we simply concatenate the outputs of the attention mechanism to the LSTM's state vector. While we're at it, we concatenate the original pen stroke data for good measure. The modified LSTM 1 state vector looks like this: <img src="static/modified_lstm1_state.png" alt="Motified LSTM 1 state vector" style="width: 75%;"/> End of explanation """ # ----- finish building second recurrent cell outs_cell1, model.fstate_cell1 = tf.contrib.legacy_seq2seq.rnn_decoder(outs_cell0, model.istate_cell1, model.cell1, \ loop_function=None, scope='cell1') #use scope from training # ----- finish building third recurrent cell outs_cell2, model.fstate_cell2 = tf.contrib.legacy_seq2seq.rnn_decoder(outs_cell1, model.istate_cell2, model.cell2, \ loop_function=None, scope='cell2') out_cell2 = tf.reshape(tf.concat(outs_cell2, 1), [-1, args.rnn_size]) #concat outputs for efficiency """ Explanation: Build LSTMs 2 and 3 End of explanation """ #put a dense cap on top of the rnn cells (to interface with the mixture density network) n_out = 1 + args.nmixtures * 6 # params = end_of_stroke + 6 parameters per Gaussian with tf.variable_scope('mdn_dense'): output_w = tf.get_variable("output_w", [args.rnn_size, n_out], initializer=model.graves_initializer) output_b = tf.get_variable("output_b", [n_out], initializer=model.graves_initializer) output = tf.nn.xw_plus_b(out_cell2, output_w, output_b) #data flows through dense nn """ Explanation: Build Mixture Density Network (MDN) The core idea is to have a network that predicts an entire distribution. Here we are predicting a mixture of Gaussians distributions by estimating their the means and covariances with the output from a dense neural network. 
In effect, the network will be able to estimate its own uncertainty. When the target is noisy it will predict diffuse distributions and where the target is really likely it will predict a peaky distribution. If the following plot represents the target data space, then the MDN will fit Gaussian distributions as shown. Since we are drawing from a mixture of Gaussians, we make the network predict $\pi$, which defines how likely a given point was drawn from each Gaussian in the mixture.
<img src="static/MDN.png" alt="Mixture of Gaussians" style="width: 50%;"/>
The $\pi$ values in the image are likely values for the red dot. The probability that it came from the first distribution is really unlikely, but the probabilities are pretty evenly balanced between the second two
First, build the dense NN
End of explanation
"""
# ----- build the gaussian character window
# Bivariate normal PDF assembled from elementwise TF ops; this is the
# per-component density of the MDN (eq 24, 25 of the Graves paper).
def gaussian2d(x1, x2, mu1, mu2, s1, s2, rho):
    # define gaussian mdn (eq 24, 25 from http://arxiv.org/abs/1308.0850)
    x_mu1 = tf.subtract(x1, mu1)  # deviation from the mean, per axis
    x_mu2 = tf.subtract(x2, mu2)
    Z = tf.square(tf.div(x_mu1, s1)) + \
        tf.square(tf.div(x_mu2, s2)) - \
        2*tf.div(tf.multiply(rho, tf.multiply(x_mu1, x_mu2)), tf.multiply(s1, s2))
    rho_square_term = 1-tf.square(rho)  # (1 - rho^2) appears in both the exponent and the normalizer
    power_e = tf.exp(tf.div(-Z,2*rho_square_term))
    regularize_term = 2*np.pi*tf.multiply(tf.multiply(s1, s2), tf.sqrt(rho_square_term))
    # NOTE(review): tf.div/tf.multiply are TF1-era APIs; fine for the TF 0.9/1.x
    # build this notebook targets (see the import cell's comment).
    gaussian = tf.div(power_e, regularize_term)
    return gaussian
"""
Explanation: Next, define a 2D Gaussian using tensor operations
A 2D gaussian looks like
$\mathcal{N}(x|\mu,\sigma,\rho)=\frac{1}{2\pi\sigma_1\sigma_2\sqrt(1-\rho^2)}exp\left[\frac{-Z}{2(1-\rho^2)}\right]$
where
$Z=\frac{(x_1-\mu_1)^2}{\sigma_1^2}+\frac{(x_2-\mu_2)^2}{\sigma_2^2}-\frac{2\rho(x_1-\mu_1)(x_2-\mu_2)}{\sigma_1\sigma_2}$
End of explanation
"""
# now transform dense NN outputs into params for MDN
def get_mdn_coef(Z):
    # returns the tf slices containing mdn dist params (eq 18...23 of http://arxiv.org/abs/1308.0850)
    eos_hat = Z[:, 0:1]
#end of sentence tokens pi_hat, mu1_hat, mu2_hat, sigma1_hat, sigma2_hat, rho_hat = tf.split(Z[:, 1:], 6, 1) model.pi_hat, model.sigma1_hat, model.sigma2_hat = \ pi_hat, sigma1_hat, sigma2_hat # these are useful for biasing eos = tf.sigmoid(-1*eos_hat) # technically we gained a negative sign pi = tf.nn.softmax(pi_hat) # softmax z_pi: mu1 = mu1_hat; mu2 = mu2_hat # leave mu1, mu2 as they are sigma1 = tf.exp(sigma1_hat); sigma2 = tf.exp(sigma2_hat) # exp for sigmas rho = tf.tanh(rho_hat) # tanh for rho (squish between -1 and 1) return [eos, pi, mu1, mu2, sigma1, sigma2, rho] """ Explanation: Finally, transform the NN outputs into parameters for a mixture of Gaussians The gaussian mixture density network parameters are $$e_t=\frac{1}{1+\exp(\hat e_t)} \quad \quad \pi_t^j=\frac{\exp(\hat \pi_t^j)}{\sum_{j'=1}^M\exp(\hat \pi_t^{j'})} \quad \quad \mu_t^j=\hat \mu_t^j \quad \quad \sigma_t^j=\exp(\hat \sigma_t^j) \quad \quad \rho_t^j=\tanh(\hat \rho_t^j)$$ End of explanation """ # reshape target data (as we did the input data) flat_target_data = tf.reshape(model.target_data,[-1, 3]) [x1_data, x2_data, eos_data] = tf.split(flat_target_data, 3, 1) #we might as well split these now [model.eos, model.pi, model.mu1, model.mu2, model.sigma1, model.sigma2, model.rho] = get_mdn_coef(output) """ Explanation: Now we just map the outputs from the dense NN to the Gaussian mixture parameters. End of explanation """ model.sess = tf.InteractiveSession() model.saver = tf.train.Saver(tf.global_variables()) model.sess.run(tf.global_variables_initializer()) load_was_success = True # yes, I'm being optimistic global_step = 0 try: save_dir = '/'.join(args.save_path.split('/')[:-1]) ckpt = tf.train.get_checkpoint_state(save_dir) load_path = ckpt.model_checkpoint_path model.saver.restore(model.sess, load_path) except: print("no saved model to load. 
starting new session") load_was_success = False else: print("loaded model: {}".format(load_path)) model.saver = tf.train.Saver(tf.global_variables()) global_step = int(load_path.split('-')[-1]) """ Explanation: Side note: loss We don't actually need to calculate loss in order to generate handwriting, but it is necessary for training the model. I'll provide the equations from the original paper but omit the code. If you want to see how to implement them in TensorFlow, check out the model.py file in this Github project. We define in terms of the MDN parameters and the target data as follows: $$ \mathcal{L}(x)=\sum_{t=1}^{T} -log\left(\sum_{j} \pi_t^j\mathcal{N}(x_{t+1}|\mu_t^j,\sigma_t^j,\rho_t^j) \right) -\left{ \begin{array}{ll} \log e_t & (x_{t+1})_3=1\ \log(1-e_t) & \quad \mathrm{otherwise} \end{array} \right. $$ Load saved model Now that the entire model is built, we can start a session and try to load weights from a saved model. Look on this project's Github page for instructions on how to download a pretrained model. End of explanation """ # utility function for converting input ascii characters into vectors the network can understand. # index position 0 means "unknown" def to_one_hot(s, ascii_steps, alphabet): steplimit=3e3; s = s[:3e3] if len(s) > 3e3 else s # clip super-long strings seq = [alphabet.find(char) + 1 for char in s] if len(seq) >= ascii_steps: seq = seq[:ascii_steps] else: seq = seq + [0]*(ascii_steps - len(seq)) one_hot = np.zeros((ascii_steps,len(alphabet)+1)) one_hot[np.arange(ascii_steps),seq] = 1 return one_hot """ Explanation: Generate handwriting! 
End of explanation """ def get_style_states(model, args): with open(os.path.join(args.data_dir, 'styles.p'),'rb') as f: style_strokes, style_strings = pickle.load(f, encoding='latin1') style_strokes, style_string = style_strokes[args.style], style_strings[args.style] style_onehot = [to_one_hot(style_string, model.ascii_steps, args.alphabet)] c0, c1, c2 = model.istate_cell0.c.eval(), model.istate_cell1.c.eval(), model.istate_cell2.c.eval() h0, h1, h2 = model.istate_cell0.h.eval(), model.istate_cell1.h.eval(), model.istate_cell2.h.eval() if args.style is -1: return [c0, c1, c2, h0, h1, h2] #model 'chooses' random style style_stroke = np.zeros((1, 1, 3), dtype=np.float32) style_kappa = np.zeros((1, args.kmixtures, 1)) prime_len = 500 # must be <= 700 for i in xrange(prime_len): style_stroke[0][0] = style_strokes[i,:] feed = {model.input_data: style_stroke, model.char_seq: style_onehot, model.init_kappa: style_kappa, \ model.istate_cell0.c: c0, model.istate_cell1.c: c1, model.istate_cell2.c: c2, \ model.istate_cell0.h: h0, model.istate_cell1.h: h1, model.istate_cell2.h: h2} fetch = [model.wavg_prev_kappa, \ model.fstate_cell0.c, model.fstate_cell1.c, model.fstate_cell2.c, model.fstate_cell0.h, model.fstate_cell1.h, model.fstate_cell2.h] [style_kappa, c0, c1, c2, h0, h1, h2] = model.sess.run(fetch, feed) return [c0, c1, c2, np.zeros_like(h0), np.zeros_like(h1), np.zeros_like(h2)] #only the c vectors should be primed """ Explanation: Prime the model This is a function that is meant to 'prime' the model on a particular style of handwriting. Graves does this in his paper but I was unable to replicate his success. 
Currently, when I prime the network I get signs of that style, but the output becomes far more messy and unpredictable End of explanation """ # initialize some sampling parameters one_hot = [to_one_hot(args.text, model.ascii_steps, args.alphabet)] # convert input string to one-hot vector [c0, c1, c2, h0, h1, h2] = get_style_states(model, args) # get numpy zeros states for all three LSTMs kappa = np.zeros((1, args.kmixtures, 1)) # attention's read head starts at index 0 prev_x = np.asarray([[[0, 0, 1]]], dtype=np.float32) # start with a pen stroke at (0,0) strokes, pis, windows, phis, kappas = [], [], [], [], [] # the data we're going to generate will go here """ Explanation: Prepare feed parameters for the model When you ask TensorFlow to find the values in one or more of its tensors, you pass these tensors in as a list. This is your fetch list. TensorFlow looks through the computational graph you've built and figures out all of the inputs it needs in order to calculate values for the tensors in the fetch list. It takes these inputs as a feed dictionary, where the keys are TensorFlow entry-point variables (called Placeholders) and the values are numpy inputs. 
In the next cell, we'll collect all the numpy values we'll need to make the feed dictionary when we evaluate the model End of explanation """ def sample_gaussian2d(mu1, mu2, s1, s2, rho): mean = [mu1, mu2] cov = [[s1*s1, rho*s1*s2], [rho*s1*s2, s2*s2]] x = np.random.multivariate_normal(mean, cov, 1) return x[0][0], x[0][1] finished = False ; i = 0 while not finished and i < 800: feed = {model.input_data: prev_x, model.char_seq: one_hot, model.init_kappa: kappa, \ model.istate_cell0.c: c0, model.istate_cell1.c: c1, model.istate_cell2.c: c2, \ model.istate_cell0.h: h0, model.istate_cell1.h: h1, model.istate_cell2.h: h2} fetch = [model.pi_hat, model.mu1, model.mu2, model.sigma1_hat, model.sigma2_hat, model.rho, model.eos, \ model.window, model.phi, model.new_kappa, model.wavg_prev_kappa, model.alpha, \ model.fstate_cell0.c, model.fstate_cell1.c, model.fstate_cell2.c,\ model.fstate_cell0.h, model.fstate_cell1.h, model.fstate_cell2.h] [pi_hat, mu1, mu2, sigma1_hat, sigma2_hat, rho, eos, window, phi, kappa, wavg_kappa, alpha, \ c0, c1, c2, h0, h1, h2] = model.sess.run(fetch, feed) #bias stuff: sigma1 = np.exp(sigma1_hat - args.bias) sigma2 = np.exp(sigma2_hat - args.bias) pi_hat *= 1 + args.bias # apply bias pi = np.zeros_like(pi_hat) # need to preallocate pi[0] = np.exp(pi_hat[0]) / np.sum(np.exp(pi_hat[0]), axis=0) # softmax # choose a component from the MDN idx = np.random.choice(pi.shape[1], p=pi[0]) eos = 1 if args.eos_prob < eos[0][0] else 0 # use 0.5 as arbitrary boundary x1, x2 = sample_gaussian2d(mu1[0][idx], mu2[0][idx], sigma1[0][idx], sigma2[0][idx], rho[0][idx]) # store the info at this time step windows.append(window) phis.append(phi[0]) kappas.append(kappa[0]) pis.append(pi[0]) strokes.append([mu1[0][idx], mu2[0][idx], sigma1[0][idx], sigma2[0][idx], rho[0][idx], eos]) # test if finished (has the read head seen the whole ascii sequence?) 
main_kappa_idx = np.where(alpha[0]==np.max(alpha[0])); # choose the read head with the highes alpha value finished = True if kappa[0][main_kappa_idx] > len(args.text) + 1 else False # new input is previous output prev_x[0][0] = np.array([x1, x2, eos], dtype=np.float32) kappa = wavg_kappa i+=1 windows = np.vstack(windows) phis = np.vstack(phis) kappas = np.vstack(kappas) pis = np.vstack(pis) strokes = np.vstack(strokes) # the network predicts the displacements between pen points, so do a running sum over the time dimension strokes[:,:2] = np.cumsum(strokes[:,:2], axis=0) """ Explanation: Sample from the model Each loop is a full pass through the model. The input for the model at time t $i_t$ is the output from the model at time t-1 $o_{t-1}$ End of explanation """ # plots parameters from the attention mechanism def window_plots(phis, windows): plt.figure(figsize=(16,4)) plt.subplot(121) plt.title('Phis', fontsize=20) plt.xlabel("ascii #", fontsize=15) plt.ylabel("time steps", fontsize=15) plt.imshow(phis, interpolation='nearest', aspect='auto', cmap=cm.jet) plt.subplot(122) plt.title('Soft attention window', fontsize=20) plt.xlabel("one-hot vector", fontsize=15) plt.ylabel("time steps", fontsize=15) plt.imshow(windows, interpolation='nearest', aspect='auto', cmap=cm.jet) window_plots(phis, windows) """ Explanation: Plots End of explanation """ plt.figure(figsize=(8,4)) plt.title("How MDN $\pi$ values change over time", fontsize=15) plt.xlabel("$\pi$ values", fontsize=15) plt.ylabel("time step", fontsize=15) plt.imshow(pis, interpolation='nearest', aspect='auto', cmap=cm.jet) """ Explanation: Phis: A time series plot of the window's position. The vertical axis is time (descending) and the horizontal axis is the sequence of ascii characters that the model is drawing. One-hot windows: A time series of one-hot encodings produced by the attention mechanism. Again, the vertical axis is time. The horizontal axis what the model sees when it looks through the soft window. 
Note that the bright stripes in the first plot are the model’s way of encoding the end of a pen stroke. We never hard-coded this behavior! End of explanation """ def gauss_plot(strokes, title, figsize = (20,2)): plt.figure(figsize=figsize) import matplotlib.mlab as mlab buff = 1 ; epsilon = 1e-4 minx, maxx = np.min(strokes[:,0])-buff, np.max(strokes[:,0])+buff miny, maxy = np.min(strokes[:,1])-buff, np.max(strokes[:,1])+buff delta = abs(maxx-minx)/400. ; x = np.arange(minx, maxx, delta) y = np.arange(miny, maxy, delta) X, Y = np.meshgrid(x, y) Z = np.zeros_like(X) for i in range(strokes.shape[0]): gauss = mlab.bivariate_normal(X, Y, mux=strokes[i,0], muy=strokes[i,1], \ sigmax=strokes[i,2], sigmay=strokes[i,3], sigmaxy=0) # sigmaxy=strokes[i,4] gives error Z += gauss/(np.max(gauss) + epsilon) plt.title(title, fontsize=20) plt.imshow(Z) gauss_plot(strokes, "Stroke probability", figsize = (2*model.ascii_steps,4)) """ Explanation: Mixture Density Network $\pi$ probabilities: Each element of the pis vector corresponds to the probability that the model will sample a point from that Gaussian component. Notice that the model has learned to draw from different distributions depending on what letters/strokes it is drawing End of explanation """ # plots the stroke data (handwriting!) 
def line_plot(strokes, title, figsize = (20,2)):
    """Draw the generated handwriting: plot (x, y) points as connected line
    segments, lifting the pen wherever the end-of-stroke flag (last column
    of `strokes`) equals 1."""
    plt.figure(figsize=figsize)
    eos_preds = np.where(strokes[:,-1] == 1)
    eos_preds = [0] + list(eos_preds[0]) + [-1] #add start and end indices
    for i in range(len(eos_preds)-1):
        start = eos_preds[i]+1
        stop = eos_preds[i+1]
        plt.plot(strokes[start:stop,0], strokes[start:stop,1],'b-', linewidth=2.0) #draw a stroke
    plt.title(title, fontsize=20)
    plt.gca().invert_yaxis() # NOTE(review): y presumably grows downward in the stroke data; flipped for display — confirm
    plt.show()
line_plot(strokes, 'Line plot: "{}"'.format(args.text), figsize=(model.ascii_steps,2))
"""
Explanation: Stroke probability: The model has learned to predict diffuse distributions at the beginning of pen strokes and peaky distributions in the middle (where uncertainty in the next point's location is much lower)
End of explanation
"""
antoniomezzacapo/qiskit-tutorial
community/aqua/artificial_intelligence/svm_classical.ipynb
apache-2.0
from datasets import * from qiskit_aqua.utils import split_dataset_to_data_and_labels, map_label_to_class_name from qiskit_aqua.input import get_input_instance from qiskit_aqua import run_algorithm """ Explanation: SVM with a classical RBF kernel We have shown here a QSVM_Kernel notebook with the classification problem solved using a quantum algorithm. By comparison this shows the problem solved classically. This notebook shows the SVM implementation based on the classical RBF kernel. End of explanation """ n = 2 # dimension of each data point training_dataset_size = 20 testing_dataset_size = 10 sample_Total, training_input, test_input, class_labels = ad_hoc_data(training_size=training_dataset_size, test_size=testing_dataset_size, n=n, gap=0.3, PLOT_DATA=True) datapoints, class_to_label = split_dataset_to_data_and_labels(test_input) print(class_to_label) """ Explanation: First we prepare the dataset, which is used for training, testing and the finally prediction. Note: You can easily switch to a different dataset, such as the Breast Cancer dataset, by replacing 'ad_hoc_data' to 'Breast_cancer' below. End of explanation """ params = { 'problem': {'name': 'svm_classification'}, 'algorithm': { 'name': 'SVM' } } algo_input = get_input_instance('SVMInput') algo_input.training_dataset = training_input algo_input.test_dataset = test_input algo_input.datapoints = datapoints[0] # 0 is data, 1 is labels """ Explanation: With the dataset ready we initialize the necessary inputs for the algorithm: - the input dictionary (params) - the input object containing the dataset info (algo_input). 
End of explanation """ result = run_algorithm(params, algo_input) print("kernel matrix during the training:") kernel_matrix = result['kernel_matrix_training'] img = plt.imshow(np.asmatrix(kernel_matrix),interpolation='nearest',origin='upper',cmap='bone_r') plt.show() print("testing success ratio: ", result['testing_accuracy']) print("predicted classes:", result['predicted_classes']) """ Explanation: With everything setup, we can now run the algorithm. For the testing, the result includes the details and the success ratio. For the prediction, the result includes the predicted labels. End of explanation """ sample_Total, training_input, test_input, class_labels = Breast_cancer(training_size=20, test_size=10, n=2, PLOT_DATA=True) # n =2 is the dimension of each data point datapoints, class_to_label = split_dataset_to_data_and_labels(test_input) label_to_class = {label:class_name for class_name, label in class_to_label.items()} print(class_to_label, label_to_class) algo_input = get_input_instance('SVMInput') algo_input.training_dataset = training_input algo_input.test_dataset = test_input algo_input.datapoints = datapoints[0] result = run_algorithm(params, algo_input) # print(result) print("kernel matrix during the training:") kernel_matrix = result['kernel_matrix_training'] img = plt.imshow(np.asmatrix(kernel_matrix),interpolation='nearest',origin='upper',cmap='bone_r') plt.show() print("testing success ratio: ", result['testing_accuracy']) print("ground truth: {}".format(map_label_to_class_name(datapoints[1], label_to_class))) print("predicted: {}".format(result['predicted_classes'])) """ Explanation: The breast cancer dataset Now we run our algorithm with the real-world dataset: the breast cancer dataset End of explanation """
ProfessorKazarinoff/staticsite
content/code/ENGR213/Problem_4C2.ipynb
gpl-3.0
d = 351 tf = 9.78 tw = 6.86 bf = 171 ys = 300 E = 200*10**3 #Elastic modulus in MPa """ Explanation: Problem 4.C2 in Beer and Johnson Below is an engineering mechanics problem that can be solved with Python. Follow along to see how to solve the problem with code. Problem Given: An I-beam (also called a W-shape for wide-flange shape) below with dimension $d$, $t_f$, $t_w$ and $b_f$. The $y$-axis runs vertically down though the center of the beam. The $x$-axis run horizontally through the center of the beam. The beam is symmetric about the $x$ and $y$ axes. yield strength $\sigma_y = 300 \ MPa$ elastic modulus $E = 200 \ GPa$ Find: (a) For values of $y_y$ from $\frac{1}{2}d$ to $\frac{1}{6}d$ in increments of $\frac{1}{2}t_f$, calculate the bending moment $M$ and the radius of curvature $\rho$ (b) Using the dimensions below, determine the bending moment $M$ and radius of curvature of the beam $\rho$ if the plastic zones at the top and bottom of the beam are $40 \ mm$ thick. $d = 140 \ mm$ $t_f = 10 \ mm$ $t_w = 10 \ mm$ $b_f = 120 \ mm$ Assume: We will assume that for part (a), we have a standard W360 $\times$ 44 metric beam with the following measurements: $d = 351 \ mm$ $t_f = 9.78 \ mm$ $t_w = 6.86 \ mm$ $b_f = 171 \ mm$ Solution Start the solution: install Python We are going to use Python to code the solution to this problem. If you don't already have Python installed on your computer, I recommend installing the Anaconda distribution of Python. See this post to learn how to install Anaconda on your computer. I am coding this solution in a Jupyter Notebook. Once you install Anaconda, you can open a Jupyter notebook from the Windows Start Menu or the Anaconda Prompt. See this post to learn about 3 ways to open a Jupyter notebook. Alternatively, instead of using a Jupyter notebook, you could code your solution in a .py file. Alright. Let's get coding.... Define variables based on our assumption about the size of the beam. 
Based on our assumption of a standard W360 $\times$ 44 metric beam, and the elastic modulus and yield strength given in the problem, we can define a number of constants.
$d = 351 \ mm$
$t_f = 9.78 \ mm$
$t_w = 6.86 \ mm$
$b_f = 171 \ mm$
yield strength $\sigma_y = 300 \ MPa$
elastic modulus $E = 200 \ GPa$
End of explanation
"""
A1 = tf*bf # area of the top rectangle
d1 = d/2 - tf/2 # distance between the centroid of the top rectangle and the x-axis
I1 = (1/12)*bf*tf**3 + A1*d1**2
A2 = tw*(d-2*tf) # area of the middle rectangle
d2 = 0 # the centroid of the middle rectangle is on the x-axis
I2 = (1/12)*tw*(d-2*tf)**3 + A2*d2**2
A3 = tf*bf # area of the bottom rectangle
d3 = d/2 - tf/2 # distance between the centroid of the bottom rectangle and the x-axis
I3 = (1/12)*bf*tf**3 + A3*d3**2
# sum the individual moments of inertia to calculate the moment of inertia of the entire beam
I = I1 + I2 + I3
print(f"The moment of inertia I = {I} mm4")
"""
Explanation: Compute the moment of inertia, I, based on the beam cross-section
Next we will compute the moment of inertia, $I$ based on the beam cross-section. The beam is made of three rectangular shapes. We will call the upper horizontal plate rectangle 1, the middle vertical plate rectangle 2, and the bottom horizontal plate rectangle 3. The moment of inertia, $I_x$, of a rectangular shape is equal to:
$I_x = \frac{1}{12}bh^3 + Ad^2$
where $b$ is the width of the rectangle base, $h$ is the rectangle height, $A$ is the rectangle area, and $d$ is the distance between the centroid of the rectangle to the axis we are trying to find the moment of inertia about (in our case the x-axis).
We have three rectangles: The top, middle, and bottom of the beam. We will call the component each of these rectangles contributes to the total moment of inertia I1, I2, and I3. After the components of the total moment of inertia are calculated, they can be summed together to produce the total moment of inertia $I$.
$I = I_1 + I_2 + I_3$ The Python code below completes these operations. At the end of the code section, a Python f-string is used to print out the value of $I$ after it is calculated. Make sure prepend f-strings with the letter f before the quotation marks. End of explanation """ My = ys*(I)/(d/2) print(f"The maximum elastic moment My = {My} N mm") """ Explanation: Calculate the Maximum elastic moment, My After the moment of inertia, $I$, is calculated, the next step to solve this problem is to compute the maximum elastic moment, $M_y$. The maximum bending moment is limited by the yield strength, $\sigma_y$ of the material. The equation for maximum elastic moment $M_y$ is below: $M_y = \sigma_y\frac{I_x}{d/2}$ Where $\sigma_y$ is the yield strength of the material, $I_x$ is the moment of inertia and $d$ is the full-height of the beam. The code below completes this calculation. A Python f-string is used to print out the calculated $M_y$ value. The units of $M_y$ come out to be $N \cdot mm$ End of explanation """ c = d/2 print(f"the maximum value of y is c = {c} mm") """ Explanation: Calculate the maximum value of y, called c Since the entire beam is assumed to be elastoplastic, we need to calculate the maximum value of $y$, which is typically denoted $c$. Since our $x$-axis runs runs horizontally through the center of the beam, the maximum value of $y$ is half of the total height of the beam $ c = d/2 $ Coding this calculation is pretty simple End of explanation """ yy=d/2 p = yy*E/ys print("The radius of curvature if the yield strength is only reached") print(f"at the very end of the beam (y_y=d/2) is p = {p} mm") """ Explanation: Calculate the radius of curvature, p, for a Yy value equal to c Now let's find the radius of curvature $\rho$ if we just reach the yield stress, $\sigma_y$ at the very top of the beam. The distance up the beam relative to the neutral axis where the yield strength is reached will be called $y_y$. 
If the yield strength is only reached at the very top of the beam, that means that $y_y = c = d/2$. For a rectangular elastoplastic material, the distribution of strain across the section remains linear after the onset of yield. Strain is dependent on the bend radius $\rho$ and how far away from the neutral axis the strain occurs $y_y$. $$ \epsilon = \frac{y_y}{\rho} $$ $$ y_y = \epsilon\rho $$ Now take the standard definition of elastic modulus $E$, stress $\sigma$ over strain $\epsilon$ and rearrange it to solve for strain $\epsilon$ in terms of $E$ and $\sigma$. $$ E = \frac{\sigma}{\epsilon} $$ $$ \epsilon = \frac{\sigma}{E} $$ We can multiply both sides of the equation above by $\rho$. $$ \epsilon\rho = \frac{\sigma}{E}\rho $$ From above $y_y = \epsilon\rho$. Therefore, we can substitute $y_y$ in for $\epsilon\rho$ on the left-hand side of the equation. $$ y_y = \frac{\sigma}{E}\rho $$ Now we can solve the equation above for $\rho$ in terms of $y_y$, $E$, and $\sigma$. $$ \rho = \frac{y_yE}{\sigma} $$ This gives us an equation for radius of curvature $\rho$ in terms of parameters that we know, elastic modulus $E$, yield strength $\sigma_y$ and location in the beam compared to the neutral axis where plastic elongation starts to occur $y_y$. Remember from before: yield strength $\sigma_y = 300 \ MPa$ elastic modulus $E = 200 \ GPa$ And we are going to calculate the radius of curvature $\rho$ when the yield strength is only reached at the very top of the beam. 
$y_y=c=d/2$ This can be coded in Python is just a few lines End of explanation """ import numpy as np for yy in np.arange((1/2)*d, (1/6)*d + (1/2)*tf, -(1/2)*tf): p = yy*E/ys print(f"The radius of curvature at y_y = {round(yy,2)} mm is p = {round(p,1)} mm") """ Explanation: Iterate through Yy values and calculate radius of curvature p for each Yy We can iterate through the $y_y$ distances (the distance compared to the neutral axis where plastic deformation begins and the stress in the beam has reached the yield strength) and calculate the radius of curvature $\rho$ for each $y_y$ distance. In Python, this can be accomplished with a for loop. The code below iterates through $y_y$ values from $\frac{1}{2}d$ to $\frac{1}{6}d$ in increments of $\frac{1}{2}t_f$ and prints the resulting radius of curvature $\rho$ for each $y_y$ value. Remember that Python counting starts at zero and ends at n-1, so we need to tack on an extra $\frac{1}{2}t_f$ to the stop value of our np.arange() function. Since the $y_y$ values are not integers, we need to use NumPy and NumPy's np.arange() function instead of Python's build-in range() function. Make sure import numpy as np before calling np.arange(). End of explanation """
wilomaku/IA369Z
dev/Autoencoderxclass.ipynb
gpl-3.0
## Functions import sys,os import copy path = os.path.abspath('../dev/') if path not in sys.path: sys.path.append(path) import bib_mri as FW import numpy as np import scipy as scipy import scipy.misc as misc import matplotlib as mpl import matplotlib.pyplot as plt from numpy import genfromtxt import platform import torch from torch.autograd import Variable import torch.nn as nn import torch.nn.functional as F %matplotlib inline def sign_extract(seg, resols): #Function for shape signature extraction splines = FW.get_spline(seg,smoothness) sign_vect = np.array([]).reshape(0,points) #Initializing temporal signature vector for resol in resols: sign_vect = np.vstack((sign_vect, FW.get_profile(splines, n_samples=points, radius=resol))) return sign_vect def sign_fit(sig_ref, sig_fit): #Function for signature fitting dif_curv = [] for shift in range(points): dif_curv.append(np.abs(np.sum((sig_ref - np.roll(sig_fit[0],shift))**2))) return np.apply_along_axis(np.roll, 1, sig_fit, np.argmin(dif_curv)) print "Python version: ", platform.python_version() print "Numpy version: ", np.version.version print "Scipy version: ", scipy.__version__ print "Matplotlib version: ", mpl.__version__ """ Explanation: Corpus callosum's shape signature for segmentation error detection in large datasets Abstract Corpus Callosum (CC) is a subcortical, white matter structure with great importance in clinical and research studies because its shape and volume are correlated with subject's characteristics and neurodegenerative diseases. CC segmentation is a important step for any medical, clinical or research posterior study. Currently, magnetic resonance imaging (MRI) is the main tool for evaluating brain because it offers the better soft tissue contrast. Particullary, segmentation in MRI difussion modality has great importante given information associated to brain microstruture and fiber composition. 
In this work a method for detection of erroneous segmentations in large datasets is proposed based-on shape signature. Shape signature is obtained from segmentation, calculating curvature along contour using a spline formulation. A mean correct signature is used as reference for compare new segmentations through root mean square error. This method was applied to 152 subject dataset for three different segmentation methods in diffusion: Watershed, ROQS and pixel-based presenting high accuracy in error detection. This method do not require per-segmentation reference and it can be applied to any MRI modality and other image aplications. End of explanation """ #Loading labeled segmentations seg_label = genfromtxt('../../dataset/Seg_Watershed/watershed_label.csv', delimiter=',').astype('uint8') list_masks = seg_label[np.logical_or(seg_label[:,1] == 0, seg_label[:,1] == 1), 0] #Extracting segmentations list_labels = seg_label[np.logical_or(seg_label[:,1] == 0, seg_label[:,1] == 1), 1] #Extracting labels ind_ex_err = list_masks[np.where(list_labels)[0]] ind_ex_cor = list_masks[np.where(np.logical_not(list_labels))[0]] print "Mask List", list_masks print "Label List", list_labels print "Correct List", ind_ex_cor print "Erroneous List", ind_ex_err mask_correct = np.load('../../dataset/Seg_Watershed/mask_wate_{}.npy'.format(ind_ex_cor[10])) mask_error = np.load('../../dataset/Seg_Watershed/mask_wate_{}.npy'.format(ind_ex_err[10])) plt.figure() plt.axis('off') plt.imshow(mask_correct,'gray',interpolation='none') plt.title("Correct segmentation example") plt.show() plt.figure() plt.axis('off') plt.imshow(mask_error,'gray',interpolation='none') plt.title("Erroneous segmentation example") plt.show() """ Explanation: Introduction The Corpus Callosum (CC) is the largest white matter structure in the central nervous system that connects both brain hemispheres and allows the communication between them. 
The CC has great importance in research studies due to the correlation of its shape and volume with some subject characteristics, such as gender, age, numeric and mathematical skills, and handedness. In addition, some neurological conditions such as Alzheimer's disease, autism, schizophrenia and dyslexia can cause CC shape deformation. CC segmentation is a necessary step for morphological and physiological feature extraction in order to analyze the structure in image-based clinical and research applications. Magnetic Resonance Imaging (MRI) is the most suitable imaging technique for CC segmentation due to its ability to provide contrast between brain tissues; however, CC segmentation is challenging because of the shape and intensity variability between subjects, the partial volume effect in diffusion MRI, the proximity of the fornix, and narrow areas in the CC. Among the known MRI modalities, Diffusion-MRI arouses special interest for studying the CC, despite its low resolution and high complexity, since it provides useful information related to the organization of brain tissues and the magnetic field does not interfere with the diffusion process itself. Some CC segmentation approaches using Diffusion-MRI were found in the literature. Niogi et al. proposed a method based on thresholding, Freitas et al. and Rittner et al. proposed region methods based on the Watershed transform, Nazem-Zadeh et al. implemented a method based on level surfaces, Kong et al. presented a clustering algorithm for segmentation, Herrera et al. segmented the CC directly in diffusion-weighted imaging (DWI) using a model based on pixel classification, and Garcia et al. proposed a hybrid segmentation method based on active geodesic regions and level surfaces. With the growth of data and the proliferation of automatic algorithms, segmentation over large databases is now affordable. Therefore, automatic error detection is important in order to facilitate and speed up the filtering of CC segmentation databases.
presented proposals for content-based image retrieval (CBIR) using shape signature of the planar object representation. In this work, a method for automatic detection of segmentation error in large datasets is proposed based on CC shape signature. Signature offers shape characterization of the CC and therefore it is expected that a "typical correct signature" represents well any correct segmentation. Signature is extracted measuring curvature along segmentation contour. The method was implemented in three main stages: mean correct signature generation, signature configuration and method testing. The first one takes 20 corrects segmentations and generates one correct signature of reference (typical correct signature), per-resolution, using mean values in each point. The second stage stage takes 10 correct segmentations and 10 erroneous segmentations and adjusts the optimal resolution and threshold, based on mean correct signature, that lets detection of erroneous segmentations. The third stage labels a new segmentation as correct and erroneous comparing with the mean signature using optimal resolution and threshold. <img src="../figures/workflow.png"> The comparison between signatures is done using root mean square error (RMSE). True label for each segmentation was done visually. Correct segmentation corresponds to segmentations with at least 50% of agreement with the structure. It is expected that RMSE for correct segmentations is lower than RMSE associated to erroneous segmentation when compared with a typical correct segmentation. 
End of explanation """ smoothness = 700 #Smoothness degree = 5 #Spline degree fit_res = 0.35 resols = np.arange(0.01,0.5,0.01) #Signature resolutions resols = np.insert(resols,0,fit_res) #Insert resolution for signature fitting points = 500 #Points of Spline reconstruction prof_vec = np.empty((len(list_masks),resols.shape[0],points)) #Initializing correct signature vector for ind, mask in enumerate(list_masks): #Loading correct mask mask_pn = np.load('../../dataset/Seg_Watershed/mask_wate_{}.npy'.format(mask)) refer_temp = sign_extract(mask_pn, resols) #Function for shape signature extraction prof_vec[ind] = refer_temp if mask > 0: #Fitting curves using the first one as basis prof_ref = prof_vec[0] prof_vec[ind] = sign_fit(prof_ref[0], refer_temp) #Function for signature fitting ind_rel_cor = np.where(np.logical_not(list_labels))[0] ind_rel_err = np.where(list_labels)[0] print "Correct segmentations' vector: ", prof_vec[ind_rel_cor].shape print "Erroneous segmentations' vector: ", prof_vec[ind_rel_err].shape print(ind_rel_cor.shape) print(ind_ex_cor.shape) res_ex = 15 #for ind_ex, ind_rel in zip(ind_ex_cor, ind_rel_cor): # plt.figure() # f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5)) # ax1.plot(prof_vec[ind_rel,res_ex,:].T) # ax1.set_title("Signature %i at res: %f"%(ind_ex, resols[res_ex])) # # mask_correct = np.load('../../dataset/Seg_Watershed/mask_wate_{}.npy'.format(ind_ex)) # ax2.axis('off') # ax2.imshow(mask_correct,'gray',interpolation='none') # # plt.show() plt.figure() plt.plot(prof_vec[ind_rel_cor,res_ex,:].T) plt.title("Correct signatures for res: %f"%(resols[res_ex])) plt.show() plt.figure() plt.plot(prof_vec[ind_rel_err,res_ex,:].T) plt.title("Erroneous signatures for res: %f"%(resols[res_ex])) plt.show() """ Explanation: Shape signature for comparison Signature is a shape descriptor that measures the rate of variation along the segmentation contour. 
As shown in figure, the curvature $k$ in the pivot point $p$, with coordinates ($x_p$,$y_p$), is calculated using the next equation. This curvature depict the angle between the segments $\overline{(x_{p-ls},y_{p-ls})(x_p,y_p)}$ and $\overline{(x_p,y_p)(x_{p+ls},y_{p+ls})}$. These segments are located to a distance $ls>0$, starting in a pivot point and finishing in anterior and posterior points, respectively. The signature is obtained calculating the curvature along all segmentation contour. \begin{equation} \label{eq:per1} k(x_p,y_p) = \arctan\left(\frac{y_{p+ls}-y_p}{x_{p+ls}-x_p}\right)-\arctan\left(\frac{y_p-y_{p-ls}}{x_p-x_{p-ls}}\right) \end{equation} <img src="../figures/curvature.png"> Signature construction is performed from segmentation contour of the CC. From contour, spline is obtained. Spline purpose is twofold: to get a smooth representation of the contour and to facilitate calculation of the curvature using its parametric representation. The signature is obtained measuring curvature along spline. $ls$ is the parametric distance between pivot point and both posterior and anterior points and it determines signature resolution. By simplicity, $ls$ is measured in percentage of reconstructed spline points. In order to achieve quantitative comparison between two signatures root mean square error (RMSE) is introduced. RMSE measures distance, point to point, between signatures $a$ and $b$ along all points $p$ of signatures. \begin{equation} \label{eq:per4} RMSE = \sqrt{\frac{1}{P}\sum_{p=1}^{P}(k_{ap}-k_{bp})^2} \end{equation} Frequently, signatures of different segmentations are not fitted along the 'x' axis because of the initial point on the spline calculation starts in different relative positions. This makes impossible to compare directly two signatures and therefore, a prior fitting process must be accomplished. The fitting process is done shifting one of the signature while the other is kept fixed. 
For each shift, RMSE between the two signatures is measured. The point giving the minor error is the fitting point. Fitting was done at resolution $ls = 0.35$. This resolution represents globally the CC's shape and eases their fitting. After fitting, RMSE between signatures can be measured in order to achieve final quantitative comparison. Signature for segmentation error detection For segmentation error detection, a typical correct signature is obtained calculating mean over a group of signatures from correct segmentations. Because of this signature could be used in any resolution, $ls$ must be chosen for achieve segmentation error detection. The optimal resolution must be able to return the greatest RMSE difference between correct and erroneous segmentation when compared with a typical correct signature. In the optimal resolution, a threshold must be chosen for separate erroneous and correct segmentations. This threshold stays between RMSE associated to correct ($RMSE_E$) and erroneous ($RMSE_C$) signatures and it is given by the next equation where N (in percentage) represents proximity to correct or erroneous RMSE. If RMSE calculated over a group of signatures, mean value is applied. \begin{equation} \label{eq:eq3} th = N*(\overline{RMSE_E}-\overline{RMSE_C})+\overline{RMSE_C} \end{equation} Experiments and results In this work, comparison of signatures through RMSE is used for segmentation error detection in large datasets. For this, it will be calculated a mean correct signature based on 20 correct segmentation signatures. This mean correct signature represents a tipycal correct segmentation. For a new segmentation, signature is extracted and compared with mean signature. For experiments, DWI from 152 subjects at the University of Campinas, were acquired on a Philips scanner Achieva 3T in the axial plane with a $1$x$1mm$ spatial resolution and $2mm$ slice thickness, along $32$ directions ($b-value=1000s/mm^2$, $TR=8.5s$, and $TE=61ms$). 
All data used in this experiment was acquired through a project approved by the research ethics committee from the School of Medicine at UNICAMP. From each acquired DWI volume, only the midsaggital slice was used. Three segmentation methods were implemented to obtained binary masks over a 152 subject dataset: Watershed, ROQS and pixel-based. 40 Watershed segmentations were chosen as follows: 20 correct segmentations for mean correct signature generation and 10 correct and 10 erroneous segmentations for signature configuration stage. Watershed was chosen to generate and adjust the mean signature because of its higher error rate and its variability in the erroneous segmentation shape. These characteristics allow improve generalization. The method was tested on the remaining Watershed segmentations (108 masks) and two additional segmentations methods: ROQS (152 masks) and pixel-based (152 masks). Mean correct signature generation In this work, segmentations based on Watershed method were used for implementation of the first and second stages. From the Watershed dataset, 20 correct segmentations were chosen. Spline for each one was obtained from segmentation contour. The contour was obtained using mathematical morphology, applying xor logical operation, pixel-wise, between original segmentation and the eroded version of itself by an structuring element b: \begin{equation} \label{eq:per2} G_E = XOR(S,S \ominus b) \end{equation} From contour, it is calculated spline. The implementation, is a B-spline (Boor's basic spline). This formulation has two parameters: degree, representing polynomial degrees of the spline, and smoothness, being the trade off between proximity and smoothness in the fitness of the spline. Degree was fixed in 5 allowing adequate representation of the contour. Smoothness was fixed in 700. This value is based on the mean quantity of pixels of the contour that are passed for spline calculation. 
The curvature was measured over 500 points over the spline to generate the signature along 20 segmentations. Signatures were fitted to make possible comparison (Fig. signatures). Fitting resolution was fixed in 0.35. In order to get a representative correct signature, mean signature per-resolution is generated using 20 correct signatures. The mean is calculated in each point. Signature configuration Because of the mean signature was extracted for all the resolutions, it is necessary to find resolution in that diference between RMSE for correct signature and RMSE for erroneous signature is maximum. So, 20 news segmentations were used to find this optimal resolution, being divided as 10 correct segmentations and 10 erroneous segmentations. For each segmentation, it was extracted signature for all resolutions. End of explanation """ def train(model,train_loader,loss_fn,optimizer,epochs=100,patience=5,criteria_stop="loss"): hist_train_loss = hist_val_loss = hist_train_acc = hist_val_acc = np.array([]) best_epoch = patience_count = 0 print("Training starts along %i epoch"%epochs) for e in range(epochs): correct_train = correct_val = total_train = total_val = 0 cont_i = loss_t_e = loss_v_e = 0 for data_train in train_loader: var_inputs = Variable(data_train) predict, encode = model(var_inputs) loss = loss_fn(predict, var_inputs.view(-1, 500)) loss_t_e += loss.data[0] optimizer.zero_grad() loss.backward() optimizer.step() cont_i += 1 #Stacking historical hist_train_loss = np.hstack((hist_train_loss, loss_t_e/(cont_i*1.0))) print('Epoch: ', e, 'train loss: ', hist_train_loss[-1]) if(e == epochs-1): best_epoch = e best_model = copy.deepcopy(model) print("Training stopped") patience_count += 1 return(best_model, hist_train_loss, hist_val_loss) class autoencoder(nn.Module): def __init__(self): super(autoencoder, self).__init__() self.fc1 = nn.Linear(500, 200) self.fc21 = nn.Linear(200, 2) self.fc3 = nn.Linear(2, 200) self.fc4 = nn.Linear(200, 500) self.relu = nn.ReLU() 
self.sigmoid = nn.Sigmoid() def encode(self, x): h1 = self.relu(self.fc1(x)) return self.fc21(h1) def decode(self, z): h3 = self.relu(self.fc3(z)) return self.sigmoid(self.fc4(h3)) def forward(self, x): z = self.encode(x.view(-1, 500)) return self.decode(z), z class decoder(nn.Module): def __init__(self): super(decoder, self).__init__() self.fc3 = nn.Linear(2, 200) self.fc4 = nn.Linear(200, 500) self.relu = nn.ReLU() self.sigmoid = nn.Sigmoid() def decode(self, z): h3 = self.relu(self.fc3(z)) return self.sigmoid(self.fc4(h3)) def forward(self, x): return self.decode(x.view(-1, 2)) net = autoencoder() print(net) res_chs = res_ex trainloader = prof_vec[:,res_chs,:] val_norm = np.amax(trainloader).astype(float) print val_norm trainloader = trainloader / val_norm trainloader = torch.FloatTensor(trainloader) print trainloader.size() loss_fn = torch.nn.MSELoss() optimizer = torch.optim.Adam(net.parameters()) epochs = 20 patience = 5 max_batch = 64 criteria = "loss" best_model, loss, loss_test = train(net, trainloader, loss_fn, optimizer, epochs = epochs, patience = patience, criteria_stop = criteria) plt.title('Loss') plt.xlabel('epochs') plt.ylabel('loss') plt.plot(loss, label='Train') plt.legend() plt.show() decode, encode = net(Variable(trainloader)) out_decod = decode.data.numpy() out_encod = encode.data.numpy() print(out_decod.shape, out_encod.shape, list_labels.shape) plt.figure(figsize=(7, 6)) plt.scatter(out_encod[:,0], out_encod[:,1], c=list_labels) plt.show() """ Explanation: Autoencoder End of explanation """ #Loading labeled segmentations seg_label = genfromtxt('../../dataset/Seg_ROQS/roqs_label.csv', delimiter=',').astype('uint8') list_masks = seg_label[np.logical_or(seg_label[:,1] == 0, seg_label[:,1] == 1), 0] #Extracting segmentations list_labels = seg_label[np.logical_or(seg_label[:,1] == 0, seg_label[:,1] == 1), 1] #Extracting labels ind_ex_err = list_masks[np.where(list_labels)[0]] ind_ex_cor = list_masks[np.where(np.logical_not(list_labels))[0]] 
prof_vec_roqs = np.empty((len(list_masks),resols.shape[0],points)) #Initializing correct signature vector for ind, mask in enumerate(list_masks): mask_pn = np.load('../../dataset/Seg_ROQS/mask_roqs_{}.npy'.format(mask)) #Loading mask refer_temp = sign_extract(mask_pn, resols) #Function for shape signature extraction prof_vec_roqs[ind] = sign_fit(prof_ref[0], refer_temp) #Function for signature fitting using Watershed as basis ind_rel_cor = np.where(np.logical_not(list_labels))[0] ind_rel_err = np.where(list_labels)[0] print "Correct segmentations' vector: ", prof_vec_roqs[ind_rel_cor].shape print "Erroneous segmentations' vector: ", prof_vec_roqs[ind_rel_err].shape #for ind_ex, ind_rel in zip(ind_ex_err, ind_rel_err): # plt.figure() # f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5)) # ax1.plot(prof_vec_roqs[ind_rel,res_ex,:].T) # ax1.set_title("Signature %i at res: %f"%(ind_ex, resols[res_ex])) # # mask_correct = np.load('../../dataset/Seg_ROQS/mask_roqs_{}.npy'.format(ind_ex)) # ax2.axis('off') # ax2.imshow(mask_correct,'gray',interpolation='none') # # plt.show() plt.figure() plt.plot(prof_vec_roqs[ind_rel_cor,res_ex,:].T) plt.title("Correct signatures for res: %f"%(resols[res_ex])) plt.show() plt.figure() plt.plot(prof_vec_roqs[ind_rel_err,res_ex,:].T) plt.title("Erroneous signatures for res: %f"%(resols[res_ex])) plt.show() trainloader = prof_vec_roqs[:,res_chs,:] trainloader = trainloader / val_norm trainloader = torch.FloatTensor(trainloader) print trainloader.size() decode, encode = net(Variable(trainloader)) out_decod = decode.data.numpy() out_encod = encode.data.numpy() print(out_decod.shape, out_encod.shape, list_labels.shape) plt.figure(figsize=(7, 6)) plt.scatter(out_encod[:,0], out_encod[:,1], c=list_labels) plt.show() """ Explanation: Testing in new datasets ROQS test End of explanation """ #Loading labeled segmentations seg_label = genfromtxt('../../dataset/Seg_pixel/pixel_label.csv', delimiter=',').astype('uint8') list_masks = 
seg_label[np.logical_or(seg_label[:,1] == 0, seg_label[:,1] == 1), 0] #Extracting segmentations list_labels = seg_label[np.logical_or(seg_label[:,1] == 0, seg_label[:,1] == 1), 1] #Extracting labels ind_ex_err = list_masks[np.where(list_labels)[0]] ind_ex_cor = list_masks[np.where(np.logical_not(list_labels))[0]] prof_vec_pixe = np.empty((len(list_masks),resols.shape[0],points)) #Initializing correct signature vector for ind, mask in enumerate(list_masks): mask_pn = np.load('../../dataset/Seg_pixel/mask_pixe_{}.npy'.format(mask)) #Loading mask refer_temp = sign_extract(mask_pn, resols) #Function for shape signature extraction prof_vec_pixe[ind] = sign_fit(prof_ref[0], refer_temp) #Function for signature fitting using Watershed as basis ind_rel_cor = np.where(np.logical_not(list_labels))[0] ind_rel_err = np.where(list_labels)[0] print "Correct segmentations' vector: ", prof_vec_pixe[ind_rel_cor].shape print "Erroneous segmentations' vector: ", prof_vec_pixe[ind_rel_err].shape #for ind_ex, ind_rel in zip(ind_ex_cor, ind_rel_cor): # plt.figure() # f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5)) # ax1.plot(prof_vec_pixe[ind_rel,res_ex,:].T) # ax1.set_title("Signature %i at res: %f"%(ind_ex, resols[res_ex])) # # mask_correct = np.load('../../dataset/Seg_pixel/mask_pixe_{}.npy'.format(ind_ex)) # ax2.axis('off') # ax2.imshow(mask_correct,'gray',interpolation='none') # # plt.show() plt.figure() plt.plot(prof_vec_pixe[ind_rel_cor,res_ex,:].T) plt.title("Correct signatures for res: %f"%(resols[res_ex])) plt.show() plt.figure() plt.plot(prof_vec_pixe[ind_rel_err,res_ex,:].T) plt.title("Erroneous signatures for res: %f"%(resols[res_ex])) plt.show() trainloader = prof_vec_pixe[:,res_chs,:] trainloader = trainloader / val_norm trainloader = torch.FloatTensor(trainloader) print trainloader.size() decode, encode = net(Variable(trainloader)) out_decod = decode.data.numpy() out_encod = encode.data.numpy() print(out_decod.shape, out_encod.shape, list_labels.shape) 
plt.figure(figsize=(7, 6)) plt.scatter(out_encod[:,0], out_encod[:,1], c=list_labels) plt.show() """ Explanation: Pixel-based test End of explanation """
google-research/google-research
micronet_challenge/EfficientNetCounting.ipynb
apache-2.0
# Copyright 2019 MicroNet Challenge Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Explanation: Copyright 2019 MicroNet Challenge Authors. Licensed under the Apache License, Version 2.0 (the "License"); End of explanation """
End of explanation """ """Creates and read the operations of EfficientNet instances. """ DEFAULT_INPUT_SIZES = { # (width_coefficient, depth_coefficient, resolution) 'efficientnet-b0': 224, 'efficientnet-b1': 240, 'efficientnet-b2': 260, 'efficientnet-b3': 300, 'efficientnet-b4': 380, 'efficientnet-b5': 456, 'efficientnet-b6': 528, 'efficientnet-b7': 600} def create_model(model_name, input_shape=None): """Creates and reads operations from the given model. Args: model_name: str, one of the DEFAULT_INPUT_SIZES.keys() input_shape: str or None, if None will be read from the dictionary. Returns: list, of operations. """ if input_shape is None: input_size = DEFAULT_INPUT_SIZES[model_name] input_shape = (1, input_size, input_size, 3) blocks_args, global_params = efficientnet_builder.get_model_params( model_name, None) print('global_params= %s' % str(global_params)) print('blocks_args= %s' % str('\n'.join(map(str, blocks_args)))) tf.reset_default_graph() with tf.variable_scope(model_name): model = efficientnet_model.Model(blocks_args, global_params) # This will initialize the variables. _ = model(tf.ones((input_shape))) return model, input_shape model_name = 'efficientnet-b0' model, input_shape = create_model(model_name) """ Explanation: 1. Creating the Model End of explanation """ #@title Reading Utils # assumes everything square # returns number of pixels for which a convolution is calculated def read_block(block, input_size, f_activation='swish'): """Reads the operations on a single EfficientNet block. Args: block: efficientnet_model.MBConvBlock, input_shape: int, square image assumed. f_activation: str or None, one of 'relu', 'swish', None. Returns: list, of operations. 
""" ops = [] # 1 l_name = '_expand_conv' if hasattr(block, l_name): layer = getattr(block, l_name) layer_temp = counting.Conv2D( input_size, layer.kernel.shape.as_list(), layer.strides, layer.padding, True, f_activation) # Use bias true since batch_norm ops.append((l_name, layer_temp)) # 2 l_name = '_depthwise_conv' layer = getattr(block, l_name) layer_temp = counting.DepthWiseConv2D( input_size, layer.weights[0].shape.as_list(), layer.strides, layer.padding, True, f_activation) # Use bias true since batch_norm ops.append((l_name, layer_temp)) # Input size might have changed. input_size = counting.get_conv_output_size( image_size=input_size, filter_size=layer_temp.kernel_shape[0], padding=layer_temp.padding, stride=layer_temp.strides[0]) # 3 if block._has_se: se_reduce = getattr(block, '_se_reduce') se_expand = getattr(block, '_se_expand') # Kernel has the input features in its second dimension. n_channels = se_reduce.kernel.shape.as_list()[2] ops.append(('_se_reduce_mean', counting.GlobalAvg(input_size, n_channels))) # input size is 1 layer_temp = counting.Conv2D( 1, se_reduce.kernel.shape.as_list(), se_reduce.strides, se_reduce.padding, True, f_activation) ops.append(('_se_reduce', layer_temp)) layer_temp = counting.Conv2D( 1, se_expand.kernel.shape.as_list(), se_expand.strides, se_expand.padding, True, 'sigmoid') ops.append(('_se_expand', layer_temp)) ops.append(('_se_scale', counting.Scale(input_size, n_channels))) # 4 l_name = '_project_conv' layer = getattr(block, l_name) layer_temp = counting.Conv2D( input_size, layer.kernel.shape.as_list(), layer.strides, layer.padding, True, None) # Use bias true since batch_norm, no activation ops.append((l_name, layer_temp)) if (block._block_args.id_skip and all(s == 1 for s in block._block_args.strides) and block._block_args.input_filters == block._block_args.output_filters): ops.append(('_skip_add', counting.Add(input_size, n_channels))) return ops, input_size def read_model(model, input_shape, f_activation='swish'): 
"""Reads the operations on a single EfficientNet block. Args: model: efficientnet_model.Model, input_shape: int, square image assumed. f_activation: str or None, one of 'relu', 'swish', None. Returns: list, of operations. """ # Ensure that the input run through model _ = model(tf.ones(input_shape)) input_size = input_shape[1] # Assuming square ops = [] # 1 l_name = '_conv_stem' layer = getattr(model, l_name) layer_temp = counting.Conv2D( input_size, layer.weights[0].shape.as_list(), layer.strides, layer.padding, True, f_activation) # Use bias true since batch_norm ops.append((l_name, layer_temp)) # Input size might have changed. input_size = counting.get_conv_output_size( image_size=input_size, filter_size=layer_temp.kernel_shape[0], padding=layer_temp.padding, stride=layer_temp.strides[0]) # Blocks for idx, block in enumerate(model._blocks): block_ops, input_size = read_block(block, input_size, f_activation=f_activation) ops.append(('block_%d' % idx, block_ops)) # Head l_name = '_conv_head' layer = getattr(model, l_name) layer_temp = counting.Conv2D( input_size, layer.weights[0].shape.as_list(), layer.strides, layer.padding, True, f_activation) # Use bias true since batch_norm n_channels_out = layer.weights[0].shape.as_list()[-1] ops.append((l_name, layer_temp)) ops.append(('_avg_pooling', counting.GlobalAvg(input_size, n_channels_out))) l_name = '_fc' layer = getattr(model, l_name) ops.append(('_fc', counting.FullyConnected( layer.kernel.shape.as_list(), True, None))) return ops F_ACTIVATION = 'swish' all_ops = read_model(model, input_shape, f_activation=F_ACTIVATION) print('\n'.join(map(str, all_ops))) """ Explanation: 2. Extracting Operations We assume 'same' padding with square images/conv kernels. batchnorm scales are not counted since they can be merged. Bias added for each batch norm applied on a layer's output. f_activation can be changed to one of the followin relu or swish. End of explanation """ # add_bits_base=32, since 32 bit adds count 1 add. 
# MicroNetCounter normalizes costs to 32-bit operations: a 32-bit add counts
# as 1 add, and a multiplication with 32-bit inputs as 1 multiplication.
counter = counting.MicroNetCounter(all_ops, add_bits_base=32, mul_bits_base=32)

# Quantization constants: 16-bit inputs and parameters, 32-bit accumulators.
INPUT_BITS = 16
ACCUMULATOR_BITS = 32
PARAMETER_BITS = INPUT_BITS
SUMMARIZE_BLOCKS = True

# One summary per sparsity level, in the same order as before.
for sparsity in (0, 0.1, 0.5, 0.9):
    counter.print_summary(sparsity, PARAMETER_BITS, ACCUMULATOR_BITS,
                          INPUT_BITS, summarize_blocks=SUMMARIZE_BLOCKS)
ThomasProctor/Slide-Rule-Data-Intensive
TaxicabProject/Code/Feature Selection.ipynb
mit
import pandas as pd import sqlalchemy as sqla import numpy as np #import matplotlib import matplotlib.pyplot as plt import statsmodels.api as sm #%matplotlib qt %matplotlib inline engine = sqla.create_engine('postgresql://postgres:postgres@localhost:5432/TaxiData',echo=False) columntypelist=pd.read_sql_query("SELECT column_name, data_type FROM information_schema.columns WHERE table_name = 'lotsofdata';", engine) columntypelist.set_index('column_name',inplace=True) columntypelist.groupby(['data_type'])['data_type'].count() numbercolumns=columntypelist[(columntypelist['data_type']=='double precision')|(columntypelist['data_type']=='bigint')|(columntypelist['data_type']=='integer')|(columntypelist.index=='fipscodes')]['data_type'] numbercolumns.index=numbercolumns.index.str.strip() numbercolumns=numbercolumns[numbercolumns.index.str.find(' ')==-1].index.tolist() len(numbercolumns) #columnstring="'"+"', '".join(numbercolumns)+"'" columnstring='"'+'", "'.join(numbercolumns)+'"' full=pd.read_sql_query('SELECT '+columnstring + ' FROM lotsofdata',engine).set_index('fipscodes') full=full[full['totalpopulation']>=1000] Yname='twentythirteen_full_count_pc' dropoffitems=['abridged2013ycdrpoffpc','counts','abridged2013ycdrpoff',\ 'driver_income_standard_dev_resid', 'twentythirteen_full_count_pc',\ 'time_dif_derived_approxcount_error', 'time_dif_derived_approxcount',\ 'twentythirteen_full_count', 'driver_income_anscombe_resid'] full.drop([i for i in dropoffitems if i is not Yname],axis=1,inplace=True) full.replace(np.inf, np.nan,inplace=True) full.dropna(axis='columns',how='all',inplace=True) full.dropna(inplace=True) full.drop_duplicates(inplace=True) full=full.T.drop_duplicates(keep='last').T pcfull=full #pcfull.replace({'totalpopulation':{0:np.nan}},inplace=True) #pcfull.dropna().shape Y=pcfull[Yname] pcfull=pcfull.divide(full['totalpopulation'],axis='index') #These were already per-capita type data columns, or shouldn't be per-capita 
#pcfull['nondrivercommuterrat']=((pcfull['MOGE001']-pcfull['MOGE011'])/pcfull['MOGE001']) pcfull['MRUE001']=full['MRUE001'] pcfull['MRUM001']=full['MRUM001'] pcfull['totalpopulation']=full['totalpopulation'] pcfull['boro_int_code']=full['boro_int_code'] pcfull['nondrivercomrat']=full['nondrivercomrat'] """ Explanation: Data Prep End of explanation """ import os.path codebookpath=os.path.expanduser('~/Documents/TaxiTripData/TIGERFiles/nhgis_codebooks') def searchcodebook(code,path=codebookpath): import subprocess command='grep -r -h -m 1 '+code+' '+path+'*' try: grepstring=subprocess.check_output(command,shell=True) return grepstring[grepstring.find(code)+len(code)+1:grepstring.find('\r')].strip() except subprocess.CalledProcessError: return code codebookdict=pcfull.columns.to_series().apply(searchcodebook) codebookdict[((codebookdict.str.find('capita'))!=-1)] """ Explanation: The census/ACS data columns all are given a 7 letter/number code. The data comes with "code books", text files with descriptions of the data and what the codes mean. The function below grabs the one-line descriptions that go with each code, giving a nice, brief description of what each code means. End of explanation """ from sklearn import feature_selection featureselect=feature_selection.SelectKBest(feature_selection.f_regression) featureselect.fit(pcfull.drop(Yname,axis=1),Y) pcfeaturescores=pd.Series(featureselect.pvalues_) pcfeaturescores.index=pcfull.drop(Yname,axis=1).columns pcfeaturescores.sort_values(ascending=True,inplace=True) #codebookdict[pcfeaturescores.iloc[:10].index].tolist() codebookdict[pcfeaturescores.iloc[:10].index] """ Explanation: That makes sure I didn't miss any per-capita data columns that I didn't know about. 
from sklearn import feature_selection

# Univariate screen: F-test p-value of every candidate column against Y.
design = pcfull.drop(Yname, axis=1)
featureselect = feature_selection.SelectKBest(feature_selection.f_regression)
featureselect.fit(design, Y)

# p-values indexed by column code, most significant first.
pcfeaturescores = pd.Series(featureselect.pvalues_, index=design.columns)
pcfeaturescores.sort_values(ascending=True, inplace=True)

# Codebook descriptions of the ten strongest predictors.
codebookdict[pcfeaturescores.iloc[:10].index]

# Correlation picture for the top three predictors, the target and MOJE016.
chosen = pcfeaturescores.iloc[:3].index.tolist() + [Yname, 'MOJE016']
correlm = pcfull[chosen]
correlm['logIncome'] = np.log(correlm['MRUE001'])
correlm['logDropOffs'] = np.log(correlm[Yname])
corr_matrix = np.corrcoef(correlm.T)
sm.graphics.plot_corr(corr_matrix, xnames=correlm.columns.tolist(), cmap=plt.cm.get_cmap('viridis'))
plt.show()
quoniammm/mine-tensorflow-examples
gan/gan_mnist/Intro_to_GANs_Solution.ipynb
mit
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

from tensorflow.examples.tutorials.mnist import input_data
# Downloads MNIST on first use; images arrive flattened to 784 floats.
mnist = input_data.read_data_sets('MNIST_data')
def model_inputs(real_dim, z_dim):
    """Create the two graph inputs: real images and latent vectors.

    Both placeholders are float32 with a free batch dimension.
    """
    real_ph = tf.placeholder(tf.float32, (None, real_dim), name='input_real')
    z_ph = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
    return real_ph, z_ph


def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
    """Map a latent batch z to fake images in (-1, 1).

    One leaky-ReLU hidden layer followed by a tanh output. All variables
    live under the 'generator' scope so they can be trained (and later
    reused for sampling) separately from the discriminator.
    """
    with tf.variable_scope('generator', reuse=reuse):
        hidden = tf.layers.dense(z, n_units, activation=None)
        hidden = tf.maximum(alpha * hidden, hidden)  # leaky ReLU
        logits = tf.layers.dense(hidden, out_dim, activation=None)
        return tf.tanh(logits)
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
    """Score a batch of (real or fake) images.

    Returns (out, logits): the sigmoid output in (0, 1) — 1 meaning "real" —
    plus the raw logits for the cross-entropy losses. Variables live under
    the 'discriminator' scope and are shared between the real and fake
    branches via `reuse`.
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        hidden = tf.layers.dense(x, n_units, activation=None)
        hidden = tf.maximum(alpha * hidden, hidden)  # leaky ReLU
        logits = tf.layers.dense(hidden, 1, activation=None)
        return tf.sigmoid(logits), logits
# Size of input image to discriminator (28x28 MNIST images, flattened)
input_size = 784
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label-smoothing factor: real labels become (1 - smooth)
smooth = 0.1

tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)

# Build the model
g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)
# g_model is the generator output

# The two discriminator branches share one set of weights: the second call
# passes reuse=True so the fake branch reuses the real branch's variables.
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, n_units=d_hidden_size, alpha=alpha)
# Calculate losses.
# Discriminator, real branch: labels are (1 - smooth) instead of 1.0 —
# label smoothing helps the discriminator generalize.
d_loss_real = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_logits_real,
        labels=tf.ones_like(d_logits_real) * (1 - smooth)))
# Discriminator, fake branch: labels are all zeros. FIX: derive the zero
# labels from d_logits_fake (the tensor actually being scored) rather than
# d_logits_real — values are identical while the two batches share a shape,
# but tying the labels to the wrong tensor would break if they ever diverge.
d_loss_fake = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_logits_fake,
        labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake

# Generator: wants the discriminator to call its fakes real (labels = 1).
g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=d_logits_fake,
        labels=tf.ones_like(d_logits_fake)))
# Optimizers
learning_rate = 0.002

# Get the trainable_variables, split into G and D parts. The variable-scope
# prefixes chosen in generator()/discriminator() make this split possible.
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]

# Each optimizer only updates its own network's variables (var_list), so a
# discriminator step never touches generator weights and vice versa.
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
batch_size = 100
epochs = 100
samples = []   # one batch of 16 generator outputs saved per epoch
losses = []    # (d_loss, g_loss) per epoch

# Only save generator variables
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for e in range(epochs):
        for ii in range(mnist.train.num_examples//batch_size):
            batch = mnist.train.next_batch(batch_size)

            # Get images, reshape and rescale from [0, 1] to [-1, 1] to
            # match the generator's tanh output range.
            batch_images = batch[0].reshape((batch_size, 784))
            batch_images = batch_images*2 - 1

            # Sample random noise for G
            batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))

            # Run optimizers: one D step, then one G step per batch.
            _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
            _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})

        # At the end of each epoch, get the losses and print them out
        train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
        train_loss_g = g_loss.eval({input_z: batch_z})
        print("Epoch {}/{}...".format(e+1, epochs),
              "Discriminator Loss: {:.4f}...".format(train_loss_d),
              "Generator Loss: {:.4f}".format(train_loss_g))
        # Save losses to view after training
        losses.append((train_loss_d, train_loss_g))

        # Sample from generator as we're training for viewing afterwards
        sample_z = np.random.uniform(-1, 1, size=(16, z_size))
        gen_samples = sess.run(
            generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
            feed_dict={input_z: sample_z})
        samples.append(gen_samples)
        saver.save(sess, './checkpoints/generator.ckpt')

# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
    pkl.dump(samples, f)

# Plot the per-epoch losses.
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
# FIX: the source had the call duplicated and juxtaposed on one line
# ("plt.legend()plt.legend()"), a syntax error in script form; one call is
# all that is needed.
plt.legend()
def view_samples(epoch, samples):
    """Show a 4x4 grid of the generated digits stored in samples[epoch]."""
    fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
    for axis, image in zip(axes.flatten(), samples[epoch]):
        axis.xaxis.set_visible(False)
        axis.yaxis.set_visible(False)
        axis.imshow(image.reshape((28,28)), cmap='Greys_r')
    return fig, axes


# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
    samples = pkl.load(f)

# Samples from the final training epoch.
_ = view_samples(-1, samples)

# Progress grid: `rows` evenly spaced epochs, `cols` images from each.
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
epoch_step = int(len(samples)/rows)
for sample, ax_row in zip(samples[::epoch_step], axes):
    image_step = int(len(sample)/cols)
    for img, ax in zip(sample[::image_step], ax_row):
        ax.imshow(img.reshape((28,28)), cmap='Greys_r')
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
# Rebuild a saver over the generator variables only (that is all the saved
# checkpoint contains) and sample brand-new digits from the trained G.
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    sample_z = np.random.uniform(-1, 1, size=(16, z_size))
    gen_samples = sess.run(
        generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
        feed_dict={input_z: sample_z})
_ = view_samples(0, [gen_samples])
igor-sokolov/dataminingcapstone
Capstone project 6.ipynb
mit
# Directory layout of the hygiene task data:
#   ./dataminingcapstone-001/Hygiene/hygiene.dat         (one review document per line)
#   ./dataminingcapstone-001/Hygiene/hygiene.dat.labels  (one label per line)
basePath = 'dataminingcapstone-001'
hygienePath = 'Hygiene'
workingDir = os.path.join(os.curdir, basePath, hygienePath)
_data_files = {'reviews': 'hygiene.dat', 'labels': 'hygiene.dat.labels'}
reviewsPath = os.path.join(workingDir, _data_files['reviews'])
labelsPath = os.path.join(workingDir, _data_files['labels'])
End of explanation """ # I could use string.punctionation constant but it contains symbol ' which is used widely in casual speak. # So defined my own constant. punctuation = """!"#$%&()*+,-./:;<=>?@[\]^_`{|}~""" remove_punctuation_map = dict((ord(char), None) for char in punctuation) def remove_punctuation(line): return line.translate(remove_punctuation_map).lower() from nltk.tokenize.stanford import StanfordTokenizer path_to_jar = '/Users/igorsokolov/stanford-postagger-2015-04-20/stanford-postagger-3.5.2.jar' tokenizer = StanfordTokenizer(path_to_jar=path_to_jar, options={"americanize": True}) def tokenize(line): return tokenizer.tokenize(line) from nltk.stem.porter import PorterStemmer porterStemmer = PorterStemmer() def stemming(tokens): return [porterStemmer.stem(item) for item in tokens] import re reductions_map = {'\'m': 'am', 'n\'t': 'not', '\'ll': 'will', '\'s': 'is', '\'ve': 'have', '\'d': 'would', '\'re': 'are'} def replace_reductions(line): return reduce(lambda x, y: x.replace(y, reductions_map[y]), reductions_map, line) reviewsRDD = sc.textFile(reviewsPath, use_unicode=True) labelsRDD = sc.textFile(labelsPath) prerocessed_reviews_RDD = (reviewsRDD .map(lambda line: line.lower()) .map(lambda line: line.replace('&#160;', '')) .map(lambda line: remove_punctuation(line)) .map(lambda line: tokenize(line)) .map(lambda tokens: stemming(tokens)) .map(lambda tokens: " ".join(tokens)) .map(lambda line: replace_reductions(line)) ) prerocessed_reviews_RDD.take(1) prerocessed_reviews = prerocessed_reviews_RDD.collect() prerocessed_data_train = prerocessed_reviews[:N] prerocessed_data_pred = prerocessed_reviews[N:] print "Train data length: {}".format(len(prerocessed_data_train)) print "Predicted data length: {}".format(len(prerocessed_data_pred)) from sklearn.feature_extraction.text import TfidfVectorizer vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.8, stop_words='english') prerocessed_X_train = vectorizer.fit_transform(prerocessed_data_train) 
prerocessed_X_test = vectorizer.transform(prerocessed_data_pred)
print("n_samples: %d, n_features: %d" % prerocessed_X_train.shape)

# Ridge classifier on the preprocessed TF-IDF features.
from sklearn.linear_model import RidgeClassifier
clf = RidgeClassifier(tol=1e-2, solver="lsqr")
clf.fit(prerocessed_X_train, y_train)
preprocessed_pred = clf.predict(prerocessed_X_test)

# Every submission file starts with the line 'sis' (presumably the
# leaderboard nickname — confirm) followed by one predicted label per line.
predictedLabelsPath = os.path.join(workingDir, 'output7.txt')
with open(predictedLabelsPath, 'w') as f:
    f.write('sis\n')
    for line in preprocessed_pred:
        f.write(line)

# Chi-squared feature scoring followed by multinomial naive Bayes.
from sklearn.feature_selection import SelectKBest, chi2
# NOTE(review): k equals the current number of features, so this SelectKBest
# keeps everything — it scores but does not actually shrink the feature set.
select_chi2 = prerocessed_X_train.shape[1]
ch2 = SelectKBest(chi2, k=select_chi2)
prerocessed_X_train = ch2.fit_transform(prerocessed_X_train, y_train)
prerocessed_X_test = ch2.transform(prerocessed_X_test)

from sklearn.naive_bayes import BernoulliNB, MultinomialNB
clf = MultinomialNB(alpha=.01)
clf.fit(prerocessed_X_train, y_train)
bayes_pred = clf.predict(prerocessed_X_test)

predictedLabelsPath = os.path.join(workingDir, 'output6.txt')
with open(predictedLabelsPath, 'w') as f:
    f.write('sis\n')
    for line in bayes_pred:
        f.write(line)
import re

# Category lists arrive as a quoted, Python-literal-like prefix, e.g.
#   "['Vietnamese', 'Sandwiches', 'Restaurants']",98118,4,4.0
# The pattern is compiled once at module level (the original recompiled it on
# every call) and uses [^']* instead of \w* so that multi-word categories
# such as 'Ice Cream' are not silently dropped.
_CATEGORY_RE = re.compile(r"'([^']*)'")


def parseLine(line):
    """Parse one line of hygiene.dat.additional.

    Returns (categories, [zip_code, review_count, rating]) where zip_code
    stays a string, review_count is an int and rating a float.
    """
    t = line.split('"')
    categories = _CATEGORY_RE.findall(t[1])
    # t[2] starts with the comma that followed the closing quote, hence [1:].
    numbers = t[2].split(',')[1:]
    zip_code = numbers[0]
    review_count = int(numbers[1])
    rating = float(numbers[2])
    return categories, [zip_code, review_count, rating]


def map_items(categories_list):
    """One-hot style dict for DictVectorizer: every category maps to 1."""
    return {cat: 1 for cat in categories_list}
# One-hot category dicts for train/test, vectorized below.
categories_map_train = [map_items(catetogories) for catetogories, adds in additionals[:N]]
categories_map_test = [map_items(catetogories) for catetogories, adds in additionals[N:]]
categories_map_X_train = cat_dict_vectorizer.fit_transform(categories_map_train)
categories_map_X_test = cat_dict_vectorizer.transform(categories_map_test)

# Review-count and average-rating columns (zip codes are one-hot encoded
# separately into index_X_train/index_X_test).
ratings_train = [[float(x[1][1]), float(x[1][2])] for x in additionals[:N]]
ratings_test = [[float(x[1][1]), float(x[1][2])] for x in additionals[N:]]
# NOTE(review): `ratings` is not defined in the cells shown — this probably
# meant len(ratings_train); confirm before re-running.
print len(ratings)
prerocessed_X_train.toarray()

# Stack the sparse text features with the additional feature blocks.
from scipy.sparse import hstack
combined_X_train = hstack([prerocessed_X_train, ratings_train, index_X_train, categories_map_X_train])
combined_X_test = hstack([prerocessed_X_test, ratings_test, index_X_test, categories_map_X_test])
print combined_X_train.toarray()

# Ridge classifier on the combined feature matrix.
from sklearn.linear_model import RidgeClassifier
clf = RidgeClassifier(tol=1e-2, solver="lsqr")
clf.fit(combined_X_train, y_train)
combined_pred = clf.predict(combined_X_test)

predictedLabelsPath = os.path.join(workingDir, 'output14.txt')
with open(predictedLabelsPath, 'w') as f:
    f.write('sis\n')
    for line in combined_pred:
        f.write(line)

# Linear-kernel SVM on the same combined feature matrix.
from sklearn.svm import SVC
svc = SVC(kernel='linear')
svc.fit(combined_X_train, y_train)
svc_pred = svc.predict(combined_X_test)

predictedLabelsPath = os.path.join(workingDir, 'output15.txt')
with open(predictedLabelsPath, 'w') as f:
    f.write('sis\n')
    for line in svc_pred:
        f.write(line)
from sklearn.linear_model import RidgeClassifier
from sklearn.feature_selection import SelectKBest, chi2, f_classif, f_regression
from sklearn.grid_search import GridSearchCV

# NOTE(review): Pipeline is used below but its import
# (sklearn.pipeline.Pipeline) does not appear in the cells shown — confirm
# it is imported elsewhere in the notebook.
# NOTE(review): select_chi2 is assigned but never used in this cell.
select_chi2 = combined_X_train.shape[1]

# Pipeline to grid-search: k-best univariate selection feeding a ridge
# classifier.
ridge_pipeline = Pipeline([
    ('feature_kbest', SelectKBest(chi2)),  # f_classif
    ('ridge_classifier', RidgeClassifier(tol=1e-2, solver="lsqr"))
])
param_grid = dict(feature_kbest__score_func=[chi2, f_classif],
                  feature_kbest__k=[10, 15, 20, 25, 30, 50, 100, 1000, 10000, 20000, 'all'],
                  ridge_classifier__tol=[1e-8, 1e-6, 1e-4, 1e-2, 1e-1],
                  ridge_classifier__solver=['auto', 'cholesky', 'lsqr', 'sparse_cg'],
                  ridge_classifier__normalize=[True, False])
grid_search = GridSearchCV(ridge_pipeline, param_grid=param_grid)
grid_search.fit(combined_X_train, y_train)
print(grid_search.best_estimator_)

# Refit a pipeline frozen at the best parameters reported above.
tuned_ridge_pipeline = Pipeline([
    ('feature_kbest', SelectKBest(chi2, k=10)),
    ('ridge_classifier', RidgeClassifier(alpha=1.0, class_weight=None, copy_X=True,
                                         fit_intercept=True, max_iter=None, normalize=False,
                                         solver='auto', tol=0.0001))
])
tuned_ridge_pipeline.fit(combined_X_train, y_train)
tuned_ridge_pipeline_pred = tuned_ridge_pipeline.predict(combined_X_test)

predictedLabelsPath = os.path.join(workingDir, 'output18.txt')
with open(predictedLabelsPath, 'w') as f:
    f.write('sis\n')
    for line in tuned_ridge_pipeline_pred:
        f.write(line)
adolfoguimaraes/machinelearning
Projects/02_RecommenderSystem_Movies.ipynb
mit
# Import necessários para esta seção import pandas as pd idx = pd.IndexSlice # Preparando o Dataset links = pd.read_csv("../datasets/movielens/links.csv", index_col=['movieId']) movies = pd.read_csv("../datasets/movielens/movies.csv", sep=",", index_col=['movieId']) ratings = pd.read_csv("../datasets/movielens/ratings.csv", index_col=['userId','movieId']) tags = pd.read_csv("../datasets/movielens/tags.csv", index_col=['userId','movieId']) ratings.head() """ Explanation: Projeto Recommender System: Movies Esse projeto tem como finalidade explorar alguns métodos sobre sistema de recomendação. A proposta é criarmos um sistema de recomendação simples utilizando o algoritmo de Recomendação Colaborativa. O ponto de partida é o artigo Toward the Next Generation of Recommender Systems: A Survey of the State-of-the-Art and Possible Extensions. Vamos implementar uma das propostas de recomendação colaborativa do artigo. Detalhes do método implementado é dado mais a frente. Esse tutorial é dividido em: Dataset Descrição do Método de Recomendação Métodos Auxiliares Sistema De Recomendação Simples Sistema de Recomendação Colaborativo Usando a API do IMDb Atividade do MiniProjeto <a id='dataset'></a> Dataset Para demostrar os algoritmos de recomendação vamos utilizar o dataset da MovieLens. O site possui varias versões do dataset cada qual com um número diferentes de filmes e usuários. Vamos utilizar a versão small deles que é descrita como segue: MovieLens Latest Datasets These datasets will change over time, and are not appropriate for reporting research results. We will keep the download links stable for automated downloads. We will not archive or make available previously released versions. Small: 100,000 ratings and 1,300 tag applications applied to 9,000 movies by 700 users. Last updated 10/2016. A primeira etapa é carregar a base de dados como vários DataFrames do Pandas. 
Vamos carregar 4 arquivos: links: possui referência do id de cada filme para o id na base do IMDb e na base do TheMovieDb. Essa informação será usada no final para exibir mais informações dos filmes recomendados utilizando as APIs disponibilizadas por estes sites. movies: lista de filmes da base. Cada filme possui o título e uma lista de gêneros associada. ratings: tabela de avaliação de filmes. Cada usuário avalia um filme com uma nota de 1 a 5. É armazenado também o timestamp de cada avaliação. tags: termos associados a cada filme cadastrados pelos usuários. Para este tutorial vamos utilizar somente as três primeiras tabelas. End of explanation """

def get_movies_by_user(id_user, rating_cut=0, list_=False):
    """Return the movies rated by a user.

    Keyword arguments:
    id_user -- user id
    rating_cut -- keep only items rated >= rating_cut; 0 disables the filter (default: 0)
    list_ -- if True return only the movie ids, if False return {movie_id: rating} (default: False)
    """
    return_dict = {}
    # Slice the (userId, movieId) MultiIndex for this user; keys of the
    # resulting dict are (userId, movieId) tuples, values are ratings.
    dict_ = ratings.loc[idx[id_user, :], 'rating'].T.to_dict()
    for d in dict_:
        if rating_cut != 0:
            if dict_[d] >= rating_cut:
                # d[1] is the movieId component of the MultiIndex key.
                return_dict[d[1]] = dict_[d]
        else:
            return_dict[d[1]] = dict_[d]
    if list_:
        return list(return_dict.keys())
    return return_dict

def get_users_by_movie(id_movie, rating_cut=0, list_=False):
    """Return the users that rated a given movie.

    Keyword arguments:
    id_movie -- movie id
    rating_cut -- keep only users that rated the movie >= rating_cut; 0 disables the filter (default: 0)
    list_ -- if True return only the user ids, if False return {user_id: rating}
    """
    return_dict = {}
    # Same slicing as get_movies_by_user, but fixing the movieId level.
    dict_ = ratings.loc[idx[:, id_movie],'rating'].T.to_dict()
    for d in dict_:
        if rating_cut != 0:
            if dict_[d] >= rating_cut:
                # d[0] is the userId component of the MultiIndex key.
                return_dict[d[0]] = dict_[d]
        else:
            return_dict[d[0]] = dict_[d]
    if list_:
        return list(return_dict.keys())
    return return_dict

def get_rating_by_user_movie(id_user, id_movie):
    """Return the rating that user id_user gave to movie id_movie; 0.0 if it does not exist.

    Keyword arguments:
    id_user -- user id
    id_movie -- movie id
    """
    rating = 0.0;
    try:
        rating = ratings.loc[idx[id_user, id_movie], 'rating']
    except KeyError as e:
        # The (user, movie) pair is absent from the index: never rated.
        rating = 0.0
    return rating

def get_all_users():
    """Return the ids of every user present in the ratings table."""
    return list(set([x[0] for x in ratings.index.values]))

def get_movie_title(id_movie):
    """Return the title of a movie.

    Keyword arguments:
    id_movie -- movie id
    """
    info = movies.loc[idx[id_movie], :]
    return info['title']
""" Explanation: <a id='descricao'></a> Descrição do Método de Recomendação Como dito anteriormente, vamos utilizar um método de recomendação apresentada no artigo Toward the Next Generation of Recommender Systems: A Survey of the State-of-the-Art and Possible Extensions. Existem vários tipos de sistemas de recomendação, neste tutorial vamos utilizar a recomendação dita colaborativa. Esse titpo de recomendação utiliza a avaliação do usuário (ratings) para recomendar. Os usuários são comparados entre si, utilizando alguma métrica de similaridade, e a recomendação é proposta tomando como base os ratings dos usuários mais semelhantes.
A recomendação é medida a partir da predição da nota que o usuário daria a um determinado item (predict rating). Duas métricas precisam ser definidas: O cálculo da similaridade de usuários O cálculo do predict rating Vamos utilizar as seguintes equações propostas no artigo: Cálculo da Similaridade $ sim(x,y) = \frac{\sum_{ s \in S_{xy}} { (r_{x,s} - \bar{r_{x}}) (r_{y,s} - \bar{r_{y}}) } } { \sqrt{ \sum_{s \in S_{xy}}{ (r_{x,s} - \bar{r_{x}})^2 } \sum_{s \in S_{xy}}{ (r_{y,s} - \bar{r_{y}})^2 } } } $, onde: $S_x$: itens avaliados pelo usuário $x$; $S_y$: itens avaliados pelo usuário $y$; $S_{xy}$: o conjunto de todos os itens que foram avaliados tanto por x quanto por y, em outras palavras, a interseção dos conjuntos $S_x$ e $S_y$; $r_{x,s}$: rating do usuário $x$ para o item $s$; $r_{y,s}$: rating do usuário $y$ para o item $s$; $\bar{r_x}$: média dos ratings dos filmes avaliados por $x$ $\bar{r_y}$: média dos ratings dos filmes avaliados por $y$ O cálculo do predict rating Para cada filme da base que o usuário não avaliou é calculado um valor de rating que o usuário daria ao filme. A proposta é calcular isto para todos os filmes da base e recomendar ao usuário os 10 filmes mais bem avaliados. Para este cálculo vamos utilizar a equação: $ r_{c,s} = \bar{r_c} + k * \sum_{c' \in \hat{C}}{sim(c,c') \times (r_{c',s} - \bar{r_{c'}})}$, onde: $c$ e $c'$: são usuários; $s$: um item; $k$: é um fator normalizador dado por $k = \frac{1}{\sum_{c' \in \hat{C}}{|sim(c, c')|}}$ $sim(c, c'):$ a similaridade do usuário c com o usuário c' dada pela equação anterior; $\hat{C}:$ o conjunto dos $N$ usuários mais similares a $c$ que avaliaram o item $s$. As demais variáveis foram descritas na equação anterior, mudando apenas as letras utilizadas. <a id='metodosauxiliares'></a> Métodos Auxiliares Para facilitar a implementação das duas equações, vamos implementar uma série de métodos auxiliares que vão nos ajudar a extrair as informações da base de dados.
O código a seguir implementa tais métodos. O que cada método faz está descrito no início de cada um. End of explanation """ ''' Neste trecho vamos armazenar em memória as informações de filmes avaliados pelos usuários. Isso evitar fazermos muitos acesso a estrutura do DataFrame. ''' all_users = get_all_users() movies_user_true = {} movies_user_false = {} for user in all_users: movies_user_true[user] = get_movies_by_user(user, list_=True) movies_user_false[user] = get_movies_by_user(user, list_=False) """ Explanation: Mesmo com esses métodos algumas operações podem ter um certo custo computacional já que serão chamadas várias vezes. Por exemplo, quando vamos calcular a similaridade de um usuário com todos da base, isso tem um certo custo. Por conta disso, algumas informações serão geradas antes e armazenadas em variáveis na memória. Tais informações serão geradas nas células a seguir. Essas variáveis só serão utilizadas nos métodos em que são utilizadas muitas vezes. End of explanation """ # Usuário para qual a recomendação será apresentada, chamarei de usuário A selected_user = 1 # Filmes com notas 4 e 5 avaliados por este usuário my_movies = get_movies_by_user(selected_user, rating_cut=4, list_=True) # Lista de todos os usuários que avaliaram os filmes assistidos por A all_users = [] for movie in my_movies: all_users = all_users + get_users_by_movie(movie, rating_cut=5, list_=True) # Para eliminar os usuários repetidos, transformamos a lista em um conjunto e depois convertemos novamente em uma lista all_users = list(set(all_users)) # Neste passo, pegamos todos os filmes nota 5 avaliados pelos usuários de all_users all_movies = [] for user in all_users: movies_ = get_movies_by_user(user, rating_cut=5, list_=True) all_movies = all_movies + movies_ # Removemos os filmes repetidos e aqueles já assistidos por A all_movies = list(set(all_movies) - set(my_movies)) # Apresentamos a lista dos filmes print("Foram encontrados: " + str(len(all_movies)) + " filmes") for movie 
in all_movies: print("\t"+ get_movie_title(movie)) """ Explanation: <a id='rs_simple'></a> Sistema De Recomendação Simples Só para que a gente veja alguns desses métodos funcionando, vamos implementar um "Sistema de Recomendação" bem simples. A proposta é recomendar os filmes avaliados com nota 5 que foram assistidos por usuários que já assistiram os mesmos filmes com nota 4 e 5 do usuário em questão. End of explanation """ # Imports necessários para esta seção import math import numpy as np import operator from sklearn.externals import joblib """ Explanation: Da para perceber que esta não é uma abordagem muito boa. Como é retornado somente filmes com nota 5 não existe nenhum critério para ordená-los, fazendo com que seja exibido todos os filmes recomendados (1106). Na próxima seção, vamos apresentar uma proposta em que utilizamos a similaridade de usuários para melhorar essa recomendação. <a id='rs_collaborative'></a> Sistema de Recomendação Colaborativo Uma forma melhor de recomendar é usar os ratings que os usuários deram aos filmes para classificar os usuários de acordo com uma similaridade. A recomendação é feita a partir dos usuários mais similares. Para isso, devemos calcular a distância do usuário em questão com todos os usuários da base e, por fim, a nota que o usuário daria. Como já foi dito, vamos utilizar as equações apresentadas anteriomente. End of explanation """ def intersect_items(id_user_x, id_user_y): """Retorna duas listas de ratings. Os ratings correspondem aos itens avaliados por x e y. É retornada duas listas distintas já que os itens são os mesmo, mas as avaliações são distintas. Isso irá facilitar na hora de calcuar a similaridade dos usuários. 
Keyword arguments: id_user_x -- id do usuário x id_user_y -- id do usuário y """ dict_x = movies_user_false[id_user_x] dict_y = movies_user_false[id_user_y] all_keys = set(list(dict_x.keys()) + list(dict_y.keys())) ratings_x = [] ratings_y = [] for key in all_keys: if key in dict_x and key in dict_y: ratings_x.append(dict_x[key]) ratings_y.append(dict_y[key]) ratings_x = np.array(ratings_x) ratings_y = np.array(ratings_y) return ratings_x, ratings_y """ Explanation: A primeira função que vamos implementar é a de similaridade de usuários: $ sim(x,y) = \frac{\sum_{ s \in S_{xy}} { (r_{x,s} - \bar{r_{x}}) (r_{y,s} - \bar{r_{y}}) } } { \sqrt{ \sum_{s \in S_{xy}}{ (r_{x,s} - \bar{r_{x}})^2 } \sum_{s \in S_{xy}}{ (r_{y,s} - \bar{r_{y}})^2 } } } $, onde: $S_x$: itens avaliados pelo usuário $x$; $S_y$: itens avaliados pelo usuário $y$; $S_{xy}$: o conjunto de todos os itens que foram avaliados tanto por x quanto por y, em outras palavras, a interseção dos conjuntos $S_x$ e $S_y$; $r_{x,s}$: rating do usuário $x$ para o item $s$; $r_{y,s}$: rating do usuário $y$ para o item $s$; $\bar{r_x}$: média do ratings dos filmes avaliados por $x$ $\bar{r_y}$: média do ratings dos filmes avaliados por $y$ Para tal, precisamos definir uma função que dados dois usuários, retorne os itens avaliados por ambos. 
End of explanation """ def similarity(id_user_x, id_user_y): """Retorna a similaridade de dois usuários baseada nos ratings dos filmes Keyword arguments: id_user_x -- id do usuário x id_user_y -- id do usuário y """ ratings_x, ratings_y = intersect_items(id_user_x, id_user_y) if(len(ratings_x) == 0): return 0.0 mean_rating_x = np.mean(ratings_x) mean_rating_y = np.mean(ratings_y) numerador = (ratings_x - mean_rating_x)*(ratings_y - mean_rating_y) numerador = np.sum(numerador) den_x = np.sum(np.power(ratings_x - mean_rating_x, 2)) den_y = np.sum(np.power(ratings_y - mean_rating_y, 2)) similarity_value = numerador / np.sqrt((den_x * den_y)) return similarity_value """ Explanation: O passo seguinte é implementarmos a função que calcular a similaridade dos usuários x e y. Observer que estamos utilizando o numpy para realizar algumas operações. Isso evita a utilização de for para realizar algumas operações sobre os vetores. End of explanation """ all_users = get_all_users() map_similarity = {} for i in all_users: for j in all_users: if(i < j): map_similarity[(i, j)] = similarity(i, j) joblib.dump(map_similarity, "usersimilarity.pkl") """ Explanation: Para o cálculo do predict rating é necessário pegar os Top $N$ usuários mais semelhantes ao usuário para qual desejamos prover a recomendação. Para encontrar essa lista, precisamos calcular a similaridade deste usuário com todos da base, ordenar a lista resultante pela similaridade e, por fim, retornar os TopN. Essa tarefa pode ser um tanto custosa para se fazer no processo de recomendação. Por isso, vamos pré-calcular esta similaridade e armazenar em um arquivo que será carregado quando necessário. A opção de salvar em arquivo é porque desta forma podemos utiliza-lo em outro momento quando a seção do Jupyter ou do Python for finalizada. Um outro motivo é que essa tarefa é um tanto custosa e, a depender do número de usuários, pode demorar. 
O código a seguir calcular essa similaridade e persiste o objeto com todas as similaridades de todos os usuários da base no arquivo usersimilarity.pkl. Só é necessário rodar este código na primeira vez que estiver rodando esse tutorial ou, se por um acaso, a base de usuário mudar. End of explanation """ user_similarity = joblib.load('usersimilarity.pkl') """ Explanation: Desta forma, podemos carregar a base de similaridades sempre que precisarmos. Se o arquivo já foi gerado, não é ncessário mais chamar o código anterior. Basta executar o código a seguir para carregar toda similaridade na memória. End of explanation """ def get_topneighbors_rateditem(id_user, id_movie, N): """Retorna os N usuários mais similares a id_user que avaliram o id_item Keyword arguments: id_user -- id do usuário id_item -- id do item N -- Número de usuários semelhantes retornados. """ all_users = get_all_users() similars = {} for user_ in all_users: items_user_ = movies_user_true[user_] if(id_user != user_): if id_movie in items_user_: similars[(id_user, user_)] = user_similarity[(id_user, user_)] sorted_ = sorted(similars.items(), key=operator.itemgetter(1), reverse=True) return sorted_[:N] """ Explanation: Com as informações de similaridade pré-calculadas podemos calcular o predict rating. $ r_{c,s} = \bar{r_c} + k * \sum_{c' \in \hat{C}}{sim(c,c') \times (r_{c',s} - \bar{r_{c'}})}$, onde: $c$ e $c'$: são usuários; $s$: um item; $k$: é um fator noramlizador dado por $k = \frac{1}{\sum_{c' \in \hat{C}}{|sim(c, c')|}}$ $sim(c, c'):$ a similaridade do usuário c com o usuário c' dada pela equação anterior; $\hat{C}:$ o conjunto dos $N$ usuários mais similares a $c$ que avaliaram o item $s$. O cálculo mais crítico desta equação é encontrar o conjunto $\hat{C}$, a lista dos $N$ usuários mais similares a $c$ que avaliaram o item $s$ que desejamos recomendar a $c$. Essa lista é definida pela função a seguir. 
End of explanation """ def predict_rating(id_user, id_movie): N = 20 items_user = movies_user_false[id_user] all_user_values = [items_user[x] for x in items_user] mean_user = np.mean(all_user_values) topN_users = get_topneighbors_rateditem(id_user, id_movie, N) sum_ = 0 sum_k = 0 for topuser in topN_users: similarity = topuser[1] user_u = topuser[0][1] rating_user_u = get_rating_by_user_movie(user_u, id_movie) items_user_u = movies_user_false[user_u] all_user_u_values = [items_user_u[x] for x in items_user_u] mean_user_u = np.mean(all_user_u_values) sum_ += similarity * (rating_user_u - mean_user_u) sum_k += abs(similarity) if sum_k == 0: k = 0 else: k = 1 / sum_k rating_final = mean_user + k * sum_ return rating_final print("Nota do usuário 1 para o filme 30:") predict_rating(1, 30) """ Explanation: Um vez que a função foi dos TopN foi definida, podemos usá-la na implementação na função do predict rating: End of explanation """ user = 1 all_movies = list(movies.index.values) all_user_movies = get_movies_by_user(user, list_=True) movies_to_predict = [x for x in all_movies if x not in all_user_movies] predict = {} for item in movies_to_predict: predict[item] = predict_rating(user, item) """ Explanation: Com a função de predição de ratings implementada podemos realizar uma recomendação para um usuário da base. Vamos realizar a recomendação para o usuário 1. Para isso, vamos calcular o rating do usuário 1 para todos os filmes da base que ele ainda não avaliou. Feito isso, vamos retornar e exibir os top 10 filmes mais bem avaliados. Essa tarefa demora em torno de 5 minutos End of explanation """ sorted_items = sorted(predict.items(), key=operator.itemgetter(1), reverse=True) sorted_items[:10] """ Explanation: Uma vez calculados os ratings, devemos ordenar e exibir os 10 mais bem avaliados. 
End of explanation """ count = 1 top10 = sorted_items[:10] print("Filmes recomendados para o usuário %s:" % user) for movie in top10: print("\t %.2d" % count, "[%.1f]" % movie[1], get_movie_title(movie[0]), ) count += 1 """ Explanation: Melhorando a forma de apresentação, temos: End of explanation """ # Imports necessários from imdbpie import Imdb from IPython.display import Image, display # Método que retorna o ID do IMDb dado o id do filme def get_imdb_id(id_movie): imdbid = int(links.loc[idx[id_movie], 'imdbId']) imdbid = "tt%.7d" % imdbid return imdbid imdbid = get_imdb_id(300) print(imdbid) # Carregando a biblioteca do IMDb imdb = Imdb() imdb = Imdb(anonymize=True) imdbid = get_imdb_id(1) title = imdb.get_title_by_id(imdbid) print(title) Image(title.cover_url) print(title.title) print(title.year) print(title.genres) print(title.release_date) cast = title.cast_summary for person in cast: print(person.name) reviews = imdb.get_title_reviews(imdbid, max_results=2) for review in reviews: print("Review by %s" % review.username) if(review.rating is not None): print("Rating: %.1f" % review.rating) print("Review: %s" % review.text) print() """ Explanation: <a id='imdb'></a> Usando a API do IMDb Podemos utilizar a API do IMDb para retornar algumas informações a mais sobre os filmes recomendados. O código a seguir importa a biblioteca e com as informações contidas na tabela links da Movielens acessa detalhes dos filmes na base do IMDb. Vamos utilizar a biblioteca imdbpie: https://github.com/richardasaurus/imdb-pie. 
End of explanation """ count = 1 top10 = sorted_items[:10] print("Filmes recomendados para o usuário %s:" % user) for movie in top10: imdbid = get_imdb_id(movie[0]) print("%.2d" % count, "[%.1f]" % movie[1], get_movie_title(movie[0]), ) if imdb.title_exists(imdbid): title = imdb.get_title_by_id(imdbid) display(Image(title.cover_url)) count += 1 """ Explanation: Basta acessar a página da API para ver todas as informações que podem ser retornadas a partir da API. Para finalizar vamos melhorar a nossa apresentação da recomendação utilizando a API do IMDb. End of explanation """
andre-martini/advanced-comp-2017
04-model-performance/lecture.ipynb
gpl-3.0
# IPython/Jupyter display setup: render figures inline at retina resolution.
%config InlineBackend.figure_format='retina'
%matplotlib inline
# Silence warnings
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
warnings.simplefilter(action="ignore", category=UserWarning)
warnings.simplefilter(action="ignore", category=RuntimeWarning)
import numpy as np
# Fixed seed so the random-search demo further down is reproducible.
np.random.seed(123)
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (8, 8)
plt.rcParams["font.size"] = 14
""" Explanation: Model performance Tuning and determining model performance. End of explanation """
In the past lectures we split our data into a training and testing set, then tried different values of a hyper-parameter and picked the one that performs best on the test dataset: End of explanation """ from sklearn.model_selection import cross_val_score scores = cross_val_score(clf, X_train, y_train, cv=5) print(scores) # as you have several estimates you can also compute a measure of the spread print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std())) """ Explanation: As we explore different values of C are we going to find a setting which does the equivalent of "memorising" the dataset? Is the score obtained on the testing dataset a fair estimate of the classifier's performance on new, unseen data? The answer is no. Three way split We need a third set, a validation set. To get the naming right: our whole dataset is X we split X into training and testing we further split the testing dataset into a testing and validation set Or we directly split X into training, validation, and testing. The testing dataset should be locked away into a vault. You can only use it once you have frozen all the parameters and choices involved in your model. One drawback of splitting things into three sets is that we reduce the number of points available for training, testing and validation. A method to maximise the amount of data we can use from our test and validation set is called cross-validation. We still create a testing dataset that gets locked in a vault. However we split our training dataset using cross-validation (CV). The idea is the following: Split the training data into k blocks. Select one of the k blocks as testing data, train your model on the other k-1 blocks. Change k, and repeat. This allows you to obtain k independent estimates of your models performance. 
<img src="cv.png" /> In scikit-learn you can obtain estimates using CV as follows: End of explanation """ from sklearn.model_selection import GridSearchCV parameter_grid = [{'kernel': ['rbf'], 'gamma': [1e-1, 1e-2, 1e-3, 1e-4], 'C': [1, 10, 20, 30, 40, 50, 100, 1000]}, {'kernel': ['linear'], 'C': [1, 10, 20, 30, 40, 50, 100, 1000]}] clf = GridSearchCV(svm.SVC(C=1), parameter_grid, cv=5) clf.fit(X_train, y_train) # best parameters and score. Should we report this as our performance? print(clf.best_score_, clf.best_params_) # test scores for the whole grid means = clf.cv_results_['mean_test_score'] stds = clf.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, clf.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std, params)) # score on the dataset we had in our vault clf.score(X_test, y_test) """ Explanation: Tune up So far we only ever tuned one parameter at a time for a model. Most models have several parameters that influence each other. To tune them all in one go we can perform a grid search. 
We specify a grid of parameters to evaluate and for each point we perform CV: End of explanation """ from sklearn.model_selection import RandomizedSearchCV import scipy param_dist = {'C': scipy.stats.expon(scale=100), 'gamma': scipy.stats.expon(scale=.1), 'kernel': ['rbf', 'linear']} # n_iter controls how many points will be sampled clf = RandomizedSearchCV(svm.SVC(), param_dist, cv=5, n_iter=12) clf.fit(X_train, y_train) # best parameters print(clf.best_params_) # test scores for all points that were evaluated means = clf.cv_results_['mean_test_score'] stds = clf.cv_results_['std_test_score'] for mean, std, params in zip(means, stds, clf.cv_results_['params']): print("%0.3f (+/-%0.03f) for %r" % (mean, std, params)) """ Explanation: Some noteworthy points: tune the parameter grid to find the right range as often as you like you should refit the best model on the complete training + validation data the score obtained by the grid search will be optimistic (random upward fluctuations) you must not evaluate the performance of different settings on the test set start with a coarse grid of extreme ranges, then zoom in on interesting regions can be computationally pretty expensive (five folds on 20 grid points -> 100 fits) "we have to go one level deeper", cross-validated cross-validation. Random tune up Exhaustive grid searches can be very expensive. In particular when one (or more) of the dimensions are irrelevant to the problem. In this case it is better to perform a random search for the best hyper-parameters. 
End of explanation """ def f(x): return (0.5-x[0])**2 X1 = np.linspace(-5, 5, 5) X2 = np.linspace(-5, 5, 5) param_grid = [] for x1 in X1: for x2 in X2: param_grid.append((x1, x2)) param_grid = np.array(param_grid) plt.scatter(param_grid[:, 0], param_grid[:, 1], c=[f(x) for x in param_grid], s=260) plt.colorbar() plt.xlabel("X1") plt.ylabel("X2"); vals = [] for x1 in X1: for x2 in X2: vals.append((x1, x2, f((x1, x2)))) vals = np.array(vals) plt.plot(vals[:, 0], vals[:, 2], 'o') plt.xlabel("X1") plt.ylabel("f(X)"); vals = [] for n in range(25): x1 = np.random.uniform(-5, 5) x2 = np.random.uniform(-5, 5) vals.append((x1, x2, f((x1, x2)))) vals = np.array(vals) plt.plot(vals[:, 0], vals[:, 2], 'o') plt.xlabel("X1") plt.ylabel("f(X)"); plt.scatter(vals[:, 0], vals[:, 1], c=vals[:, 2], s=260) plt.colorbar() plt.xlabel("X1") plt.ylabel("X2"); """ Explanation: Why is random better? For illustration purposes let's use a function which has two parameters X1 and X2. However the output only depends on one of them: X1. End of explanation """
nslatysheva/data_science_blogging
polished_prediction/polished_prediction.ipynb
gpl-3.0
import wget import pandas as pd # Import the dataset data_url = 'https://raw.githubusercontent.com/nslatysheva/data_science_blogging/master/datasets/wine/winequality-red.csv' dataset = wget.download(data_url) dataset = pd.read_csv(dataset, sep=";") # Take a peak at the first few columns of the data first_5_columns = dataset.columns[0:5] dataset[first_5_columns].head() """ Explanation: Scanning hyperspace: how to tune machine learning models Introduction When doing machine learning using Python's scikit-learn library, we can often get reasonable model performance by using out-of-the-box settings. However, the payoff can be huge if you invest at least some time into tuning models to your specific problem and dataset. In the previous post, we explored the concepts of overfitting, cross-validation, and the bias-variance tradeoff. These ideas turn out to be central to doing a good job at optimizing the hyperparameters (roughly, the settings) of algorithms. In this post, we will explore the concepts behind hyperparameter optimization and demonstrate the process of tuning and training a random forest classifier. You'll be working with the famous (well, machine learning famous!) wine dataset, which contains features of different quality wines, like the acidity and sugar content. Our goal is to tune and apply a random forest to these features in order to predict whether a given wine is nice or not. The steps we'll cover in this blog post can be summarized as follows: In the next two posts, you will learn about different strategies for model optimization and how to tune a support vector machine and logistic regression classifier. You will also find out how to take several different tuned models and combine them to build an ensemble model, which is a type of aggregated meta-model that often has higher accuracy and lower overfitting than its constituents. Let's get cracking. Loading and exploring the dataset We start off by collecting the dataset. 
It can be found both online and in our GitHub repository, so we can also just fetch it via wget (note: make sure you first type pip install wget into your terminal since wget is not a preinstalled Python library). It will download a copy of the dataset to your current working directory. End of explanation """ # Examine shape of dataset and the column names print (dataset.shape) print (dataset.columns.values) """ Explanation: You can examine the dimensions of the dataset and the column names: End of explanation """ # Summarise feature values dataset.describe()[first_5_columns] """ Explanation: So, it looks like you have a dozen features to play with, and just under 1600 data points. Get some summary statistics on the features using describe(): End of explanation """ # using a lambda function to bin quality scores dataset['quality_is_high'] = dataset.quality.apply(lambda x: 1 if x >= 6 else 0) """ Explanation: The distribution of the outcome variable quality is a bit funky - the values are mostly 5 and 6 (how would you check this?). This could get a bit irksome later on, so go ahead and recode the quality scores into something more convenient. One idea would be to label wines as being high quality (e.g. if their score is 6 or higher) and low quality otherwise. You could encode this with a 1 representing high quality and 0 representing low quality, like so: End of explanation """ import numpy as np # Convert the dataframe to a numpy array and split the # data into an input matrix X and class label vector y npArray = np.array(dataset) X = npArray[:,:-2].astype(float) y = npArray[:,-1] """ Explanation: If you're interested in getting to know the wine dataset further, check out Charles' previous post on using the plotly library to make interactive plots of the wine features here. Now convert the pandas dataframe into a numpy array and isolate the outcome variable you'd like to predict ('quality_is_high'). 
This conversion is needed to feed the data into a machine learning pipeline: End of explanation """ from sklearn.cross_validation import train_test_split # Split into training and test sets XTrain, XTest, yTrain, yTest = train_test_split(X, y, random_state=1) """ Explanation: Next up, split the dataset into a training and test set. The training set will be used to develop and tune our predictive models. The test will be completely left alone until the very end, at which point you'll run your finished models on it. Having a test set will allow you to get a good estimate of how well your models would perform out in the wild on unseen data, which is what you're actually interested in when you model data (see previous post). End of explanation """ from sklearn.ensemble import RandomForestClassifier from sklearn import metrics rf = RandomForestClassifier() rf.fit(XTrain, yTrain) rf_predictions = rf.predict(XTest) print (metrics.classification_report(yTest, rf_predictions)) print ("Overall Accuracy:", round(metrics.accuracy_score(yTest, rf_predictions),4)) """ Explanation: You are now going to try to predict high quality wine with a random forest classifier. Chapter 8 of the Introduction to Statistical Learning book provides a truly excellent introduction to the theory behind classification trees, bagged trees, and random forests. It's worth a read if you have time. Briefly, random forests build a collection of classification trees, which each try to classify data points by recursively splitting the data on the features (and feature values) that separate the classes best. Each tree is trained on bootstrapped data, and each bifurcation point is only allowed to 'see' a subset of the available variables when deciding on the best split. So, an element of randomness is introduced when constructing each tree, which means that a variety of different trees are built. The random forest ensembles these base learners together, i.e. it combines these trees into an aggregated model. 
When making a new prediction, the individual trees each make their individual predictions, and the random forest surveys these opinions and accepts the majority position. This often leads to improved accuracy, generalizability, and stability in the predictions. Out of the box, scikit's random forest classifier performs reasonably well on the wine dataset: End of explanation """ from sklearn.neighbors import KNeighborsClassifier # Create a default kNN classifer and print params knn_default = KNeighborsClassifier() print (knn_default.get_params) """ Explanation: We get an overall accuracy around the 0.77-0.79 mark (try running the code block a few times, or experiment with setting different seeds using random_state). Next up, you are going to learn how to pick the best values for the hyperparameters of the random forest algorithm in order to get better models with (hopefully!) even higher accuracy than this baseline. Better modelling through hyperparameter optimization We've glossed over what a hyperparameter actually is. Let's explore the topic now. Often, when setting out to train a machine learning algorithm on your dataset of interest, you must first specify a number of arguments or hyperparameters (HPs). An HP is just a variable than influences the performance of your model, but isn't directly tuned during model training. For example, when using the k-nearest neighbours algorithm to do classification (see these two previous posts), the value of k (the number of nearest neighbours the model considers) is a hyperparameter that must be supplied in advance. As another example, when building a neural network, the number of layers in the network and the number of neurons per layer are both hyperparameters that must be specified before training commences. By contrast, the weights and biases in a neural network are parameters (not hyperparameters) because they are explicitly tuned during training. 
It turns out that scikit-learn generally provides reasonable hyperparameter default values, such that it is possible to quickly build an e.g. kNN classifier by simply typing KNeighborsClassifier() and then fitting it to your data. Behind the scenes, we can can get the documentation on what hyperparameter values that the classifier has automatically assumed, but you can also examine models directly using get_params: End of explanation """ # manually specifying some HP values parameter_combinations = [ {"n_estimators": 2, "max_features": 10}, # parameter combination 1... {"n_estimators": 5, "max_features": 3}, # 2 {"n_estimators": 9, "max_features": 7} ] """ Explanation: So you see that the default kNN classifier has the number of nearest neighbours it considers set to 5 (n_neighbors=5) and gives all datapoints equal importance (weights=uniform), and so on. Often, the default hyperparameters values will do a decent job (as we saw above with the random forest example), so it may be tempting to skip the topic of model tuning completely. However, it is basically always a good idea to do some level of hyperparameter optimization, due to the potential for substantial improvements in your learning algorithm's performance. But how do you know what values to set the hyperparameters to in order to get the best performance from your learning algorithms? You optimize hyperparameters in exactly the way that you might expect - you try different values and see what works best. However, some care is needed when deciding how exactly to measure if certain values work well, and which strategy to use to systematically explore hyperparameter space. In a later post, we will introduce model ensembling, in which individual models can be considered 'hyper-hyper parameters' (&trade;; &copy;; &reg;; patent pending; T-shirts printing). 
Tuning your random forest In order to build the best possible model that does a good job at describing the underlying trends in a dataset, we need to pick the right HP values. As we mentioned above, HPs are not optimised while an algorithm is learning. Hence, we need other strategies to optimise them. The most basic way to do this would be just to test different possible values for the HPs and see how the model performs. In a random forest, some hyperparameters we can optimise are n_estimators and max_features. n_estimators controls the number of trees in the forest - the more the better (with diminishing returns), but more trees come at the expense of longer training time. max_features controls the size of the random selection of features the algorithm is allowed to consider when splitting a node. Larger values help if the individual predictors aren't that great. Smaller values can be helpful if the features in the dataset are decent and/or highly correlated. Let's try out some random HP values. 
End of explanation """ import itertools # test out different HP combinations for hp_combo in parameter_combinations: # Train and output accuracies rf = RandomForestClassifier(n_estimators=hp_combo["n_estimators"], max_features=hp_combo["max_features"]) rf.fit(XTrain, yTrain) RF_predictions = rf.predict(XTest) print ('When n_estimators is {} and max_features is {}, test set accuracy is {}'.format( hp_combo["n_estimators"], hp_combo["max_features"], round(metrics.accuracy_score(yTest, RF_predictions),4)) ) """ Explanation: We can manually write a small loop to test out how well the different combinations of these potential HP values fare (later, we'll find out better ways to do this): End of explanation """ from sklearn.grid_search import GridSearchCV, RandomizedSearchCV # Search for good hyperparameter values # Specify values to grid search over n_estimators = list(np.arange(10, 60, 20)) max_features = list(np.arange(2, X.shape[1], 3)) hyperparameters = {'n_estimators': n_estimators, 'max_features': max_features} print (hyperparameters) # Grid search using cross-validation gridCV = GridSearchCV(RandomForestClassifier(), param_grid=hyperparameters, cv=10, n_jobs=4) gridCV.fit(XTrain, yTrain) # Identify optimal hyperparameter values best_n_estim = gridCV.best_params_['n_estimators'] best_max_features = gridCV.best_params_['max_features'] print("The best performing n_estimators value is: {:5.1f}".format(best_n_estim)) print("The best performing max_features value is: {:5.1f}".format(best_max_features)) # Train classifier using optimal hyperparameter values # We could have also gotten this model out from gridCV.best_estimator_ rf = RandomForestClassifier(n_estimators=best_n_estim, max_features=best_max_features) rf.fit(XTrain, yTrain) RF_predictions = rf.predict(XTest) print (metrics.classification_report(yTest, RF_predictions)) print ("Overall Accuracy:", round(metrics.accuracy_score(yTest, RF_predictions),2)) """ Explanation: Looks like the last combinations of HPs 
might be doing better. However, manually searching for the best HPs in this way is not efficient, it's a bit random and liable to missing good combinations, and could potentially lead to models that perform well on this specific dataset, but do not generalise well to new data. The specific phenomenon of building models that do not generalise well, or that are fitting too closely to the dataset, is called overfitting. Here, you trained different models on the training dataset using manually selected HP values. We then tested on the test dataset. This is not as bad as training a model and evaluating it on the training set, but it is still bad - since you repeatedly evaluated on the test dataset, knowledge of the test set can leak into the model bulding phase. You are at risk of inadvertenly learning something about the test set, and hence are susceptible to overfitting. k-fold cross validation for hyperparameter tuning So, you have to be careful not to overfit to our data. But wait, didn't we also say that the test set is not meant to be touched until you are completely done training our model? How are you meant to optimize our hyperparameters then? Enter k-fold cross-validation, which is a handy technique for measuring a model's performance using only the training set. k-fold CV is a general method (see an explanation here), and is not specific to hyperparameter optimization, but is very useful for that purpose. We simply try out different HP values, get several different estimates of model performance for each HP value (or combination of HP values), and choose the model with the lowest CV error. The process looks like this: In the context of HP optimization, we perform k-fold cross validation together with grid search or randomized search to get a more robust estimate of the model performance associated with specific HP values. 
Grid search Traditionally and perhaps most intuitively, scanning for good HPs values can be done with the grid search (also called parameter sweep). This strategy exhaustively searches through some manually prespecified HP values and reports the best option. It is common to try to optimize multiple HPs simultaneously - grid search tries each combination of HPs in turn, hence the name. This is a more convenient and complete way of searching through hyperparameter space than manually specifying combinations. The combination of grid search and k-fold cross validation is very popular for finding the models with good performance and generalisability. So, in HP optimisation we are actually trying to do two things: (i) find the best possible combination of HPs that define a model and (ii) making sure that the pick generalises well to new data. In order to address the second concern, CV is often the method of choice. Scikit-learn makes this process very easy and slick, and even supports parallel distributing of the search (via the n_jobs argument). You use grid search to tune a random forest like this: End of explanation """
zomansud/coursera
ml-classification/week-6/module-9-precision-recall-assignment-blank.ipynb
mit
import graphlab from __future__ import division import numpy as np graphlab.canvas.set_target('ipynb') """ Explanation: Exploring precision and recall The goal of this second notebook is to understand precision-recall in the context of classifiers. Use Amazon review data in its entirety. Train a logistic regression model. Explore various evaluation metrics: accuracy, confusion matrix, precision, recall. Explore how various metrics can be combined to produce a cost of making an error. Explore precision and recall curves. Because we are using the full Amazon review dataset (not a subset of words or reviews), in this assignment we return to using GraphLab Create for its efficiency. As usual, let's start by firing up GraphLab Create. Make sure you have the latest version of GraphLab Create (1.8.3 or later). If you don't find the decision tree module, then you would need to upgrade graphlab-create using pip install graphlab-create --upgrade See this page for detailed instructions on upgrading. End of explanation """ products = graphlab.SFrame('amazon_baby.gl/') """ Explanation: Load amazon review dataset End of explanation """ def remove_punctuation(text): import string return text.translate(None, string.punctuation) # Remove punctuation. review_clean = products['review'].apply(remove_punctuation) # Count words products['word_count'] = graphlab.text_analytics.count_words(review_clean) # Drop neutral sentiment reviews. products = products[products['rating'] != 3] # Positive sentiment to +1 and negative sentiment to -1 products['sentiment'] = products['rating'].apply(lambda rating : +1 if rating > 3 else -1) """ Explanation: Extract word counts and sentiments As in the first assignment of this course, we compute the word counts for individual words and extract positive and negative sentiments from ratings. To summarize, we perform the following: Remove punctuation. Remove reviews with "neutral" sentiment (rating 3). 
Set reviews with rating 4 or more to be positive and those with 2 or less to be negative. End of explanation """ products """ Explanation: Now, let's remember what the dataset looks like by taking a quick peek: End of explanation """ train_data, test_data = products.random_split(.8, seed=1) """ Explanation: Split data into training and test sets We split the data into a 80-20 split where 80% is in the training set and 20% is in the test set. End of explanation """ model = graphlab.logistic_classifier.create(train_data, target='sentiment', features=['word_count'], validation_set=None) """ Explanation: Train a logistic regression classifier We will now train a logistic regression classifier with sentiment as the target and word_count as the features. We will set validation_set=None to make sure everyone gets exactly the same results. Remember, even though we now know how to implement logistic regression, we will use GraphLab Create for its efficiency at processing this Amazon dataset in its entirety. The focus of this assignment is instead on the topic of precision and recall. End of explanation """ accuracy= model.evaluate(test_data, metric='accuracy')['accuracy'] print "Test Accuracy: %s" % accuracy """ Explanation: Model Evaluation We will explore the advanced model evaluation concepts that were discussed in the lectures. Accuracy One performance metric we will use for our more advanced exploration is accuracy, which we have seen many times in past assignments. Recall that the accuracy is given by $$ \mbox{accuracy} = \frac{\mbox{# correctly classified data points}}{\mbox{# total data points}} $$ To obtain the accuracy of our trained models using GraphLab Create, simply pass the option metric='accuracy' to the evaluate function. 
We compute the accuracy of our logistic regression model on the test_data as follows: End of explanation """ baseline = len(test_data[test_data['sentiment'] == 1])/len(test_data) print "Baseline accuracy (majority class classifier): %s" % baseline """ Explanation: Baseline: Majority class prediction Recall from an earlier assignment that we used the majority class classifier as a baseline (i.e reference) model for a point of comparison with a more sophisticated classifier. The majority classifier model predicts the majority class for all data points. Typically, a good model should beat the majority class classifier. Since the majority class in this dataset is the positive class (i.e., there are more positive than negative reviews), the accuracy of the majority class classifier can be computed as follows: End of explanation """ confusion_matrix = model.evaluate(test_data, metric='confusion_matrix')['confusion_matrix'] confusion_matrix """ Explanation: Quiz Question: Using accuracy as the evaluation metric, was our logistic regression model better than the baseline (majority class classifier)? Confusion Matrix The accuracy, while convenient, does not tell the whole story. For a fuller picture, we turn to the confusion matrix. 
In the case of binary classification, the confusion matrix is a 2-by-2 matrix laying out correct and incorrect predictions made in each label as follows: +---------------------------------------------+ | Predicted label | +----------------------+----------------------+ | (+1) | (-1) | +-------+-----+----------------------+----------------------+ | True |(+1) | # of true positives | # of false negatives | | label +-----+----------------------+----------------------+ | |(-1) | # of false positives | # of true negatives | +-------+-----+----------------------+----------------------+ To print out the confusion matrix for a classifier, use metric='confusion_matrix': End of explanation """ round(1443 / (26689 + 1443 ), 2) """ Explanation: Quiz Question: How many predicted values in the test set are false positives? End of explanation """ 100*1443 + 1*1406 """ Explanation: Computing the cost of mistakes Put yourself in the shoes of a manufacturer that sells a baby product on Amazon.com and you want to monitor your product's reviews in order to respond to complaints. Even a few negative reviews may generate a lot of bad publicity about the product. So you don't want to miss any reviews with negative sentiments --- you'd rather put up with false alarms about potentially negative reviews instead of missing negative reviews entirely. In other words, false positives cost more than false negatives. (It may be the other way around for other scenarios, but let's stick with the manufacturer's scenario for now.) Suppose you know the costs involved in each kind of mistake: 1. \$100 for each false positive. 2. \$1 for each false negative. 3. Correctly classified reviews incur no cost. Quiz Question: Given the stipulation, what is the cost associated with the logistic regression classifier's performance on the test set? 
End of explanation """ precision = model.evaluate(test_data, metric='precision')['precision'] print "Precision on test data: %s" % precision """ Explanation: Precision and Recall You may not have exact dollar amounts for each kind of mistake. Instead, you may simply prefer to reduce the percentage of false positives to be less than, say, 3.5% of all positive predictions. This is where precision comes in: $$ [\text{precision}] = \frac{[\text{# positive data points with positive predicitions}]}{\text{[# all data points with positive predictions]}} = \frac{[\text{# true positives}]}{[\text{# true positives}] + [\text{# false positives}]} $$ So to keep the percentage of false positives below 3.5% of positive predictions, we must raise the precision to 96.5% or higher. First, let us compute the precision of the logistic regression classifier on the test_data. End of explanation """ round(1 - precision, 2) """ Explanation: Quiz Question: Out of all reviews in the test set that are predicted to be positive, what fraction of them are false positives? (Round to the second decimal place e.g. 0.25) End of explanation """ recall = model.evaluate(test_data, metric='recall')['recall'] print "Recall on test data: %s" % recall """ Explanation: Quiz Question: Based on what we learned in lecture, if we wanted to reduce this fraction of false positives to be below 3.5%, we would: (see the quiz) A complementary metric is recall, which measures the ratio between the number of true positives and that of (ground-truth) positive reviews: $$ [\text{recall}] = \frac{[\text{# positive data points with positive predicitions}]}{\text{[# all positive data points]}} = \frac{[\text{# true positives}]}{[\text{# true positives}] + [\text{# false negatives}]} $$ Let us compute the recall on the test_data. End of explanation """ def apply_threshold(probabilities, threshold): ### YOUR CODE GOES HERE # +1 if >= threshold and -1 otherwise. 
return probabilities.apply(lambda x: +1 if x >= threshold else -1) """ Explanation: Quiz Question: What fraction of the positive reviews in the test_set were correctly predicted as positive by the classifier? Quiz Question: What is the recall value for a classifier that predicts +1 for all data points in the test_data? Precision-recall tradeoff In this part, we will explore the trade-off between precision and recall discussed in the lecture. We first examine what happens when we use a different threshold value for making class predictions. We then explore a range of threshold values and plot the associated precision-recall curve. Varying the threshold False positives are costly in our example, so we may want to be more conservative about making positive predictions. To achieve this, instead of thresholding class probabilities at 0.5, we can choose a higher threshold. Write a function called apply_threshold that accepts two things * probabilities (an SArray of probability values) * threshold (a float between 0 and 1). The function should return an SArray, where each element is set to +1 or -1 depending whether the corresponding probability exceeds threshold. End of explanation """ probabilities = model.predict(test_data, output_type='probability') predictions_with_default_threshold = apply_threshold(probabilities, 0.5) predictions_with_high_threshold = apply_threshold(probabilities, 0.9) print "Number of positive predicted reviews (threshold = 0.5): %s" % (predictions_with_default_threshold == 1).sum() print "Number of positive predicted reviews (threshold = 0.9): %s" % (predictions_with_high_threshold == 1).sum() """ Explanation: Run prediction with output_type='probability' to get the list of probability values. Then use thresholds set at 0.5 (default) and 0.9 to make predictions from these probability values. 
End of explanation """ # Threshold = 0.5 precision_with_default_threshold = graphlab.evaluation.precision(test_data['sentiment'], predictions_with_default_threshold) recall_with_default_threshold = graphlab.evaluation.recall(test_data['sentiment'], predictions_with_default_threshold) # Threshold = 0.9 precision_with_high_threshold = graphlab.evaluation.precision(test_data['sentiment'], predictions_with_high_threshold) recall_with_high_threshold = graphlab.evaluation.recall(test_data['sentiment'], predictions_with_high_threshold) print "Precision (threshold = 0.5): %s" % precision_with_default_threshold print "Recall (threshold = 0.5) : %s" % recall_with_default_threshold print "Precision (threshold = 0.9): %s" % precision_with_high_threshold print "Recall (threshold = 0.9) : %s" % recall_with_high_threshold """ Explanation: Quiz Question: What happens to the number of positive predicted reviews as the threshold increased from 0.5 to 0.9? Exploring the associated precision and recall as the threshold varies By changing the probability threshold, it is possible to influence precision and recall. We can explore this as follows: End of explanation """ threshold_values = np.linspace(0.5, 1, num=100) print threshold_values """ Explanation: Quiz Question (variant 1): Does the precision increase with a higher threshold? Quiz Question (variant 2): Does the recall increase with a higher threshold? Precision-recall curve Now, we will explore various different values of tresholds, compute the precision and recall scores, and then plot the precision-recall curve. 
End of explanation """ precision_all = [] recall_all = [] probabilities = model.predict(test_data, output_type='probability') for threshold in threshold_values: predictions = apply_threshold(probabilities, threshold) precision = graphlab.evaluation.precision(test_data['sentiment'], predictions) recall = graphlab.evaluation.recall(test_data['sentiment'], predictions) precision_all.append(precision) recall_all.append(recall) """ Explanation: For each of the values of threshold, we compute the precision and recall scores. End of explanation """ import matplotlib.pyplot as plt %matplotlib inline def plot_pr_curve(precision, recall, title): plt.rcParams['figure.figsize'] = 7, 5 plt.locator_params(axis = 'x', nbins = 5) plt.plot(precision, recall, 'b-', linewidth=4.0, color = '#B0017F') plt.title(title) plt.xlabel('Precision') plt.ylabel('Recall') plt.rcParams.update({'font.size': 16}) plot_pr_curve(precision_all, recall_all, 'Precision recall curve (all)') """ Explanation: Now, let's plot the precision-recall curve to visualize the precision-recall tradeoff as we vary the threshold. End of explanation """ for i, p in enumerate(precision_all): print str(i) + " -> " + str(p) round(threshold_values[67], 3) """ Explanation: Quiz Question: Among all the threshold values tried, what is the smallest threshold value that achieves a precision of 96.5% or better? Round your answer to 3 decimal places. End of explanation """ predictions_with_98_threshold = apply_threshold(probabilities, 0.98) cm = graphlab.evaluation.confusion_matrix(test_data['sentiment'], predictions_with_98_threshold) cm """ Explanation: Quiz Question: Using threshold = 0.98, how many false negatives do we get on the test_data? (Hint: You may use the graphlab.evaluation.confusion_matrix function implemented in GraphLab Create.) 
End of explanation """ baby_reviews = test_data[test_data['name'].apply(lambda x: 'baby' in x.lower())] """ Explanation: This is the number of false negatives (i.e the number of reviews to look at when not needed) that we have to deal with using this classifier. Evaluating specific search terms So far, we looked at the number of false positives for the entire test set. In this section, let's select reviews using a specific search term and optimize the precision on these reviews only. After all, a manufacturer would be interested in tuning the false positive rate just for their products (the reviews they want to read) rather than that of the entire set of products on Amazon. Precision-Recall on all baby related items From the test set, select all the reviews for all products with the word 'baby' in them. End of explanation """ probabilities = model.predict(baby_reviews, output_type='probability') """ Explanation: Now, let's predict the probability of classifying these reviews as positive: End of explanation """ threshold_values = np.linspace(0.5, 1, num=100) """ Explanation: Let's plot the precision-recall curve for the baby_reviews dataset. First, let's consider the following threshold_values ranging from 0.5 to 1: End of explanation """ precision_all = [] recall_all = [] for threshold in threshold_values: # Make predictions. Use the `apply_threshold` function ## YOUR CODE HERE predictions = apply_threshold(probabilities, threshold) # Calculate the precision. # YOUR CODE HERE precision = graphlab.evaluation.precision(baby_reviews['sentiment'], predictions) # YOUR CODE HERE recall = graphlab.evaluation.recall(baby_reviews['sentiment'], predictions) # Append the precision and recall scores. precision_all.append(precision) recall_all.append(recall) """ Explanation: Second, as we did above, let's compute precision and recall for each value in threshold_values on the baby_reviews dataset. Complete the code block below. 
End of explanation """ round(threshold_values[72], 3) for i, p in enumerate(precision_all): print str(i) + " -> " + str(p) """ Explanation: Quiz Question: Among all the threshold values tried, what is the smallest threshold value that achieves a precision of 96.5% or better for the reviews of data in baby_reviews? Round your answer to 3 decimal places. End of explanation """ plot_pr_curve(precision_all, recall_all, "Precision-Recall (Baby)") """ Explanation: Quiz Question: Is this threshold value smaller or larger than the threshold used for the entire dataset to achieve the same specified precision of 96.5%? Finally, let's plot the precision recall curve. End of explanation """
empet/PSCourse
CryptographicHashFunctions.ipynb
bsd-3-clause
import hashlib

# NOTE(review): this cell was written for Python 2 (print statements, str
# arguments to hashlib). Ported to Python 3: print is a function and hash
# objects accept only bytes, so the data literals are now byte strings.
# The Romanian output messages are preserved verbatim.
mes = hashlib.md5()  # declare mes as an empty MD5 hash object
# update() absorbs (concatenates) a block of data into the hash state
mes.update(b'anul1CTI@yahoogroups.com')
s = mes.hexdigest()
print('valoarea hash in hexa prin MD5 a adresei email este', s)
# each hex digit encodes 4 bits, so the digest is len(s) * 4 bits long
print('lungimea in biti a valorii hash este:', len(s)*4)
"""
Explanation: Calculul valorilor hash folosind functiile hash criptografice MD5, SHA1, SHA2
Pentru a intelege aplicatiile problemei coliziunilor (zilei de nastere) din Cursul 8,
la atacul asupra functiilor hash criptografice, ilustram in acest notebook valorile
acestor functii pe diferite blocuri de date.
Python pune la dispozitie modulul hashlib care contine functiile ce genereaza valorile
hash ale unor blocuri de date prin functiile hash criptografice MD5, SHA1, SHA2.
MD5 este o functie hash cu valori in multimea stringurilor de 128 de biti, ${0,1}^{128}$.
SHA1 ia valori in ${0,1}^{160}$.
SHA2 consta din mai multe functii hash, cu valori pe $224, 256, 384, 512$ biti.
Pentru a ilustra ca blocuri de date de lungimi diferite sunt aplicate de o functie hash
intr-un string avand acelasi numar fixat de biti, calculam mai intai valoarea hash prin
MD5 a blocului de date 'anul1CTI@yahoogroups.com' si apoi a blocului mai lung,
'anul1CTI@yahoo.com grupul anului 1':
End of explanation
"""

# A second update() call concatenates: the object now hashes the original
# address followed by this suffix, yet the digest length stays 128 bits.
mes.update(b' grupul anului 1')
sn = mes.hexdigest()
print('valoarea hash in hexa prin MD5 a adresei email la care s-a concatenat un string este', sn)
print('lungimea in biti a noii valorii hash este:', len(sn)*4)
"""
Explanation: Concatenam acum la adresa email, stringul ' grupul anului 1' si afisam
valoarea hash corespunzatoare si lungimea ei in biti:
End of explanation
"""

# Same experiment with SHA-1: 160-bit digest regardless of input length.
m1 = hashlib.sha1(b'anul1CTI@yahoogroups.com')
s1 = m1.hexdigest()
print('valoarea hash in hexa prin SHA1 a adresei email este', s1)
print('lungimea in biti a valorii hash este:', len(s1)*4)
m1.update(b' grupul anului 1')
s1n = m1.hexdigest()
print('valoarea hash in hexa prin SHA1 a adresei email la care s-a concatenat un string este', s1n)
print('lungimea in biti a noii valorii hash este:', len(s1n)*4)
"""
Explanation: Sa experimentam acum in acelasi mod functia hash SHA1:
End of explanation
"""

# SHA-256, a member of the SHA-2 family: 256-bit digest.
mesaj = hashlib.sha256(b'Contul meu secret dintr-un paradis financiar este 6421 1345 2974 6852')
valh = mesaj.hexdigest()
print('Valoarea hash prin SHA2 pe 256 biti este:', valh)
print('lungimea in biti a valorii hash:', len(valh)*4)

from IPython.core.display import HTML


def css_styling():
    """Load the notebook's custom stylesheet and return it wrapped for display."""
    styles = open("./styles/custom.css", "r").read()
    return HTML(styles)

css_styling()
"""
Explanation: Exemplu folosind SHA2:
End of explanation
"""
jonbruner/tensorflow-basics
save-load/save.ipynb
mpl-2.0
%matplotlib inline import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data', one_hot=True) import tensorflow as tf sess = tf.InteractiveSession() def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) def conv2d(x, W): return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') def max_pool_2x2(x): return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') x = tf.placeholder(tf.float32, shape=[None, 784], name="input") y_ = tf.placeholder(tf.float32, shape=[None, 10]) W = tf.Variable(tf.zeros([784,10])) b = tf.Variable(tf.zeros([10])) W_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) x_image = tf.reshape(x, [-1,28,28,1]) h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) h_pool1 = max_pool_2x2(h_conv1) W_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) h_pool2 = max_pool_2x2(h_conv2) W_fc1 = weight_variable([7 * 7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) keep_prob = tf.placeholder(tf.float32, name="keep_prob") h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2, name="output") sess.run(tf.initialize_all_variables()) cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1])) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) sess.run(tf.initialize_all_variables()) # To make this run faster, we'll only run 1,000 
iterations of the training process. for i in range(1000): batch = mnist.train.next_batch(50) if i%100 == 0: train_accuracy = accuracy.eval(feed_dict={ x:batch[0], y_: batch[1], keep_prob: 1.0}) print("step %d, training accuracy %g"%(i, train_accuracy)) train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) print("test accuracy %g"%accuracy.eval(feed_dict={ x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) """ Explanation: 1: Save This notebook, along with its companion notebook load, illustrates the process of saving a neural network model and its weights using TensorFlow, then loading them to be run as part of a separate program. Some details of saving and loading are non-obvious, and require a clear understanding of TensorFlow's abstraction. As a programmer of perhaps intermediate skills, it took me a while to grasp all of this, but once you've figured it out, saving and loading is very straightforward. Build the model The code in the block below is mostly copied straight from TensorFlow's Deep MNIST for Experts tutorial, which is a good introduction to convolutional neural networks. This code trains a CNN to classify handwritten images using the MNIST dataset. Read the tutorial if you're interested in an introduction to convolution and/or TensorFlow; I'll reproduce the tutorial code as compactly as possible in order to move on to the saving bit. I've made just a few modifications to the code below from what's published in TensorFlow's tutorial: I'm only running 1,000 iterations of the training step to save time (instead of 20,000) because accuracy isn't essential here. And I've added name flags to three variables: x, keep_prob, and y_conv. The addition of names will make sense later. End of explanation """ image_a = mnist.validation.images[154] plt.imshow(image_a.reshape([28, 28]), cmap='Greys') """ Explanation: Run the model Let's take a moment to review how this model can be used to classify a new image. 
We'll begin with an arbitrary image from the MNIST validation set and display it. End of explanation """ image_a = image_a.reshape([1, 784]) result = sess.run(y_conv, feed_dict={x:image_a, keep_prob:1}) print(result) print(sess.run(tf.argmax(result, 1))) """ Explanation: So, this image is the digit 9. Let's run it through the model to see if the model agrees that it's a 9. The output of the model is the variable y_conv. To evaluate y_conv, we need to call sess.run, and we tell TensorFlow what to use as the model's inputs by feeding the inputs as a feed_dict. The MNIST convlutional neural network requires two inputs: x, which is the image we're trying to classify, and keep_prob, which is used in the dropout step in training, but will be 1 when we run the model here. The output of the model, y_conv, is a tensor consisting of a 1 x 10 list of probabilities; for any digit n between 0 and 9, y_conv[n] is the model's estimate of the probability that the image we fed in as x represents n. The last line below prints the index of the higest value in the y_conv list, which should be the digit we're looking for. We begin by reshaping image_a into a 1 x 784 matrix, which is the shape that x anticipates. End of explanation """ saver = tf.train.Saver() save_path = saver.save(sess, "saved_mnist_cnn.ckpt") print("Model saved to %s" % save_path) """ Explanation: The model will prodce a slightly different result each time it's trained, but I got what I'm expecting: the model correctly says this handwritten digit is a 9, with roughly 82% certainty. The next closest guess is that it's a 7, with 17% certainty. That makes sense; this digit does indeed look a bit like a 7. Save the model So, we've trained this model and we've seen how to run it. Now we want to save it so it can be reinstated in another program and used to classify images there. 
The key realization at this point is that the TensorFlow model and its weights are different things, and it's possible to export one or the other. We want to export both. The model, or graph definition, defines the variables, the computational steps, and the links between them. The weights are the model values that we developed by training the model a few minutes ago. To save the model, we use a TensorFlow saver. The code below saves the model into our current working directory as two files: saved_mnist_cnn.ckpt, which contains the weights, and saved_mnist_cnn.ckpt.meta, which contains the graph definition. End of explanation """
kunaltyagi/SDES
notes/python/p_norvig/logic/Sicherman Dice.ipynb
gpl-3.0
def sicherman(): """The set of pairs of 6-sided dice that have the same distribution of sums as a regular pair of dice.""" return {pair for pair in pairs(all_dice()) if pair != regular_pair and sums(pair) == regular_sums} # TODO: pairs, all_dice, regular_pair, sums, regular_sums """ Explanation: Sicherman Dice <span style="background-color:#e6e6e6">Note: This notebook takes the form of a conversation between two problem solvers. One speaks in bold, the other in plain. Also note, for those who are not native speakers of English: "dice" is the plural form; "die" is the singular.</span> Huh. <a href="http://wordplay.blogs.nytimes.com/2014/06/16/dice-3/">This</a> is interesting. You know how in many games, such as craps or Monopoly, you roll two regular dice and add them up. Only the sum matters, not what either of the individual dice shows. Right. And some of those sums, like 8, can be made multiple ways, while 2 and 12 can only be made one way. Yeah. 8 can be made 5 ways, so it has a 5/36 probability of occuring. The interesting thing is that people have been playing dice games for 7,000 years. But it wasn't until 1977 that <a href="http://userpages.monmouth.com/~colonel/">Colonel George Sicherman</a> asked whether is is possible to have a pair of dice that are not regular dice&mdash;that is, they don't have (1, 2, 3, 4, 5, 6) on the six sides&mdash;but have the same distribution of sums as a regular pair&mdash;so the pair of dice would also have to have 5 ways of making 8, but it could be different ways; maybe 7+1 could be one way. Sicherman assumes that each side bears a positive integer. And what did he find? Wouldn't it be more fun to figure it out for ourselves? OK! How could we proceed? When in doubt, use brute force: we can write a program to enumerate the possibilities: Generate all dice that could possibly be part of a solution, such as (1, 2, 2, 4, 8, 9). 
Consider all pairs of these dice, such as ((1, 3, 4, 4, 5, 8), (1, 2, 2, 3, 3, 4)) See if we find any pairs that are not the regular pair, but do have the same distribution of sums as the regular pair. That's great. I can code up your description almost verbatim. I'll also keep track of our TO DO list: End of explanation """ def pairs(collection): "Return all pairs (A, B) from collection where A <= B." return [(A, B) for A in collection for B in collection if A <= B] # TODO: all_dice, regular_pair, sums, regular_sums """ Explanation: Looks good to me. Now we can tick off the items in the TO DO list. The function pairs is first, and it is easy: End of explanation """ pairs(['A', 'B', 'C']) """ Explanation: That's good. We could have used the library function itertools.combinations_with_replacement, but let's just leave it as is. We should test to make sure it works: End of explanation """ def sums(pair): "All possible sums of a side from one die plus a side from the other." (A, B) = pair return Bag(a + b for a in A for b in B) Bag = sorted # Implement a bag as a sorted list def ints(start, end): "A tuple of the integers from start to end, inclusive." return tuple(range(start, end + 1)) regular_die = ints(1, 6) regular_pair = (regular_die, regular_die) regular_sums = sums(regular_pair) # TODO: all_dice """ Explanation: TO DO: sums(pair) Now for sums: we need some way to represent all the 36 possible sums from a pair of dice. We want a representation that will be the same for two different pairs if all 36 sums are the same, but where the order or composition of the sums doesn't matter. So we want a set of the sums? Well, it can't be a set, because we need to know that 8 can be made 5 ways, not just that 8 is a member of the set. The technical term for a collection where order doesn't matter but where you can have repeated elements is a bag, or sometimes called a multiset. 
For example, the regular pair of dice makes two 11s with 5+6 and 6+5, and another pair could make two 11s with 7+4 and 3+8. Can you think of a representation that will do that? Well the easiest is just a sorted list or tuple&mdash;if we don't want order to matter, sorting takes care of that. Another choice would be a dictionary of {sum: count} entries, like {2: 1, 3: 2, ... 11: 2, 12: 1}. There is even a library class, collections.Counter, that does exactly that. How do we choose between the two representations? I don't think it matters much. Since there are only 36 entries, I think the sorted list will be simpler, and probably more efficient. For 100-sided dice I'd probably go with the Counter. OK, here's some code implementing sums as a sorted list, and definitions for regular die pair, and sums. By the way, I could have used range(1, 7) to define a regular die, but range is zero-based, and regular dice are one-based, so I defined the function ints instead. End of explanation """ len(regular_sums) print(regular_sums) """ Explanation: Let's check the regular_sums: End of explanation """ from collections import Counter Counter(regular_sums) """ Explanation: And we can see what that would look like to a Counter: End of explanation """ def all_dice(): "A list of all feasible 6-sided dice for the Sicherman problem." return [(1, s2, s3, s4, s5, s6) for s2 in ints(2, 8) for s3 in ints(s2, 8) for s4 in ints(s3, 8) for s5 in ints(s4, 8) for s6 in ints(s5+1, 9)] """ Explanation: Looks good! Now only one more thing on our TODO list: TO DO: all_dice() all_dice should generate all possible dice, where by "possible" I mean the dice that could feasibly be part of a pair that is a solution to the Sicherman problem. Do we know how many dice that will be? Is it a large enough number that efficiency will be a problem? Let's see. A die has six sides each. 
If each side can be a number from, say, 1 to 10, that's 10<sup>6</sup> or a million possible dice; a million is a small number for a computer. True, a million is a relatively small number for all_dice(), but how many pairs(all_dice()) will there be? Ah. A million squared is a trillion. That's a large number even for a computer. Just counting to a trillion takes hours in Python; checking a trillion pairs will take days. So we need to get rid of most of the dice. What about permutations? Good point. If I have the die (1, 2, 3, 4, 5, 6), then I don't need the 6! = 720 different permutations of this die&mdash; that is, dice like (2, 4, 6, 1, 3, 5). Each die should be a bag (I learned a new word!) of sides. So we've already eliminated 719/720 = 99.9% of the work. One other thing bothers me ... how do you know that the sides can range from 1 to 10? Are you sure that 11 can't be part of a solution? Or 12? Every side on every die must be a positive integer, right? Right. No zeroes, no negative numbers, no fractions. Then I know for sure that 12 can't be on any die, because when you add 12 to whatever is on the other die, you would get at least 13, and 13 is not allowed in the regular distribution of sums. Good. How about 11? We can't have a sum that is bigger than 12. So if one die had an 11, the other would have to have all 1s. That wouldn't work, because then we'd have six 12s, but we only want one. So 10 is the biggest allowable number on a die. What else can we say about the biggest number on a die? There's one 12 in the sums. But there are several ways to make a 12: 6+6 or 7+5 or 8+4, and so on. So I can't say for sure what the biggest number on any one die will be. But I can say that whatever the biggest number on a die is, it will be involved in summing to 12, so there can be only one of them, because we only want to make one 12. What about the smallest number on a die? Well, there's only one 2 allowed in the sums. 
The only way to sum to 2 is 1+1: a 1 from each of the dice in the pair. If a die had no 1s, we wouldn't get a 2; if a die had more than one 1, we would get too many 2s. So every die has to have exactly one 1. Good. So each die has exactly one 1, and exactly one of whatever the biggest number is, something in the range up to 10. Here's a picture of the six sides of any one die: <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">&nbsp;1&nbsp;</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-10</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-10</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-10</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-10</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-10</span> The bag of sides is always listed in non-decreasing order; the first side, 1, is less than the next, and the last side, whatever it is, is greater than the one before it. Wait a minute: you have [2-10] &lt; [2-10]. But 2 is not less than 2, and 10 is not less than 10. I think it should be [2-9] &lt; [3-10]. So the picture should be like this: <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">&nbsp;1&nbsp;</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-9</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-9</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-9</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-9</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3-10</span> Good! We're making progress in cutting down the range. But it That this bothers me because it says the range for the biggest number is 3 to 10. 
But if one die has a 3 and the other a 10, that adds to 13. So I'm thinking that it is not possible to have a 10 after all&mdash;because if one die had a 10, then the other would have to have a 2 as the biggest number, and that can't be. Therefore the biggest number is in the range of 3 to 9. But then the others have to be less, so make them 2 to 8: <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">&nbsp;1&nbsp;</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-8</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-8</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-8</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-8</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3-9</span> I can turn this picture into code: End of explanation """ sicherman() """ Explanation: I think we're ready to run sicherman(). Any bets on what we'll find out? I bet that Sicherman is remembered because he discovered a pair of dice that works. If he just proved the non-existence of a pair, I don't think that would be noteworthy. Makes sense. Here goes: The Answer End of explanation """ def all_dice(): "A list of all feasible 6-sided dice for the Sicherman problem." return [(1, s2, s3, s4, s5, s6) for s2 in ints(2, 7) for s3 in ints(s2, 7) for s4 in ints(max(s3, 3), 7) for s5 in ints(s4, 7) for s6 in ints(s5+1, 8)] """ Explanation: Look at that! It turns out you can <a href="http://www.grand-illusions.com/acatalog/Sicherman_Dice.html">buy</a> a pair of dice with just these numbers. <a href="http://www.grand-illusions.com/acatalog/Sicherman_Dice.html"><img src="http://www.grand-illusions.com/acatalog/lge-sicherman_dice.jpg"></a> Here's a table I borrowed from Wikipedia that shows both pairs of dice have the same sums. 
<table class="wikitable"> <tr> <td align="centre"></td> <td align="centre">2</td> <td align="centre">3</td> <td align="centre">4</td> <td align="centre">5</td> <td align="centre">6</td> <td align="centre">7</td> <td align="centre">8</td> <td align="centre">9</td> <td align="centre">10</td> <td align="centre">11</td> <td align="centre">12</td> </tr> <tr> <td>Regular dice: <br>(1, 2, 3, 4, 5, 6) <br>(1, 2, 3, 4, 5, 6)</td> <td>1+1</td> <td>1+2<br /> 2+1</td> <td>1+3<br /> 2+2<br /> 3+1</td> <td>1+4<br /> 2+3<br /> 3+2<br /> 4+1</td> <td>1+5<br /> 2+4<br /> 3+3<br /> 4+2<br /> 5+1</td> <td>1+6<br /> 2+5<br /> 3+4<br /> 4+3<br /> 5+2<br /> 6+1</td> <td>2+6<br /> 3+5<br /> 4+4<br /> 5+3<br /> 6+2</td> <td>3+6<br /> 4+5<br /> 5+4<br /> 6+3</td> <td>4+6<br /> 5+5<br /> 6+4</td> <td>5+6<br /> 6+5</td> <td>6+6</td> </tr> <tr> <td>Sicherman dice: <br>(<b><span style="color:orange;">1</span>, <span style="color:red;">2</span>, <span style="color:blue;"><i>2</i></span>, <span style="color:red;">3</span>, <span style="color:blue;"><i>3</i></span>, <span style="color:orange;">4</span></b>) <br>(1, 3, 4, 5, 6, 8)</td> <td><b><span style="color:orange;">1</span></b>+1</td> <td><b><span style="color:red;">2</span></b>+1<br /> <b><span style="color:blue;"><i>2</i></span></b>+1</td> <td><b><span style="color:red;">3</span></b>+1<br /> <b><span style="color:blue;"><i>3</i></span></b>+1<br /> <b><span style="color:orange;">1</span></b>+3</td> <td><b><span style="color:orange;">1</span></b>+4<br /> <b><span style="color:red;">2</span></b>+3<br /> <b><span style="color:blue;"><i>2</i></span></b>+3<br /> <b><span style="color:orange;">4</span></b>+1</td> <td><b><span style="color:orange;">1</span></b>+5<br /> <b><span style="color:red;">2</span></b>+4<br /> <b><span style="color:blue;"><i>2</i></span></b>+4<br /> <b><span style="color:red;">3</span></b>+3<br /> <b><span style="color:blue;"><i>3</i></span></b>+3</td> <td><b><span style="color:orange;">1</span></b>+6<br /> <b><span 
style="color:red;">2</span></b>+5<br /> <b><span style="color:blue;"><i>2</i></span></b>+5<br /> <b><span style="color:red;">3</span></b>+4<br /> <b><span style="color:blue;"><i>3</i></span></b>+4<br /> <b><span style="color:orange;">4</span></b>+3</td> <td><b><span style="color:red;">2</span></b>+6<br /> <b><span style="color:blue;"><i>2</i></span></b>+6<br /> <b><span style="color:red;">3</span></b>+5<br /> <b><span style="color:blue;"><i>3</i></span></b>+5<br /> <b><span style="color:orange;">4</span></b>+4</td> <td><b><span style="color:orange;">1</span></b>+8<br /> <b><span style="color:red;">3</span></b>+6<br /> <b><span style="color:blue;"><i>3</i></span></b>+6<br /> <b><span style="color:orange;">4</span></b>+5</td> <td><b><span style="color:red;">2</span></b>+8<br /> <b><span style="color:blue;"><i>2</i></span></b>+8<br /> <b><span style="color:orange;">4</span></b>+6<br /></td> <td><b><span style="color:red;">3</span></b>+8<br /> <b><span style="color:blue;"><i>3</i></span></b>+8</td> <td><b><span style="color:orange;">4</span></b>+8</td> </tr> </table> We could stop here. Or we could try to solve it for N-sided dice. Why stop now? Onward! OK. I know 4-, 12-, and 20-sided dice are common, but we'll try to handle any N > 1. My guess is we won't go too far before our program becomes too slow. So, before we try N-sided dice, let's analyze six-sided dice a little better, to see if we can eliminate some of the pairs before we start. The picture says that (1, 2, 2, 2, 2, 3) could be a valid die. Could it? No! If a die had four 2s, then we know that since the other die has one 1, we could make 2 + 1 = 3 four ways. But the regular_sums has only two 3s. So that means that a die can have no more than two 2s. 
New picture: <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">&nbsp;1&nbsp;</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-8</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-8</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3-8</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3-8</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3-9</span> Now we've got [3-8] < [3-9]; that's not right. If a die can only have one 1 and two 2s, then it must have at least one number that is a 3 or more, followed by the biggest number, which must be 4 or more, and we know a pair of biggest numbers must sum to 12, so the range of the biggest can't be [4-9], it must be [4-8]: <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">&nbsp;1&nbsp;</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-7</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-7</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3-7</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3-7</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">4-8</span> End of explanation """ len(all_dice()) len(pairs(all_dice())) """ Explanation: I'll count how many dice and how many pairs there are now: End of explanation """ import random random.sample(all_dice(), 10) """ Explanation: Nice&mdash;we got down from a trillion pairs to 26,000. 
I don't want to print all_dice(), but I can sample a few: End of explanation """ def sicherman(N=6): """The set of pairs of N-sided dice that have the same distribution of sums as a regular pair of N-sided dice.""" reg_sums = regular_sums(N) reg_pair = regular_pair(N) return {pair for pair in pairs(all_dice(N)) if pair != reg_pair and sums(pair) == reg_sums} def regular_die(N): return ints(1, N) def regular_pair(N): return (regular_die(N), regular_die(N)) def regular_sums(N): return sums(regular_pair(N)) # TODO: all_dice(N) """ Explanation: sicherman(N) OK, I think we're ready to update sicherman() to sicherman(N). Sure, most of that will be easy, just parameterizing with N: End of explanation """ for N in ints(1, 7): print("N:", N, dict(Counter(regular_sums(N)))) """ Explanation: Good. I think it would be helpful for me to look at a table of regular_sums: End of explanation """ def lower_bounds(N): "A list of lower bounds for respective sides of an N-sided die." lowers = [1] for _ in range(N-1): m = lowers[-1] # The last number in lowers so far lowers.append(m if (lowers.count(m) < m) else m + 1) lowers[-1] = lowers[-2] + 1 return lowers lower_bounds(6) lower_bounds(10) """ Explanation: That is helpful. I can see that any regular_sums must have one 2 and two 3s, and three 4s, and so on, not just for N=6 but for any N (except for trivially small N). And that means that any regular die can have at most two 2s, three 3s, four 4s, and so on. 
So we have this picture: <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">&nbsp;1&nbsp;</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">4+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">4+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">4+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">4+</span> &le; ... where [2+] means the lower bound is 2, but we haven't figured out yet what the upper bound is. Let's figure out upper bounds starting from the biggest number. What can the biggest number be? For a pair of N-sided die, the biggest sides from each one must add up to 2N. Let's take N=10 as an example. The biggest numbers on two 10-sided Sicherman dice must sum to 20. According to the picture above, the lower bound on the biggest number would be 4, but because there can only be one of the biggest number, the lower bound is 5. 
So to add up to 20, the range must be [5-15]: <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">&nbsp;1&nbsp;</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">4+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">4+</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">4+</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">5-15</span> There's probably some tricky argument for the upper bounds of the other sides, but I'm just going to say the upper bound is one less than the upper bound of the biggest number: <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">&nbsp;1&nbsp;</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-14</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">2-14</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3-14</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3-14</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">3-14</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">4-14</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">4-14</span> &le; <span style="border-style: solid; width: 5em; height: 5em; padding: 3px">4-14</span> &lt; <span style="border-style: solid; width: 5em; height: 5em; padding: 
3px">5-15</span> Let's start by coding up lower_bounds(N): End of explanation """ def upper_bounds(N): "A list of upper bounds for respective sides of an N-sided die." U = 2 * N - lower_bounds(N)[-1] return [1] + (N - 2) * [U - 1] + [U] upper_bounds(6) upper_bounds(10) """ Explanation: And upper_bounds(N): End of explanation """ def all_dice(N): "Return a list of all possible N-sided dice for the Sicherman problem." lowers = lower_bounds(N) uppers = upper_bounds(N) def possibles(die, i): "The possible numbers for the ith side of an N-sided die." return ints(max(lowers[i], die[-1] + int(i == N-1)), uppers[i]) dice = [(1,)] for i in range(1, N): dice = [die + (side,) for die in dice for side in possibles(die, i)] return dice """ Explanation: Now, what do we have to do for all_dice(N)? When we knew we had six sides, we wrote six nested loops. We can't do that for N, so what do we do? Here's an iterative approach: we keep track of a list of partially-formed dice, and on each iteration, we add a side to all the partially-formed dice in all possible ways, until the dice all have N sides. So for eaxmple, we'd start with: dice = [(1,)] and then on the next iteration (let's assume N=6, so the lower bound is 2 and the upper bound is 7), we'd get this: dice = [(1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7)] on the next iteration, we find all the ways of adding a third side, and so on. Like this: End of explanation """ len(all_dice(6)) """ Explanation: The tricky part was with the max: the actual lower bound at least lowers[i], but it must be as big as the previous side, die[-1]. And just to make things complicated, the very last side has to be strictly bigger than the previous; " + int(i == N-1)" does that by adding 1 just in case we're on the last side, and 0 otherwise. Let's check it out: End of explanation """ random.sample(all_dice(6), 8) """ Explanation: Reassuring that we get the same number we got with the old version of all_dice(). 
End of explanation """ {N: sicherman(N) for N in ints(2, 6)} """ Explanation: Running sicherman(N) for small N Let's try sicherman for some small values of N: End of explanation """ %time sicherman(6) %time sicherman(7) """ Explanation: Again, reassuring that we get the same result for sicherman(6). And interesting that there is a result for sicherman(4) but not for the other N. Let's go onwards from N=6, but let's check the timing as we go: End of explanation """ %matplotlib inline import matplotlib import numpy as np import matplotlib.pyplot as plt def logplot(X, Y, *options): "Plot Y on a log scale vs X." fig, ax = plt.subplots() ax.set_yscale('log') ax.plot(X, Y, *options) """ Explanation: Estimating run time of sicherman(N) for larger N OK, it takes 50 or 60 times longer to do 7, compared to 6. At this rate, N=8 will take 15 minutes, 9 will take 15 hours, and 10 will take a month. Do we know it will continue to rise at the same rate? You're saying the run time is exponential in N? I think so. The run time is proportional to the number of pairs. The number of pairs is proportional to the square of the number of dice. The number of dice is roughly exponential in N, because each time you increase N by 1, you have to try a number of new sides that is similar to the number for the previous side (but not quite the same). I should plot the number of pairs on a log scale and see if it looks like a straight line. I can count the number of pairs without explicitly generating the pairs. If there are D dice, then the number of pairs is what? Something like D &times; (D + 1) / 2? Or is it D &times; (D - 1) / 2? Let's draw a picture. With D = 4, here are all the ways to pair one die with another to yield 10 distinct pairs: 11 .. .. .. 21 22 .. .. 31 32 33 .. 41 42 43 44 To figure out the formula, add a row to the top: .. .. .. .. 11 .. .. .. 21 22 .. .. 31 32 33 .. 
41 42 43 44 Now we have a D &times; (D + 1) rectangle, and we can see that half (10) of them are pairs, and half (the other 10) are not pairs (because they would be repetitions). So the formula is D &times; (D + 1)/2, and checking for D=4, (4 &times; 5) / 2 = 10, so we're good. OK, let's try it. First some boilerplate for plotting: End of explanation """ def plot_pairs(Ns): "Given a list of N values, plot the number of pairs and return a dict of them." Ds = [len(all_dice(N)) for N in Ns] Npairs = [D * (D + 1) // 2 for D in Ds] logplot(Ns, Npairs, 'bo-') return {Ns[i]: Npairs[i] for i in range(len(Ns))} plot_pairs(ints(2, 12)) """ Explanation: Now we can plot and display the number of pairs: End of explanation """ sum((1, 2, 2, 3, 3, 4) + (1, 3, 4, 5, 6, 8)) sum((1, 2, 3, 4, 5, 6) + (1, 2, 3, 4, 5, 6)) """ Explanation: OK, we've learned two things. One, it is roughly a straight line, so the number of pairs is roughly exponential. Two, there are a lot of pairs. 10<sup>14</sup>, just for N=12. I don't want to even think about N=20. So if we want to get much beyond N=8, we're either going to need a brand new approach, or we need to make far fewer pairs of dice. Making Fewer pairs Maybe we could tighten up the upper bounds, but I don't think that will help very much. How about if we concentrate on making fewer pairs, without worrying about making fewer dice? How could we do that? Isn't the number of pairs always (D<sup>2</sup> + D)/2 ? Remember, we're looking for feasible pairs. So if there was some way of knowing ahead of time that two dice were incompatible as a pair, we wouldn't even need to consider the pair. By incompatible, you mean they can't form a pair that is a solution. Right. Consider this: in any valid pair, the sum of the biggest number on each die must be 2N. 
For example, with N = 6: ((1, 2, 2, 3, 3, 4), (1, 3, 4, 5, 6, 8)) sum of biggests = 4 + 8 = 12 ((1, 2, 3, 4, 5, 6), (1, 2, 3, 4, 5, 6)) sum of biggests = 6 + 6 = 12 So if we have a die with biggest number 7, what dice should we consider pairing it with? Only ones with biggest number 5. I get it: we sort all the die into bins labeled with their biggest number. Then we look at each bin, and for the "7" bin, we pair them up with the dice in the "5" bin. In general, the B bin can only pair with the 2N - B bin. Exactly. Cool. I can see how that can cut the amount of work by a factor of 10 or so. But I was hoping for a factor of a million or so. There are other properties of a feasible pair. Like what? Well, what about the number of 2s in a pair? Let's see. We know that any regular_sums has to have two 3s, and the only way to make a 3 is 2+1. And each die has only one 1, so that means that each pair of dice has to have a total of exactly two 2s. Does it have to be one 2 on each die? No. It could be one each, or it could be two on one die and none on the other. So a die with T twos can only pair with dice that have 2 - T twos. Great. Can you think of another property? Give me a hint. Let's look at the sums of 6-sided Sicherman and regular pairs: End of explanation """ {die for die in all_dice(6) if max(die) == 12 - 5 and sum(die) == 42 - 19 and die.count(2) == 2} """ Explanation: They're the same. Is that the question that 42 is the answer to? But does a Sicherman pair always have to have the same sum as a regular pair? I guess it doea, because the sum of sums(pair) is just all the sides added up N times each, so two pairs have the same sum of sums(pair) if and only if they have the same sum. So consider the die (1, 3, 3, 3, 4, 5). What do we know about the dice that it can possibly pair with? OK, that die has a biggest side of 5, so it can only pair with dice that have a biggest side of 12 - 5 = 7. 
It has a sum of 19, so it can only pair with dice that have a sum of 42 - 19 = 23. And it has no 2s, so it can only pair with dice that have two 2s. I wonder how many such dice there are, out of all 231 all_dice(6)? End of explanation """ from collections import defaultdict def tabulate(dice): """Put all dice into bins in a hash table, keyed by bin_label(die). Each bin holds a list of dice with that key.""" # Example: {(21, 6, 1): [(1, 2, 3, 4, 5, 6), (1, 2, 3, 4, 4, 7), ...] table = defaultdict(list) for die in dice: table[bin_label(die)].append(die) return table def pairs(dice): "Return all pairs of dice that could possibly be a solution to the Sicherman problem." table = tabulate(dice) N = len(dice[0]) for bin1 in table: bin2 = compatible_bin(bin1, N) if bin2 in table and bin1 <= bin2: for A in table[bin1]: for B in table[bin2]: yield (A, B) def bin_label(die): return sum(die), max(die), die.count(2) def compatible_bin(bin1, N): "Return a bin label that is compatible with bin1." (S1, M1, T1) = bin1 return (N * (N + 1) - S1, 2 * N - M1, 2 - T1) """ Explanation: There's only 1. So, (1, 3, 3, 3, 4, 5) only has to try to pair with one die, rather than 230. Nice improvement! In general, I wonder what the sum of the sides of a regular pair is? Easy, that's N * (N + 1). Gauss knew that when he was in elementary school! More efficient pairs(dice) OK, we can code this up easily enough: End of explanation """ {N: sicherman(N) for N in ints(2, 6)} """ Explanation: Let's make sure it works: End of explanation """ %time sicherman(7) """ Explanation: Good, those are the same answers as before. But how much faster is it? End of explanation """ tabulate(all_dice(5)) """ Explanation: Wow, that's 1000 times faster than before. 
I want to take a peek at what some of the bins look like: End of explanation """ print(' N: D #pairs(dice) D*(D-1)/2') for N in ints(2, 11): dice = list(all_dice(N)) D = len(dice) print('{:2}: {:9,d} {:12,d} {:17,d}'.format(N, D, len(list(pairs(dice))), D*(D-1)//2)) """ Explanation: Pretty good: four of the bins have two dice, but the rest have only one die. And let's see how many pairs we're producing now. We'll tabulate N (the number of sides); D (the number of N-sided dice), the number pairs(dice) using the new pairs, and the number using the old pairs: End of explanation """ %%time {N: sicherman(N) for N in ints(2, 10)} """ Explanation: OK, we're doing 100,000 times better for N=11. But it would still take a long time to test 11 million pairs. Let's just get the answers up to N=10: End of explanation """
anhaidgroup/py_entitymatching
notebooks/guides/step_wise_em_guides/Performing Matching Using a ML Matcher.ipynb
bsd-3-clause
# Import py_entitymatching package
import py_entitymatching as em
import os
import pandas as pd

""" Explanation: Introduction This IPython notebook illustrates how to performing matching with a ML matcher. In particular we show examples with a decision tree matcher, but the same principles apply to all of the other ML matchers.
End of explanation """

# Get the datasets directory (demo CSVs shipped with py_entitymatching)
datasets_dir = em.get_install_path() + os.sep + 'datasets'

path_A = datasets_dir + os.sep + 'dblp_demo.csv'
path_B = datasets_dir + os.sep + 'acm_demo.csv'
path_labeled_data = datasets_dir + os.sep + 'labeled_data_demo.csv'

# Each input table needs a key column ('id') registered as metadata
A = em.read_csv_metadata(path_A, key='id')
B = em.read_csv_metadata(path_B, key='id')

# Load the pre-labeled data; ltable/rtable plus the foreign-key columns tie
# each labeled pair in S back to the rows of A and B it was built from
S = em.read_csv_metadata(path_labeled_data,
                         key='_id',
                         ltable=A, rtable=B,
                         fk_ltable='ltable_id', fk_rtable='rtable_id')

""" Explanation: Read in the orignal tables and a set of labeled data into py_entitymatching.
End of explanation """

# Split S into I and J (50/50 train/test); random_state=0 makes the split reproducible
IJ = em.split_train_test(S, train_proportion=0.5, random_state=0)
I = IJ['train']
J = IJ['test']

# Generate a set of features
F = em.get_features_for_matching(A, B, validate_inferred_attr_types=False)

# Convert I into feature vectors using updated F; 'label' is carried along
# after the generated feature columns so it can serve as the target
H = em.extract_feature_vecs(I,
                            feature_table=F,
                            attrs_after='label',
                            show_progress=False)

# Instantiate the matcher to evaluate.
dt = em.DTMatcher(name='DecisionTree', random_state=0)

# Train using feature vectors from I; the id columns and the target column are
# excluded so that only the generated features act as predictors
dt.fit(table=H,
       exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
       target_attr='label')

""" Explanation: Training the ML Matcher Now, we can train our ML matcher. In this notebook we will demonstrate this process with a decision tree matcher. First, we need to split our labeled data into a training set and a test set. Then we will exract feature vectors from the training set and train our decision tree with the fit command.
End of explanation """

# Convert J into a set of feature vectors using F
L1 = em.extract_feature_vecs(J, feature_table=F,
                             attrs_after='label', show_progress=False)

# Predict on L (default form: returns just the array of predictions)
predictions = dt.predict(table=L1, exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'])

# Show the predictions
predictions[0:10]

""" Explanation: Getting Predictions with the ML Matcher Since we now have a trained decision tree, we can use our matcher to get predictions on the test set. Below, we will show four different ways to get the predictions with the predict command that will be useful in various contexts. Getting a List of Predictions First up, we will demonstrate how to get just a list of predictions using the predict command. This is the default method of getting predictions. As shown below, the resulting variable, predictions, is just an array containing the predictions for each of the feature vectors in the test set.
End of explanation """

# Convert J into a set of feature vectors using F
L2 = em.extract_feature_vecs(J, feature_table=F,
                             attrs_after='label', show_progress=False)

# Predict on L; return_probs=True additionally returns the match probabilities
predictions, probs = dt.predict(table=L2,
                                exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
                                return_probs=True)

# Show the predictions and probabilities
print('Predictions for first ten entries: {0}'.format(predictions[0:10]))
print('Probabilities of a match for first ten entries: {0}'.format(probs[0:10]))

""" Explanation: Getting a List of Predictions and a List of Probabilities Next we will demonstrate how to get both a list of prediction for the test set, as well as a list of the associated probabilities for the predictions. This is done by setting the 'return_probs' argument to true. Note that the probabilities shown are the probability for a match.
End of explanation """

# Convert J into a set of feature vectors using F
L3 = em.extract_feature_vecs(J, feature_table=F,
                             attrs_after='label', show_progress=False)

# Predict on L; append=True returns a *copy* of the feature-vector table with
# predictions in a new 'prediction' column and probabilities in 'probability'
predictions = dt.predict(table=L3,
                         exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
                         target_attr='prediction',
                         append=True,
                         return_probs=True,
                         probs_attr='probability')

# Show the predictions and probabilities
predictions[['_id', 'ltable_id', 'rtable_id', 'label', 'prediction', 'probability']].head()

""" Explanation: Appending the Predictions to the Feature Vectors Table Often, we want to include the predictions with the feature vector table. We can return predictions appended to a copy of the feature vector table if we use the 'append' argument to true. We can choose the name of the new predictions column using the 'target_attr' argument. We can also append the probabilites by setting 'return_probs' to true and setting the new probabilities column name with the 'probs_attr'.
End of explanation """

# Convert J into a set of feature vectors using F
L4 = em.extract_feature_vecs(J, feature_table=F,
                             attrs_after='label', show_progress=False)

# Predict on L; inplace=True writes the new columns into L4 itself instead of
# returning a modified copy (hence no assignment of the return value here)
dt.predict(table=L4,
           exclude_attrs=['_id', 'ltable_id', 'rtable_id', 'label'],
           target_attr='prediction',
           append=True,
           return_probs=True,
           probs_attr='probabilities',
           inplace=True)

# Show the predictions and probabilities
L4[['_id', 'ltable_id', 'rtable_id', 'label', 'prediction', 'probabilities']].head()

""" Explanation: Appending the Prediction to the Original Feature Vectors Table In-place Lastly, we will show how to append the predictions to the original feature vector dataframe. We can accomplish this by setting the 'append' argument to true, setting the name of the new column with the 'target_attr' argument and then setting the 'inplace' argument to true. Again, we can include the probabilites with the 'return_probs' and 'probs_attr' arguments.
This will append the predictions and probabilities to the original feature vector dataframe as opposed to the mthod used above which will create a copy of the feature vectors and append the predictions to that copy.
End of explanation """
prashantas/MyDataScience
Python/MnistDigitsKeras.ipynb
bsd-2-clause
batch_size = 128
nb_classes =10
nb_epochs = 10

# convert class vectors to binary class matrices for softmax layer
Y_train = keras.utils.np_utils.to_categorical(y_train,nb_classes)
Y_test = keras.utils.np_utils.to_categorical(y_test,nb_classes)
## for example label 6 is now the one-hot row [0,0,0,0,0,0,1,0,0,0]
print(Y_train.shape)

""" Explanation: X_train = X_train.reshape(60000,784) X_test = X_test.reshape(10000,784) X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train /=255 X_test /=255 print(X_train.shape[0],'train_samples') print(X_test.shape[0],'test_samples')
End of explanation """

# Fully-connected 784 -> 512 -> 512 -> 10 network: ReLU hidden layers with
# dropout (p=0.2) after each one, and a softmax output over the 10 digit classes
model = Sequential()
model.add(Dense(512,input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))

model.summary()

""" Explanation: MODEL DEFINITION
End of explanation """

# Categorical cross-entropy matches the one-hot labels built above
model.compile(loss='categorical_crossentropy',optimizer=RMSprop(),metrics=['accuracy'])

# The test set doubles as the validation set here, so the per-epoch
# validation metrics are computed on the same data evaluated below
history = model.fit(X_train,Y_train,
                    batch_size=batch_size,nb_epoch=nb_epochs,
                    verbose=1,validation_data=(X_test,Y_test))

## we can check the param of the model after training
history.params

history.history

""" Explanation: MODEL COMPILATION
End of explanation """

# evaluate() returns [test loss, test accuracy]; verbose=0 keeps it silent
score = model.evaluate(X_test,Y_test,verbose=0)
print('Test score:', score[0])
print('Test accuracy:',score[1])

""" Explanation: EVALUATION AND PREDICTION
End of explanation """

# Predict one sample: the model expects a batch, hence the (1, 784) reshape
X_test_0 = X_test[0,:].reshape(1,784)
Y_test_0 = Y_test[0,:]
print(Y_test_0)
plt.imshow(X_test_0.reshape(28,28))

pred = model.predict(X_test_0)
# argmax turns the one-hot label / softmax output back into a digit class
print('Label of testing sample:', np.argmax(Y_test_0))
print('\nOutput of the softmax layer:',pred[0])
print('\nNeural Network prediction:', np.argmax(pred[0]))

""" Explanation: Now lets predict one single sample
End of explanation """
mne-tools/mne-tools.github.io
0.19/_downloads/6035dcef33422511928bd2247a3d092d/plot_source_power_spectrum_opm.ipynb
bsd-3-clause
# Authors: Denis Engemann <denis.engemann@gmail.com>
#          Luke Bloy <luke.bloy@gmail.com>
#          Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)

import os.path as op

from mne.filter import next_fast_len

import mne

print(__doc__)

# File locations: subject anatomy (BEM solution, oct6 source space) plus the
# VectorView ("vv") and OPM ("opm") resting-state and empty-room recordings
data_path = mne.datasets.opm.data_path()
subject = 'OPM_sample'
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
bem_fname = op.join(subjects_dir, subject, 'bem',
                    subject + '-5120-5120-5120-bem-sol.fif')
src_fname = op.join(bem_dir, '%s-oct6-src.fif' % subject)
vv_fname = data_path + '/MEG/SQUID/SQUID_resting_state.fif'
vv_erm_fname = data_path + '/MEG/SQUID/SQUID_empty_room.fif'
vv_trans_fname = data_path + '/MEG/SQUID/SQUID-trans.fif'
opm_fname = data_path + '/MEG/OPM/OPM_resting_state_raw.fif'
opm_erm_fname = data_path + '/MEG/OPM/OPM_empty_room_raw.fif'
opm_trans_fname = None
opm_coil_def_fname = op.join(data_path, 'MEG', 'OPM', 'coil_def.dat')

""" Explanation: ======================================================================
Compute source power spectral density (PSD) of VectorView and OPM data
======================================================================
Here we compute the resting state from raw for data recorded using a Neuromag VectorView system and a custom OPM system. The pipeline is meant to mostly follow the Brainstorm [1] OMEGA resting tutorial pipeline <bst_omega_>. The steps we use are:
Filtering: downsample heavily.
Artifact detection: use SSP for EOG and ECG.
Source localization: dSPM, depth weighting, cortically constrained.
Frequency: power spectral density (Welch), 4 sec window, 50% overlap.
Standardize: normalize by relative power for each source.
:depth: 1
Preprocessing
End of explanation """

raws = dict()
raw_erms = dict()
new_sfreq = 90.
# Nyquist frequency (45 Hz) < line noise freq (50 Hz)
raws['vv'] = mne.io.read_raw_fif(vv_fname, verbose='error')  # ignore naming
raws['vv'].load_data().resample(new_sfreq)
raws['vv'].info['bads'] = ['MEG2233', 'MEG1842']
raw_erms['vv'] = mne.io.read_raw_fif(vv_erm_fname, verbose='error')
raw_erms['vv'].load_data().resample(new_sfreq)
raw_erms['vv'].info['bads'] = ['MEG2233', 'MEG1842']

raws['opm'] = mne.io.read_raw_fif(opm_fname)
raws['opm'].load_data().resample(new_sfreq)
raw_erms['opm'] = mne.io.read_raw_fif(opm_erm_fname)
raw_erms['opm'].load_data().resample(new_sfreq)
# Make sure our assumptions later hold
assert raws['opm'].info['sfreq'] == raws['vv'].info['sfreq']

""" Explanation: Load data, resample. We will store the raw objects in dicts with entries "vv" and "opm" to simplify housekeeping and simplify looping later.
End of explanation """

titles = dict(vv='VectorView', opm='OPM')
# ECG projectors first: one gradiometer and one magnetometer component
ssp_ecg, _ = mne.preprocessing.compute_proj_ecg(
    raws['vv'], tmin=-0.1, tmax=0.1, n_grad=1, n_mag=1)
raws['vv'].add_proj(ssp_ecg, remove_existing=True)
# due to how compute_proj_eog works, it keeps the old projectors, so
# the output contains both projector types (and also the original empty-room
# projectors)
ssp_ecg_eog, _ = mne.preprocessing.compute_proj_eog(
    raws['vv'], n_grad=1, n_mag=1, ch_name='MEG0112')
raws['vv'].add_proj(ssp_ecg_eog, remove_existing=True)
raw_erms['vv'].add_proj(ssp_ecg_eog)
# Show only the last four projectors (the newly computed ECG/EOG ones)
fig = mne.viz.plot_projs_topomap(raws['vv'].info['projs'][-4:],
                                 info=raws['vv'].info)
fig.suptitle(titles['vv'])
fig.subplots_adjust(0.05, 0.05, 0.95, 0.85)

""" Explanation: Do some minimal artifact rejection just for VectorView data
End of explanation """

kinds = ('vv', 'opm')
# ~4-second Welch windows, rounded up to an FFT-friendly length
n_fft = next_fast_len(int(round(4 * new_sfreq)))
print('Using n_fft=%d (%0.1f sec)' % (n_fft, n_fft / raws['vv'].info['sfreq']))
for kind in kinds:
    fig = raws[kind].plot_psd(n_fft=n_fft, proj=True)
    fig.suptitle(titles[kind])
    fig.subplots_adjust(0.1, 0.1, 0.95, 0.85)

""" Explanation: Explore data
End of explanation """

src = mne.read_source_spaces(src_fname)
# This line removes source-to-source distances that we will not need.
# We only do it here to save a bit of memory, in general this is not required.
del src[0]['dist'], src[1]['dist']
bem = mne.read_bem_solution(bem_fname)
fwd = dict()
trans = dict(vv=vv_trans_fname, opm=opm_trans_fname)

# check alignment and generate forward
# (the custom OPM coil definitions must be active while the forward is built)
with mne.use_coil_def(opm_coil_def_fname):
    for kind in kinds:
        dig = True if kind == 'vv' else False
        fig = mne.viz.plot_alignment(
            raws[kind].info, trans=trans[kind], subject=subject,
            subjects_dir=subjects_dir, dig=dig, coord_frame='mri',
            surfaces=('head', 'white'))
        mne.viz.set_3d_view(figure=fig, azimuth=0, elevation=90,
                            distance=0.6, focalpoint=(0., 0., 0.))
        fwd[kind] = mne.make_forward_solution(
            raws[kind].info, trans[kind], src, bem, eeg=False, verbose=True)
del trans, src, bem

""" Explanation: Alignment and forward
End of explanation """

freq_bands = dict(
    delta=(2, 4), theta=(5, 7), alpha=(8, 12), beta=(15, 29), gamma=(30, 45))
topos = dict(vv=dict(), opm=dict())
stcs = dict(vv=dict(), opm=dict())

snr = 3.
lambda2 = 1. / snr ** 2  # standard minimum-norm regularization for this SNR
for kind in kinds:
    # Noise covariance from the matching empty-room recording
    noise_cov = mne.compute_raw_covariance(raw_erms[kind])
    inverse_operator = mne.minimum_norm.make_inverse_operator(
        raws[kind].info, forward=fwd[kind], noise_cov=noise_cov, verbose=True)
    stc_psd, sensor_psd = mne.minimum_norm.compute_source_psd(
        raws[kind], inverse_operator, lambda2=lambda2,
        n_fft=n_fft, dB=False, return_sensor=True, verbose=True)
    topo_norm = sensor_psd.data.sum(axis=1, keepdims=True)
    stc_norm = stc_psd.sum()  # same operation on MNE object, sum across freqs
    # Normalize each source point by the total power across freqs
    for band, limits in freq_bands.items():
        data = sensor_psd.copy().crop(*limits).data.sum(axis=1, keepdims=True)
        # Values are percentages of total PSD in each band
        topos[kind][band] = mne.EvokedArray(
            100 * data / topo_norm, sensor_psd.info)
        stcs[kind][band] = \
            100 * stc_psd.copy().crop(*limits).sum() / stc_norm.data
    del inverse_operator
del fwd, raws, raw_erms

""" Explanation: Compute and apply inverse to PSD estimated using multitaper + Welch. Group into frequency bands, then normalize each source point and sensor independently. This makes the value of each sensor point and source location in each frequency band the percentage of the PSD accounted for by that band.
End of explanation """

def plot_band(kind, band):
    """Plot activity within a frequency band on the subject's brain."""
    title = "%s %s\n(%d-%d Hz)" % ((titles[kind], band,) + freq_bands[band])
    topos[kind][band].plot_topomap(
        times=0., scalings=1., cbar_fmt='%0.1f', vmin=0, cmap='inferno',
        time_format=title)
    brain = stcs[kind][band].plot(
        subject=subject, subjects_dir=subjects_dir, views='cau', hemi='both',
        time_label=title, title=title, colormap='inferno',
        clim=dict(kind='percent', lims=(70, 85, 99)))
    brain.show_view(dict(azimuth=0, elevation=0), roll=0)
    # NOTE(review): `fig` is never assigned inside this function, so the value
    # returned is the module-level `fig` left over from the plots above --
    # presumably the topomap figure was intended; confirm before relying on it.
    return fig, brain

fig_theta, brain_theta = plot_band('vv', 'theta')

""" Explanation: Now we can make some plots of each frequency band.
Note that the OPM head coverage is only over right motor cortex, so only localization of beta is likely to be worthwhile. Theta
End of explanation """

fig_alpha, brain_alpha = plot_band('vv', 'alpha')

""" Explanation: Alpha
End of explanation """

fig_beta, brain_beta = plot_band('vv', 'beta')
fig_beta_opm, brain_beta_opm = plot_band('opm', 'beta')

""" Explanation: Beta Here we also show OPM data, which shows a profile similar to the VectorView data beneath the sensors.
End of explanation """

fig_gamma, brain_gamma = plot_band('vv', 'gamma')

""" Explanation: Gamma
End of explanation """
methods/figure-data/fig-3/Fig3_data_files.ipynb
bsd-3-clause
# read in nestedness output for all samples fig3a = pd.read_csv('../../../data/nestedness/nest_phylum_allsamples.csv') fig3a.head() """ Explanation: Figure 3 csv data generation Figure data consolidation for Figure 3, which shows patterns of nestedness in beta diversity Figure 3a: phyla occupancy plot, all samples End of explanation """ fig3b_animal = pd.read_csv('../../../data/nestedness/nest_phylum_Animal.csv') fig3b_plant = pd.read_csv('../../../data/nestedness/nest_phylum_Plant.csv') fig3b_saline = pd.read_csv('../../../data/nestedness/nest_phylum_Saline.csv') fig3b_nonsaline = pd.read_csv('../../../data/nestedness/nest_phylum_Non-saline.csv') """ Explanation: Fig 3b: phyla occupancy plot, individual Empo_2 categories End of explanation """ fig3c = pd.read_csv('../../../data/nestedness/nestedness_null_model_results_2017-09-19.csv') fig3c.head() """ Explanation: Fig 3c: NODF model results End of explanation """ fig3 = pd.ExcelWriter('Figure3_data.xlsx') fig3a.to_excel(fig3,'Fig-3a') fig3b_animal.to_excel(fig3,'Fig-3b_animal') fig3b_plant.to_excel(fig3,'Fig-3b_plant') fig3b_saline.to_excel(fig3,'Fig-3b_saline') fig3b_nonsaline.to_excel(fig3,'Fig-3b_nonsaline') fig3c.to_excel(fig3,'Fig-3c') fig3.save() """ Explanation: Write to Excel notebook End of explanation """
tiagogiraldo/Machine_Learning_Nanodegree_Projects
boston_housing.ipynb
gpl-3.0
# Import libraries necessary for this project
import numpy as np
import pandas as pd
import visuals as vs # Supplementary code
from sklearn.cross_validation import ShuffleSplit

# Pretty display for notebooks
%matplotlib inline

# Load the Boston housing dataset; 'MEDV' is the target, everything else a feature
data = pd.read_csv('housing.csv')
prices = data['MEDV']
features = data.drop('MEDV', axis = 1)

# Success
print "Boston housing dataset has {} data points with {} variables each.".format(*data.shape)

""" Explanation: Machine Learning Engineer Nanodegree
Model Evaluation & Validation
Project 1: Predicting Boston Housing Prices
Welcome to the first project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with 'Implementation' in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a 'Question X' header. Carefully read each question and provide thorough answers in the following text boxes that begin with 'Answer:'. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
Note: Code and Markdown cells can be executed using the Shift + Enter keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
Getting Started
In this project, you will evaluate the performance and predictive power of a model that has been trained and tested on data collected from homes in suburbs of Boston, Massachusetts. A model trained on this data that is seen as a good fit could then be used to make certain predictions about a home — in particular, its monetary value. This model would prove to be invaluable for someone like a real estate agent who could make use of such information on a daily basis.
The dataset for this project originates from the UCI Machine Learning Repository. The Boston housing data was collected in 1978 and each of the 506 entries represent aggregated data about 14 features for homes from various suburbs in Boston, Massachusetts. For the purposes of this project, the following preprocessing steps have been made to the dataset:
- 16 data points have an 'MEDV' value of 50.0. These data points likely contain missing or censored values and have been removed.
- 1 data point has an 'RM' value of 8.78. This data point can be considered an outlier and has been removed.
- The features 'RM', 'LSTAT', 'PTRATIO', and 'MEDV' are essential. The remaining non-relevant features have been excluded.
- The feature 'MEDV' has been multiplicatively scaled to account for 35 years of market inflation.
Run the code cell below to load the Boston housing dataset, along with a few of the necessary Python libraries required for this project. You will know the dataset loaded successfully if the size of the dataset is reported.
End of explanation """

# TODO: Minimum price of the data
minimum_price = np.min(prices)

# TODO: Maximum price of the data
maximum_price = np.max(prices)

# TODO: Mean price of the data
mean_price = np.mean(prices)

# TODO: Median price of the data
median_price = np.median(prices)

# TODO: Standard deviation of prices of the data
# note: np.std defaults to the population standard deviation (ddof=0),
# which differs slightly from pandas' Series.std (sample, ddof=1)
std_price = np.std(prices)

# Show the calculated statistics
print "Statistics for Boston housing dataset:\n"
print "Minimum price: ${:,.2f}".format(minimum_price)
print "Maximum price: ${:,.2f}".format(maximum_price)
print "Mean price: ${:,.2f}".format(mean_price)
print "Median price ${:,.2f}".format(median_price)
print "Standard deviation of prices: ${:,.2f}".format(std_price)

""" Explanation: Data Exploration
In this first section of this project, you will make a cursory investigation about the Boston housing data and provide your observations. Familiarizing yourself with the data through an explorative process is a fundamental practice to help you better understand and justify your results.
Since the main goal of this project is to construct a working model which has the capability of predicting the value of houses, we will need to separate the dataset into features and the target variable. The features, 'RM', 'LSTAT', and 'PTRATIO', give us quantitative information about each data point. The target variable, 'MEDV', will be the variable we seek to predict. These are stored in features and prices, respectively.
Implementation: Calculate Statistics
For your very first coding implementation, you will calculate descriptive statistics about the Boston housing prices. Since numpy has already been imported for you, use this library to perform the necessary calculations. These statistics will be extremely important later on to analyze various prediction results from the constructed model.
In the code cell below, you will need to implement the following:
- Calculate the minimum, maximum, mean, median, and standard deviation of 'MEDV', which is stored in prices.
- Store each calculation in their respective variable.
End of explanation """

# TODO: Import 'r2_score'
from sklearn.metrics import r2_score

def performance_metric(y_true, y_predict):
    """ Calculates and returns the performance score between
        true and predicted values based on the metric chosen. """

    # TODO: Calculate the performance score between 'y_true' and 'y_predict'
    # (coefficient of determination R^2: 1 is a perfect fit, 0 is no better
    # than predicting the mean, negative is worse than the mean predictor)
    score = r2_score(y_true, y_predict)

    # Return the score
    return score

""" Explanation: Question 1 - Feature Observation
As a reminder, we are using three features from the Boston housing dataset: 'RM', 'LSTAT', and 'PTRATIO'. For each data point (neighborhood):
- 'RM' is the average number of rooms among homes in the neighborhood.
- 'LSTAT' is the percentage of homeowners in the neighborhood considered "lower class" (working poor).
- 'PTRATIO' is the ratio of students to teachers in primary and secondary schools in the neighborhood.
Using your intuition, for each of the three features above, do you think that an increase in the value of that feature would lead to an increase in the value of 'MEDV' or a decrease in the value of 'MEDV'? Justify your answer for each.
Hint: Would you expect a home that has an 'RM' value of 6 be worth more or less than a home that has an 'RM' value of 7?
Answer: An increase in 'RM' must increase the price because more rooms must mean higher values, and lesser rooms mean lesser values.
An increase in ´'LSTAT' must decrease the price, because the neighborhood tends to depreciate because the existence of lower class homeowners, it could represent neighborhood with poor maintenance and this devaluate the neighborhood. Lesser 'LSTAT' could be interpreted like a more exclusive neighborhood, and it could produce higher prices of the homes.
An increase in 'PTRATIO' could mean a better education in the nearest schools and it could increase the demand in the zone by homeowners with children, and it produce a higher price. The contrary, could produce lesser demand because lesser parents wants to leave near to schools with better standards like more teachers and produce lower prices compared with others neighborhoods.
Developing a Model
In this second section of the project, you will develop the tools and techniques necessary for a model to make a prediction. Being able to make accurate evaluations of each model's performance through the use of these tools and techniques helps to greatly reinforce the confidence in your predictions.
Implementation: Define a Performance Metric
It is difficult to measure the quality of a given model without quantifying its performance over training and testing. This is typically done using some type of performance metric, whether it is through calculating some type of error, the goodness of fit, or some other useful measurement. For this project, you will be calculating the coefficient of determination, R<sup>2</sup>, to quantify your model's performance. The coefficient of determination for a model is a useful statistic in regression analysis, as it often describes how "good" that model is at making predictions.
The values for R<sup>2</sup> range from 0 to 1, which captures the percentage of squared correlation between the predicted and actual values of the target variable. A model with an R<sup>2</sup> of 0 always fails to predict the target variable, whereas a model with an R<sup>2</sup> of 1 perfectly predicts the target variable. Any value between 0 and 1 indicates what percentage of the target variable, using this model, can be explained by the features. A model can be given a negative R<sup>2</sup> as well, which indicates that the model is no better than one that naively predicts the mean of the target variable.
For the performance_metric function in the code cell below, you will need to implement the following:
- Use r2_score from sklearn.metrics to perform a performance calculation between y_true and y_predict.
- Assign the performance score to the score variable.
End of explanation """

# Calculate the performance of this model
score = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3])
print "Model has a coefficient of determination, R^2, of {:.3f}.".format(score)

""" Explanation: Question 2 - Goodness of Fit
Assume that a dataset contains five data points and a model made the following predictions for the target variable:
| True Value | Prediction |
| :-------------: | :--------: |
| 3.0 | 2.5 |
| -0.5 | 0.0 |
| 2.0 | 2.1 |
| 7.0 | 7.8 |
| 4.2 | 5.3 |
Would you consider this model to have successfully captured the variation of the target variable? Why or why not?
Run the code cell below to use the performance_metric function and calculate this model's coefficient of determination.
End of explanation """
For the performance_metric function in the code cell below, you will need to implement the following: - Use r2_score from sklearn.metrics to perform a performance calculation between y_true and y_predict. - Assign the performance score to the score variable. End of explanation """ # Calculate the performance of this model score = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3]) print "Model has a coefficient of determination, R^2, of {:.3f}.".format(score) """ Explanation: Question 2 - Goodness of Fit Assume that a dataset contains five data points and a model made the following predictions for the target variable: | True Value | Prediction | | :-------------: | :--------: | | 3.0 | 2.5 | | -0.5 | 0.0 | | 2.0 | 2.1 | | 7.0 | 7.8 | | 4.2 | 5.3 | Would you consider this model to have successfully captured the variation of the target variable? Why or why not? Run the code cell below to use the performance_metric function and calculate this model's coefficient of determination. End of explanation """ # TODO: Import 'train_test_split' from sklearn.cross_validation import train_test_split # TODO: Shuffle and split the data into training and testing subsets X_train, X_test, y_train, y_test = train_test_split(features, prices, test_size=0.20, random_state=123) # Success print "Training and testing split was successful." """ Explanation: Answer: Model has a coefficient of determination, R^2, of 0.923. In this case the model capture well the variation, because it is nearest to 1, the model explain 92.3% the residual variance. Implementation: Shuffle and Split Data Your next implementation requires that you take the Boston housing dataset and split the data into training and testing subsets. Typically, the data is also shuffled into a random order when creating the training and testing subsets to remove any bias in the ordering of the dataset. 
For the code cell below, you will need to implement the following: - Use train_test_split from sklearn.cross_validation to shuffle and split the features and prices data into training and testing sets. - Split the data into 80% training and 20% testing. - Set the random_state for train_test_split to a value of your choice. This ensures results are consistent. - Assign the train and testing splits to X_train, X_test, y_train, and y_test. End of explanation """ # Produce learning curves for varying training set sizes and maximum depths vs.ModelLearning(features, prices) """ Explanation: Question 3 - Training and Testing What is the benefit to splitting a dataset into some ratio of training and testing subsets for a learning algorithm? Hint: What could go wrong with not having a way to test your model? Answer: the importance of split the data randomly, is by splitting the data the bias caused by the entire sample is removed in some way, to validate the quality of the model with the set of test data. And quoting from the course "By separating training and testing sets and graphing performance on each separately, we can get a better idea of how well the model can generalize to unseen data." Analyzing Model Performance In this third section of the project, you'll take a look at several models' learning and testing performances on various subsets of training data. Additionally, you'll investigate one particular algorithm with an increasing 'max_depth' parameter on the full training set to observe how model complexity affects performance. Graphing your model's performance based on varying criteria can be beneficial in the analysis process, such as visualizing behavior that may not have been apparent from the results alone. Learning Curves The following code cell produces four graphs for a decision tree model with different maximum depths. Each graph visualizes the learning curves of the model for both training and testing as the size of the training set is increased. 
Note that the shaded region of a learning curve denotes the uncertainty of that curve (measured as the standard deviation). The model is scored on both the training and testing sets using R<sup>2</sup>, the coefficient of determination. Run the code cell below and use these graphs to answer the following question. End of explanation """ vs.ModelComplexity(X_train, y_train) """ Explanation: Question 4 - Learning the Data Choose one of the graphs above and state the maximum depth for the model. What happens to the score of the training curve as more training points are added? What about the testing curve? Would having more training points benefit the model? Hint: Are the learning curves converging to particular scores? Answer: the max_depth = 3 is the better model, because R<sup>2</sup> has the highest score of the four graphics. The Training score curve decrease as long as training points are augmented; by the graph, it appears that the asymptote is in a ratio of 0.8. The testing curve grows in ascending, and and also it tends to stabilize at 0.8, and as in the training curve it appears that the asymptote is in a ratio of 0.8. In conclusion, the two curves (training and testing) converge to a value close to 0.80. As chart show, the training points converge at 300 training points and it looks steady for many training points (350 and 400), in conclusion add training points seems convenient until 300, after that, the model hasn't significantly improve. Complexity Curves The following code cell produces a graph for a decision tree model that has been trained and validated on the training data using different maximum depths. The graph produces two complexity curves — one for training and one for validation. Similar to the learning curves, the shaded regions of both the complexity curves denote the uncertainty in those curves, and the model is scored on both the training and validation sets using the performance_metric function. 
Run the code cell below and use this graph to answer the following two questions. End of explanation """ # TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV' from sklearn.tree import DecisionTreeRegressor from sklearn.grid_search import GridSearchCV from sklearn.metrics import make_scorer def fit_model(X, y): """ Performs grid search over the 'max_depth' parameter for a decision tree regressor trained on the input data [X, y]. """ # Create cross-validation sets from the training data cv_sets = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.20, random_state = 0) # TODO: Create a decision tree regressor object regressor = DecisionTreeRegressor() # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10 params = {'max_depth': (1,2,3,4,5,6,7,8,9,10)} # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer' scoring_fnc = make_scorer(performance_metric) # TODO: Create the grid search object grid = GridSearchCV(regressor, param_grid= params, cv=cv_sets ,scoring = scoring_fnc) # Fit the grid search object to the data to compute the optimal model grid = grid.fit(X, y) # Return the optimal model after fitting the data return grid.best_estimator_ """ Explanation: Question 5 - Bias-Variance Tradeoff When the model is trained with a maximum depth of 1, does the model suffer from high bias or from high variance? How about when the model is trained with a maximum depth of 10? What visual cues in the graph justify your conclusions? Hint: How do you know when a model is suffering from high bias or high variance? Answer: When the model is trained with maximum depth of 1, the curve is showing high bias. When the model is trained with maximum depth of 10 it is showing high variance. The model with maximum depth of 1 is biased because both curves are too close, this showing the model here is underfited. 
And the opposite is showed at 10 as maximum depth, because both curves are separating one from the other, as depth is added to the model. Question 6 - Best-Guess Optimal Model Which maximum depth do you think results in a model that best generalizes to unseen data? What intuition lead you to this answer? Answer: I think, a value among 4 or 5 is the best for the model, because at this point the variance begin to increasing at inflexion point of training curve. More depth could produce more variance, lesser depth could produce high bias. Applying Occam's Razor principle, which say the "the simplest model that fits the data is also the most plausible" (Abu-Mustafa et al., 2012), the best depth is 4, because this imply a simpler model. Evaluating Model Performance In this final section of the project, you will construct a model and make a prediction on the client's feature set using an optimized model from fit_model. Question 7 - Grid Search What is the grid search technique and how it can be applied to optimize a learning algorithm? Answer: The Grid Search thecnique is a way to explore many combinantions of paremeters, the best performance is selected through assessed using cross-validation cross validated. "is a way of systematically working through multiple combinations of parameter tunes, cross-validating as it goes to determine which tune gives the best performance". Question 8 - Cross-Validation What is the k-fold cross-validation training technique? What benefit does this technique provide for grid search when optimizing a model? Hint: Much like the reasoning behind having a testing set, what could go wrong with using grid search without a cross-validated set? Answer: Is technique used in problems with splitting into training and testing data, chosing the best learning result through best validation result. The method split the sample in k-folds of equal sizes and running k separate learning experiments, then all results from k experiments are averaged. 
The result is more precise than one partition alternative (one training set and one test set) and it has the benefit of being able to use all sample data. Implementation: Fitting a Model Your final implementation requires that you bring everything together and train a model using the decision tree algorithm. To ensure that you are producing an optimized model, you will train the model using the grid search technique to optimize the 'max_depth' parameter for the decision tree. The 'max_depth' parameter can be thought of as how many questions the decision tree algorithm is allowed to ask about the data before making a prediction. Decision trees are part of a class of algorithms called supervised learning algorithms. For the fit_model function in the code cell below, you will need to implement the following: - Use DecisionTreeRegressor from sklearn.tree to create a decision tree regressor object. - Assign this object to the 'regressor' variable. - Create a dictionary for 'max_depth' with the values from 1 to 10, and assign this to the 'params' variable. - Use make_scorer from sklearn.metrics to create a scoring function object. - Pass the performance_metric function as a parameter to the object. - Assign this scoring function to the 'scoring_fnc' variable. - Use GridSearchCV from sklearn.grid_search to create a grid search object. - Pass the variables 'regressor', 'params', 'scoring_fnc', and 'cv_sets' as parameters to the object. - Assign the GridSearchCV object to the 'grid' variable. End of explanation """ # Fit the training data to the model using grid search reg = fit_model(X_train, y_train) # Produce the value for 'max_depth' print "Parameter 'max_depth' is {} for the optimal model.".format(reg.get_params()['max_depth']) """ Explanation: Making Predictions Once a model has been trained on a given set of data, it can now be used to make predictions on new sets of input data. 
In the case of a decision tree regressor, the model has learned what the best questions to ask about the input data are, and can respond with a prediction for the target variable. You can use these predictions to gain information about data where the value of the target variable is unknown — such as data the model was not trained on. Question 9 - Optimal Model What maximum depth does the optimal model have? How does this result compare to your guess in Question 6? Run the code block below to fit the decision tree regressor to the training data and produce an optimal model. End of explanation """ # Produce a matrix for client data client_data = [[5, 17, 15], # Client 1 [4, 32, 22], # Client 2 [8, 3, 12]] # Client 3 # Show predictions for i, price in enumerate(reg.predict(client_data)): print "Predicted selling price for Client {}'s home: ${:,.2f}".format(i+1, price) """ Explanation: Answer: The maximum depth is 4, and is among the range I've chosen, The inflexion point looks like a good start to choose the right model. Question 10 - Predicting Selling Prices Imagine that you were a real estate agent in the Boston area looking to use this model to help price homes owned by your clients that they wish to sell. You have collected the following information from three of your clients: | Feature | Client 1 | Client 2 | Client 3 | | :---: | :---: | :---: | :---: | | Total number of rooms in home | 5 rooms | 4 rooms | 8 rooms | | Neighborhood poverty level (as %) | 17% | 32% | 3% | | Student-teacher ratio of nearby schools | 15-to-1 | 22-to-1 | 12-to-1 | What price would you recommend each client sell his/her home at? Do these prices seem reasonable given the values for the respective features? Hint: Use the statistics you calculated in the Data Exploration section to help justify your response. Run the code block below to have your optimized model make predictions for each client's home. 
End of explanation """ vs.PredictTrials(features, prices, fit_model, client_data) """ Explanation: Answer: First at all, I rounded the prices, because is difficult in reality explain the exact number where they come from. Next, for the client 1, I recommend a selling price of 410,000.00. For the client 2 I recommend a selling price of 233,000.00, and for the client 3 I recommend a price of 893,000.00. The prices looks reasonables giving the features, and all of them are beteewn the miminmun (105,000.00) and maximum (1,024,800.00) prices for the city and they where calculated based on a mathematical model that took into account the variables of number of rooms, the exclusive neighborhood level and the number of students per teacher from nearby schools. More rooms may lead to think that the house would be worth more, and besides the number of rooms, is considered that the level of poor population in the sector is low compared to others (3%) , the exclusivity of the neighborhood becomes more evident for this house, and therefore should have a higher value in price, as it is showing in the model for the customer 3. This is also reinforced by having the neighborhood a better indicator of teachers per student, because this can show a more personalized education, thus increasing the attractiveness of housing and therefore a higher price. The same analysis can be done for the other two customers,for the customer 1 can infer that your house is very close The same analysis can be done for the other two customers. For the customer 1, you can infer that your house is very close to the city average, one would think, that substantial improvements in your house could increase the value of the house. While for client 2, your house is in a neighborhood that has worse standards than other two houses, and this model is reflecting this situation in the forecasted price. 
As conclusion the variables of poverty and education will add or detract from the physical part of the house, such as the number of rooms, new variables can improve forecasts of a model, as we are evaluating here Sensitivity An optimal model is not necessarily a robust model. Sometimes, a model is either too complex or too simple to sufficiently generalize to new data. Sometimes, a model could use a learning algorithm that is not appropriate for the structure of the data given. Other times, the data itself could be too noisy or contain too few samples to allow a model to adequately capture the target variable — i.e., the model is underfitted. Run the code cell below to run the fit_model function ten times with different training and testing sets to see how the prediction for a specific client changes with the data it's trained on. End of explanation """
OpenDataPolicingNC/Traffic-Stops
il/data/New-IL-Data-Review.ipynb
mit
# 2004 --- 2017 ! head ../../IL-New-Data/ILtrafficstops-2016-10-03.csv """ Explanation: New IL Data Review Old Data Summary Simple schema: Just Agency, Gender, Race, Search (T/F), Contraband (T/F), and StopPurpose Only Year (not full date) No officers Date range: * 2005 --- 2014 * 23m stops * https://opendatapolicing.com/il/ End of explanation """ ! ls -l ../../IL-New-Data/IL-DEC182018 # very different format ! head "../../IL-New-Data/IL-DEC182018/2015 ITSS Data/2015 ITSS Data.txt" ! wc -l "../../IL-New-Data/IL-DEC182018/2015 ITSS Data/2015 ITSS Data.txt" """ Explanation: New Data Inspection 2004 --- 2017 Each as separate archive End of explanation """ import pandas as pd filename = "../../IL-New-Data/IL-DEC182018/2015 ITSS Data/2015 ITSS Data.txt" reader = pd.read_csv(filename, sep="~", chunksize=10 ** 8, iterator=True) df = reader.get_chunk(20) df list(df.columns.values) filename = "../../IL-New-Data/IL-DEC182018/2015 ITSS Data/2015 ITSS Data.txt" df = pd.read_csv(filename, sep="~", encoding="iso-8859-1") len(df.index) df.AgencyName.value_counts() df.VehicleSearchConducted.value_counts() is_search = df['VehicleSearchConducted'] == 1 len(df[is_search].index) df[is_search].groupby(df.AgencyName)['AgencyName'].count().sort_values(ascending=False) df.DriverRace.unique() df.DriverSex.unique() """ Explanation: Pandas exploration End of explanation """
google-research/agent-based-epidemic-sim
agent_based_epidemic_sim/learning/covid_ens_simulation.ipynb
apache-2.0
import itertools import numpy as np import matplotlib.pyplot as plt import scipy.stats import pandas as pd from collections import namedtuple from enum import Enum, IntEnum from dataclasses import dataclass import matplotlib.cm as cm import sklearn from sklearn import metrics # Configure plot style sheet plt.style.use('fivethirtyeight') plt.rcParams['axes.titlesize'] = 'medium' # can take 'large', 'x-large' plt.rcParams['axes.labelsize'] = 'medium' import jax import jax.numpy as jnp """ Explanation: COVID19 Exposure Notification System Risk Simulator kpmurphy@google.com, serghiou@google.com (broken link) Last update: 22 August 2020 References We base our approach on these papers Quantifying SARS-CoV-2-infection risk withing the Apple/Google exposure notification framework to inform quarantine recommendations, Amanda Wilson, Nathan Aviles, Paloma Beamer, Zsombor Szabo, Kacey Ernst, Joanna Masel. July 2020 The timing of COVID-19 transmission, Luca Ferretti et al, Sept. 2020 Risk scoring in contact tracing apps, Mark Briers, Marcos Charalambides, Christophe Fraser, Chris Holmes, Radka Jersakova, James Lomax, and Tom Lovett. 26 July 2020 End of explanation """ def infectiousness_gaussian(deltas): mu = -0.3; s = 2.75; ps = np.exp(- np.power(deltas-mu,2) / (2*s*s)) return ps deltas = np.arange(-10, 10, 0.1) ps = infectiousness_gaussian(deltas) plt.figure(); plt.plot(deltas, ps) plt.xlabel('days since symptom onset'); plt.ylabel('infectiousness'); """ Explanation: Infectiousness vs time since onset of symptoms (TOST) Let $\Delta=T^e - T^s$ be the time between when Alice got exposed to Bob and when Bob first showed symptoms. Let $f_{\rm{inf}}(\Delta)$ be the infectiousness. Gaussian approximation We use the Gaussian approximation from the following paper. Risk scoring in contact tracing apps, Mark Briers, Marcos Charalambides, Christophe Fraser, Chris Holmes, Radka Jersakova, James Lomax, and Tom Lovett. 
26 July 2020 $$ f_{inf}(\Delta) = \exp\left( -\frac{ (\Delta - \mu)^2 }{2 \sigma^2} \right) $$ where $\mu=-0.3$ , $\sigma=2.75$ (units of days). We plot this below. End of explanation """ def skew_logistic_scaled(x, alpha, mu, sigma): return scipy.stats.genlogistic.pdf(x, alpha, loc=mu, scale=sigma) def ptost_conditional(ts, incubation): mu = -4 sigma = 1.85 alpha = 5.85 tau = 5.42 fpos = skew_logistic_scaled(ts, alpha, mu, sigma) #fneg = skew_logistic_scaled(ts, alpha, mu, sigma*incubation/tau) # error in paper fneg = skew_logistic_scaled(ts*tau/incubation, alpha, mu, sigma) ps = fpos neg = np.where(ts < 0) ps[neg] = fneg[neg] ps = ps/np.max(ps) return ps def incubation_dist(t): mu = 1.621 sig = 0.418 rv = scipy.stats.lognorm(sig, scale=np.exp(mu)) return rv.pdf(t) def ptost_uncond(tost_times): #p(t) = sum_{k=1}^14 p(incubation=k) ptost(t | k) / max_t( ptost(t|k) ) incub_times = np.arange(1, 14, 1) incub_probs = incubation_dist(incub_times) tost_probs = np.zeros_like(tost_times, dtype=float) for k, incub in enumerate(incub_times): ps = ptost_conditional(tost_times, incub) tost_probs += incub_probs[k] * ps #tost_probs = tost_probs/np.max(tost_probs) return tost_probs infectiousness_curve_times = np.arange(-14, 14+1, 0.1) infectiousness_curve_vals = ptost_uncond(infectiousness_curve_times) def infectiousness_skew_logistic(delta): return np.interp(delta, infectiousness_curve_times, infectiousness_curve_vals) print(infectiousness_skew_logistic(5)) print(infectiousness_skew_logistic(np.array([5]))) tost = np.arange(-10, 10, 0.1) incubs = np.array([3, 5.5, 9]) #https://matplotlib.org/3.1.1/tutorials/colors/colors.html colors = ['tab:blue', 'tab:purple', 'tab:red'] plt.figure() for i, incub in enumerate(incubs): ps = ptost_conditional(tost, incub) #ps = ps/np.max(ps) name = 'incubation = {:0.2f}'.format(incub) plt.plot(tost, ps, label=name, color=colors[i]) ps = ptost_uncond(tost) ps = [infectiousness_skew_logistic(t) for t in tost] qs = 
infectiousness_skew_logistic(tost) assert np.allclose(ps, qs) plt.plot(tost, ps, label='avg', color='k') plt.legend() plt.xlabel('days since onset of symptoms') plt.ylabel('prob(transmission)') """ Explanation: Skew-logistic distribution In Ferretti et al 2020, they note that the infectiousness profile varies depending on the incubation period. We model this as shown below. End of explanation """ def dose_curve_quadratic(d, Dmin=1): Dmin = 1 m = np.power(Dmin,2)/np.power(d, 2) return np.minimum(1, m) d = np.linspace(0, 5, 100) p = dose_curve_quadratic(d) plt.figure() plt.plot(d, p) plt.xlabel('distance (meters)'); plt.ylabel('dose') """ Explanation: Dose vs distance Briers (2020) propose the following quadratic model $$ g(d) = \min(1, D^2_{\min}/d^2) $$ They set $D^2_{\min}=1$ based on argument of the physics of droplet spread. End of explanation """ # from scipy.interpolate import splev, splrep # def dose_curve_spline_fit(): # url = "https://raw.githubusercontent.com/probml/covid19/master/WilsonMasel/stelios-dose-data-scaled.csv" # df = pd.read_csv(url) # distances = df['distance'].to_numpy() # doses = df['dose'].to_numpy() # ndx1 = (distances <= 1) # ndx2 = (distances > 1) # x = distances[ndx1] # y = doses[ndx1] # spline1 = splrep(x, y) # (t, c, k), contains knots, coefficients, degree # x = distances[ndx2] # y = doses[ndx2] # spline2 = splrep(x, y) # return spline1, spline2 # def dose_curve_spline(x, spline1, spline2): # if np.isscalar(x): # x = np.array([x]) # scalar = True # else: # scalar = False # n = len(x) # ndx = np.where(x <= 1) # y1 = splev(x, spline1) # y2 = splev(x, spline2) # y = np.zeros(n) # y[x <= 1] = y1[x <= 1] # y[x > 1] = y2[x > 1] # if scalar: # y = y[0] # return y # spline1, spline2 = dose_curve_spline_fit() # d = np.linspace(0, 5, 100) # p = dose_curve_spline(d, spline1, spline2) # plt.figure() # plt.plot(d, p) # plt.xlabel('distance (meters)'); # plt.ylabel('dose') # plt.yscale('log') """ Explanation: Wilson (2020) use a physical simulator 
of droplet spread. We fit a cubic spline to their Monte Carlo simulation. Results are shown below. End of explanation """ # Lovett paper https://arxiv.org/abs/2007.05057 # Lognormal noise model # mu = slope * log(distance) + intercept' # log(-rssi) ~ N(mu, sigma) # E log(-R) = slope*log(D) + inter # D = exp( (log(-R) - inter) / slope) # attenuation = tx - rx - rssi def atten_to_dist(atten, params): rssi = params.tx - (atten + params.correction) return np.exp((np.log(-rssi) - params.intercept)/params.slope) def dist_to_atten(distance, params): mu = params.intercept + params.slope * np.log(distance) rssi = -np.exp(mu) atten = params.tx - (rssi + params.correction) return atten def dist_to_atten_sample_lognormal(distances, params): if params.sigma == 0: return dist_to_atten(distances, params) N = len(distances) mus = params.intercept + params.slope * np.log(distances) rssi = -scipy.stats.lognorm(s=params.sigma, scale=np.exp(mus)).rvs() atten = params.tx - (rssi + params.correction) return atten # We use regression parameters from Fig 4 of # Lovett paper https://arxiv.org/abs/2007.05057 # estimated from H0H1 data @dataclass class BleParams: slope: float = 0.21 intercept: float = 3.92 sigma: float = np.sqrt(0.33) tx: float = 0.0 correction: float=2.398 name: str = 'briers-lognormal' ble_params = BleParams() attens = np.arange(40, 90) distances = atten_to_dist(attens, ble_params) fig, axs = plt.subplots(1,1) axs = np.reshape(axs, (1,)) ax = axs[0] ax.plot(attens, distances) ax.set_xlabel('attenuation (dB)') ax.set_ylabel('distance (m)') np.sqrt(0.33) """ Explanation: Bluetooth simulator End of explanation """ ble_params_mle = BleParams(sigma=np.sqrt(0.33), name = 'briers-mle') ble_params_low_noise = BleParams(sigma=0.01, name = 'briers-low-noise') ble_params_no_noise = BleParams(sigma=0, name = 'briers-no-noise') ble_params_list = [ble_params_no_noise, ble_params_low_noise, ble_params_mle] distances = [] nrep = 100 for d in range(1, 10+1): for i in range(nrep): 
distances.append(d) distances = np.array(distances) fig, axs = plt.subplots(1,3, figsize=(15,5)) axs = np.reshape(axs, (3,)) for i, params in enumerate(ble_params_list): mu = dist_to_atten(distances, params) np.random.seed(0) attens = dist_to_atten_sample_lognormal(distances, params) ax = axs[i] ax.plot(mu, '-', linewidth=3) ax.plot(attens, '.') ax.set_ylabel('attenuation (dB)') ax.set_xlabel('sample') ax.set_title(params.name) #fname = '../Figures/bluetoothSamples_{}.png'.format(params.name) #plt.savefig(fname) """ Explanation: Noisy simulation End of explanation """ @dataclass class Exposure: duration: float = np.nan distance: float = np.nan atten: float = np.nan days_exp: int = np.nan # days since exposure days_sym: int = np.nan # days since symptom onset @dataclass class ModelParams: ble_params: BleParams = ble_params distance_fun: str = 'quadratic' # quadratic or spline Dmin: float = 1 infectiousness_fun: str = 'skew-logistic' # gaussian or skew-logistic beta: float = 1e-3 params = ModelParams() def compute_dose(expo, params): if not np.isnan(expo.atten): dist = atten_to_dist(expo.atten, params.ble_params) else: dist = expo.distance if params.distance_fun == 'quadratic': fd = dose_curve_quadratic(dist, params.Dmin) elif params.distance_fun == 'spline': fd = dose_curve_spline(dist) else: fd = 1 if not np.isnan(expo.days_sym): if params.infectiousness_fun == 'gaussian': finf = infectiousness_gaussian(expo.days_sym) elif params.infectiousness_fun == 'skew-logistic': finf = infectiousness_skew_logistic(expo.days_sym) else: finf = 1 else: finf = 1 dose = expo.duration * fd * finf return dose def prob_infection(expo, params): dose = compute_dose(expo, params) return 1-np.exp(-params.beta * dose) def prob_infections(exposures, params): dose = 0 for expo in exposures: dose += compute_dose(expo, params) return 1-np.exp(-params.beta * dose) def prob_infection_batch(attenuations, durations, symptom_days, params, distances=None): if distances is None: distances = 
atten_to_dist(attenuations, params.ble_params) if params.distance_fun == 'quadratic': fd = dose_curve_quadratic(distances) elif params.distance_fun == 'spline': fd = dose_curve_spline(distances) if params.infectiousness_fun == 'gaussian': finf = infectiousness_gaussian(symptom_days) elif params.infectiousness_fun == 'skew-logistic': finf = infectiousness_skew_logistic(symptom_days) doses = durations * fd * finf return 1-np.exp(-params.beta * doses) distances = np.array([0.8, 0.9, 1.0, 1.1, 1.2, 1.5, 2.0]) dur = 8*60 ps = [] for dist in distances: expo = Exposure(distance=dist, duration=dur, days_sym = 0) p = prob_infection(expo, params) ps.append(p) qs = prob_infection_batch(None, dur, 0, params, distances) assert np.allclose(ps, qs) print(ps) """ Explanation: Probability of getting infected This depends on 3 factors: how long was the exposure, how far, and how infectious was the transmitter. End of explanation """ # https://enconfig.storage.googleapis.com/enconfig_fixed.html @dataclass class RiskConfig: ble_thresholds: np.array = np.array([]) ble_weights: np.array = np.array([]) inf_levels: np.array = np.array([]) inf_weights: np.array = np.array([]) name: str = '' beta: float = 3.1 * 1e-6 # Wilson table 1 """ Explanation: Risk score End of explanation """ config_swiss = RiskConfig(ble_thresholds = np.array([53, 60]), ble_weights = np.array([1.0, 0.5, 0.0]), name= 'Switzerland') config_germany = RiskConfig(ble_thresholds = np.array([55, 63]), ble_weights = np.array([1.0, 0.5, 0.0]), name= 'Germany') config_ireland = RiskConfig(ble_thresholds = np.array([56, 62]), ble_weights = np.array([1.0, 1.0, 0.0]), name= 'Ireland') config_wilson = RiskConfig(ble_thresholds = np.array([50, 70]), ble_weights = np.array([2.39, 0.6, 0.06]), name= 'Arizona') config_list = [config_swiss, config_germany, config_ireland, config_wilson] attens = np.arange(40, 80) distances = atten_to_dist(attens, ble_params) fig, axs = plt.subplots(1,1, figsize=(8,8)) axs = np.reshape(axs, (1,)) ax = 
axs[0] ax.plot(attens, distances) ax.set_xlabel('attenuation (dB)') ax.set_ylabel('distance (m)') names = [config.name for config in config_list] colors = ['r', 'g', 'b', 'k'] handles = [] for i, config in enumerate(config_list): for j, thresh in enumerate(config.ble_thresholds): dist = atten_to_dist(thresh, ble_params) handle = ax.vlines(thresh, 0, dist, color=colors[i]) ax.hlines(dist, np.min(attens), thresh, color=colors[i]) if j==0: handles.append(handle) ax.legend(handles, names) plt.show() def attenuation_score(atten, thresholds, weights): bin = np.digitize(atten, thresholds) watten = weights[bin] return watten def attenuation_score_batch(attenuations, thresholds, weights): attenuations = np.atleast_1d(attenuations) labels = np.digitize(attenuations, thresholds) vecs = jax.nn.one_hot(labels, num_classes = len(weights)) tmp = jnp.multiply(weights, vecs) scores = jnp.sum(tmp, 1) return scores thresholds = np.array([50, 70]) weights = np.array([2.39, 0.6, 0.06]) attens = np.array([40, 52, 66, 99]) buckets = np.digitize(attens, thresholds) print(buckets) ps = [attenuation_score(a, thresholds, weights) for a in attens] qs = attenuation_score_batch(attens, thresholds, weights) assert np.allclose(ps, qs) rs = np.array([attenuation_score_batch(a, thresholds, weights) for a in attens]).flatten() assert np.allclose(ps, rs) """ Explanation: Attenuation End of explanation """ def make_infectiousness_params_v1(): inf_pre = np.zeros((9), dtype=int) inf_post = np.zeros((5), dtype=int) inf_mid = np.array([1, 3, 4, 5, 6, 6, 6, 6, 5, 4, 3, 2, 2, 1, 1]) inf_levels = np.concatenate((inf_pre, inf_mid, inf_post)) inf_weights = np.array([0, 10**1, 10**1.2, 10**1.4, 10**1.6, 10**1.8, 10**2]) return inf_levels, inf_weights def make_infectiousness_params_v2(): inf_pre = np.zeros((9), dtype=int) inf_post = np.zeros((5), dtype=int) inf_mid6 = np.array([1, 3, 4, 5, 6, 6, 6, 6, 5, 4, 3, 2, 2, 1, 1]) inf_mid = np.ones_like(inf_mid6) ndx = (inf_mid6 >= 5) inf_mid[ndx] = 2 #inf_mid = 
np.array([1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1]) inf_levels = np.concatenate((inf_pre, inf_mid, inf_post)) inf_weights = np.array([0, 10**1.6, 10**2]) return inf_levels, inf_weights def infectiousness_score(days_since_symptoms, inf_levels, inf_weights): # days_since_symptoms can be -14..14 i = days_since_symptoms+14 level = inf_levels[i] return inf_weights[level] def infectiousness_score_batch(symptom_days, inf_levels, inf_weights): # symptom_days is an array of ints in -14..14 symptom_days = np.atleast_1d(symptom_days) inf_labels = inf_levels[symptom_days + 14] inf_vecs = jax.nn.one_hot(inf_labels, num_classes = len(inf_weights)) tmp = jnp.multiply(inf_weights, inf_vecs) scores = jnp.sum(tmp, 1) return scores # debugging ts = np.arange(0, 5, 1) levels, weights = make_infectiousness_params_v1() ps = [infectiousness_score(t, levels, weights) for t in ts] qs = infectiousness_score_batch(ts, levels, weights) assert np.allclose(ps, qs) qs = [infectiousness_score_batch(t, levels, weights) for t in ts] qs2 = np.array(qs).flatten() print(ps) print(qs) print(qs2) assert np.allclose(ps, qs2) ts = np.arange(-14, 14+1, 1) levels, weights = make_infectiousness_params_v1() ps = infectiousness_score_batch(ts, levels, weights) import matplotlib.cm as cm cmap = cm.get_cmap('jet') colors = [cmap(c/7) for c in levels] plt.figure() plt.bar(ts, ps, color = colors) plt.xlabel('days since symptom onset'); plt.ylabel('transmission risk'); levels, weights = make_infectiousness_params_v2() ps = infectiousness_score_batch(ts, levels, weights) palette =['black', 'green', 'red'] colors = [palette[c] for c in levels] plt.figure() plt.bar(ts, ps, color = colors) plt.xlabel('days since symptom onset'); plt.ylabel('transmission risk'); """ Explanation: Infectiousness levels Here is a figure from Wilson et al, "Quantifying SARS-CoV-2 infection risk within the Google/Apple exposure notification framework to inform quarantine recommendations". 
Colors are the 6 transmission levels supported by GAEN v1.1. <img src="https://github.com/probml/covid19/blob/master/Figures/infectiousness-TOST-MaselFig5A.png?raw=true"> End of explanation """ def risk_score(expo, config): winf = infectiousness_score(expo.days_sym, config.inf_levels, config.inf_weights) watten = attenuation_score(expo.atten, config.ble_thresholds, config.ble_weights) return expo.duration * watten * winf def prob_risk_score(expo, config): r = risk_score(expo, config) return 1-np.exp(-config.beta * r) # Same interface as # prob_infection_batch(attenuations, durations, symptom_days, params) def prob_risk_score_batch(attenuations, durations, symptom_days, config): attenuations = np.atleast_1d(attenuations) durations = np.atleast_1d(durations) symptom_days = np.atleast_1d(symptom_days) winf = infectiousness_score_batch(symptom_days, config.inf_levels, config.inf_weights) watten = attenuation_score_batch(attenuations, config.ble_thresholds, config.ble_weights) risks = durations * watten * winf return 1-np.exp(-config.beta * risks) """ Explanation: Probabilistic risk End of explanation """ levels, weights = make_infectiousness_params_v1() config_wilsonv1 = RiskConfig(ble_thresholds = np.array([50, 70]), ble_weights = np.array([2.39, 0.6, 0.06]), inf_weights = weights, inf_levels = levels, name = 'thresh2_inf6') levels, weights = make_infectiousness_params_v2() config_wilsonv2 = RiskConfig(ble_thresholds = np.array([50, 70]), ble_weights = np.array([2.39, 0.6, 0.06]), inf_weights = weights, inf_levels = levels, name = 'thresh2_inf2') levels, weights = make_infectiousness_params_v1() config_wilsonv3 = RiskConfig(ble_thresholds = np.array([50, 60, 70]), # made up ble_weights = np.array([2.39, 0.6, 0.06]), # made up inf_weights = weights, inf_levels = levels, name = 'thresh3_inf6') levels, weights = make_infectiousness_params_v2() config_wilsonv4 = RiskConfig(ble_thresholds = np.array([50, 60, 70]), # made up ble_weights = np.array([2.39, 0.6, 0.2, 0.06]), # 
made up inf_weights = weights, inf_levels = levels, name = 'thresh3_inf2') attens = 50 # np.linspace(40, 80, 3, endpoint=True) symptoms = np.arange(0,5) durations = 80 ps = prob_infection_batch(attens, durations, symptoms, params) print(ps) qs = prob_risk_score_batch(attens, durations, symptoms, config_wilsonv1) print(qs) """ Explanation: Risk score plots End of explanation """ def plot_risk_vs_symptoms_and_durations(attens, durations, symptoms, config, params): ndur = len(durations) if ndur==4: fig, axs = plt.subplots(2,2, figsize=(15,15), sharex=True, sharey=True) axs = np.reshape(axs, (4,)) elif ndur==2: fig, axs = plt.subplots(1,2, figsize=(15,10), sharex=True, sharey=True) axs = np.reshape(axs, (2,)) elif ndur==1: fig, axs = plt.subplots(1,1) axs = np.reshape(axs, (1,)) else: print('unknown figure layout') return cmap = cm.get_cmap('plasma') nattens = len(attens) colors = [cmap(c/nattens) for c in range(nattens)] for i, dur in enumerate(durations): ax = axs[i] labels = [] handles = [] for j, atten in enumerate(attens): ps = prob_infection_batch(atten, dur, symptoms, params) qs = prob_risk_score_batch(atten, dur, symptoms, config) label = 'atten={}'.format(atten) labels.append(label) h = ax.plot(symptoms, ps, '-', color=colors[j], label=label) handles.append(h) ax.plot(symptoms, qs, ':', linewidth=3, color=colors[j]) ax.set_yscale('log') ax.set_title('config = {}, dur = {}, atten = {} to {}'.format( config.name, dur, np.min(attens), np.max(attens))) #ax.legend(handles, labels) ax.set_xlabel('days since symptom onset') ax.set_ylabel('prob. 
infection') def plot_risk_vs_symptoms(attens, dur, symptoms, config, params, ax): cmap = cm.get_cmap('plasma') nattens = len(attens) colors = [cmap(c/nattens) for c in range(nattens)] for j, atten in enumerate(attens): ps = prob_infection_batch(atten, dur, symptoms, params) qs = prob_risk_score_batch(atten, dur, symptoms, config) label = 'atten={}'.format(atten) h = ax.plot(symptoms, ps, '-', color=colors[j], label=label) ax.plot(symptoms, qs, ':', linewidth=3, color=colors[j]) ax.set_yscale('log') ax.set_title('{}, dur={}, A={}:{}'.format( config.name, dur, np.min(attens), np.max(attens))) ax.set_xlabel('days since symptom onset') ax.set_ylabel('prob. infection') attens = np.linspace(40, 80, 10, endpoint=True) symptoms = np.arange(-12,12) #durations = np.array([15,1*60,4*60,8*60]) duration = 15 config_list = [config_wilsonv1, config_wilsonv2, config_wilsonv3, config_wilsonv4] fig, axs = plt.subplots(2,2, figsize=(12,12), sharex=True, sharey=True) axs = np.reshape(axs, (4,)) for i, config in enumerate(config_list): plot_risk_vs_symptoms(attens, duration, symptoms, config, params, axs[i]) """ Explanation: True risk curve vs approximation End of explanation """ # compute min acceptible probability of infection atten = dist_to_atten(2, ble_params) expo = Exposure(atten=atten, duration=15, days_sym = 0) pthresh = prob_infection(expo, params) print(pthresh) expo = Exposure(atten=atten, duration=15, days_sym = 5) pthresh = prob_infection(expo, params) print(pthresh) def make_curves_batch(attens, durations, symptoms, config, params): vals = itertools.product(durations, attens, symptoms) X = np.vstack([np.array(v) for v in vals]) durations_grid = X[:,0] attens_grid = X[:,1] sym_grid = np.array(X[:,2], dtype=int) ps = prob_infection_batch(attens_grid, durations_grid, sym_grid, params) qs = prob_risk_score_batch(attens_grid, durations_grid, sym_grid, config) return ps, qs def make_curves_batch_noise(attens, durations, symptoms, config, params, ble_params): distances = 
atten_to_dist(attens, ble_params) attens = dist_to_atten_sample_lognormal(distances, ble_params) vals = itertools.product(durations, attens, symptoms) X = np.vstack([np.array(v) for v in vals]) durations_grid = X[:,0] attens_grid = X[:,1] sym_grid = np.array(X[:,2], dtype=int) ps = prob_infection_batch(attens_grid, durations_grid, sym_grid, params) qs = prob_risk_score_batch(attens_grid, durations_grid, sym_grid, config) return ps, qs import itertools attens = np.linspace(40, 80, 10, endpoint=True) symptoms = np.arange(-5, 10) # must be int durations = np.linspace(5, 1*60, 10, endpoint=True) config_list = [config_wilsonv1, config_wilsonv2, config_wilsonv3, config_wilsonv4] #config_list = [config_wilsonv1] fig, axs = plt.subplots(2,2, figsize=(8,8), sharex=True, sharey=True) axs = np.reshape(axs, (4,)) for i, config in enumerate(config_list): ps, qs = make_curves_batch(attens, durations, symptoms, config, params) yhat = (ps > pthresh) fpr, tpr, thresholds = metrics.roc_curve(yhat, qs) auc = metrics.auc(fpr, tpr) frac_pos = np.sum(yhat)/len(yhat) #print(frac_pos) ax = axs[i] ax.plot(fpr, tpr) ax.set_title('AUC={:0.2f}, config={}'.format(auc, config.name)) ax.set_xlabel('FPR') ax.set_ylabel('TPR') print(ble_params) """ Explanation: ROC plots End of explanation """ import itertools n = 10 attens = np.linspace(40, 80, 10, endpoint=True) symptoms = np.arange(-5, 10) # must be int durations = np.linspace(5, 1*60, 10, endpoint=True) config_list = [config_wilsonv1, config_wilsonv2, config_wilsonv3, config_wilsonv4] ble_params_roc = ble_params_mle fig, axs = plt.subplots(2,2, figsize=(8,8), sharex=True, sharey=True) axs = np.reshape(axs, (4,)) for i, config in enumerate(config_list): tprs = [] aucs = [] median_fpr = np.linspace(0, 1, 100) np.random.seed(1041) for j in range(n): ps, qs = make_curves_batch_noise(attens, durations, symptoms, config, params, ble_params_roc) yhat = (ps > pthresh) fpr, tpr, threshold = metrics.roc_curve(yhat, qs) auc = metrics.auc(fpr, tpr) 
frac_pos = np.sum(yhat)/len(yhat) interp_tpr = np.interp(median_fpr, fpr, tpr) interp_tpr[0] = 0.0 tprs.append(interp_tpr) aucs.append(auc) if j % 10 == 0: ax = axs[i] ax.plot(fpr, tpr, color='blue', lw=1, alpha=0.1) ax.set_xlabel('FPR') ax.set_ylabel('TPR') ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='grey', alpha=0.8) median_tpr = np.median(tprs, axis=0) median_tpr[-1] = 1.0 median_auc = metrics.auc(median_fpr, median_tpr) auc_lo = np.quantile(aucs, 0.025, axis=0) auc_hi = np.quantile(aucs, 0.975, axis=0) std_auc = np.std(aucs) ax.plot(median_fpr, median_tpr, color='red', label=r'Median ROC (AUC = %0.2f $\pm$ %0.2f)' % (median_auc, std_auc), lw=2, alpha=0.8) ax.set_title('AUC={:0.2f}-{:0.2f}, config={}'.format(auc_lo, auc_hi, config.name)) tprs_hi = np.quantile(tprs, 0.025, axis=0) tprs_lo = np.quantile(tprs, 0.975, axis=0) ax.fill_between(median_fpr, tprs_lo, tprs_hi, color='grey', alpha=0.4, label=r'$\pm$ 1 std. dev.') # ax.legend(loc="lower right") """ Explanation: ROC with noise End of explanation """ #@title # Simulation configuration #@markdown Make selections and then **click the Play button on the left top #@markdown corner**. #@markdown --- #@markdown ## Attenuation #@markdown ### Functional form #@markdown Default = quadratic - not implemented yet (need to simulate distances). distance_fun = 'spline' #@param ['quadratic', 'spline'] {type:"string"} #@markdown ### Quadratic parameters #@markdown Any value is for D_min is possible, but for any value outwith [0, 5] #@markdown this is a straight line (default = 1). 
Dmin = 1 #@param {type:"slider", min:0.5, max:5, step:0.1}

def prob_infection_batch(attenuations, durations, symptom_days, params, Dmin, distances=None):
    """Vectorized probability of infection for a batch of exposures.

    Args:
      attenuations: array-like of BLE attenuations (dB); ignored when
        `distances` is supplied directly.
      durations: array-like of exposure durations (minutes).
      symptom_days: array-like of ints, days relative to symptom onset.
      params: ModelParams selecting the dose and infectiousness curves.
      Dmin: parameter of the quadratic dose curve (set by the form above).
      distances: optional pre-computed distances, skipping the
        attenuation-to-distance conversion.

    Returns:
      Exponential dose-response probabilities, 1 - exp(-beta * dose).

    Raises:
      ValueError: if params selects an unknown curve name.
    """
    if distances is None:
        distances = atten_to_dist(attenuations, params.ble_params)
    if params.distance_fun == 'quadratic':
        fd = dose_curve_quadratic(distances, Dmin)
    elif params.distance_fun == 'spline':
        fd = dose_curve_spline(distances)
    else:
        # Previously an unrecognized choice fell through and crashed later
        # with NameError on `fd`; fail fast with a clear message instead.
        raise ValueError('unknown distance_fun: {}'.format(params.distance_fun))
    if params.infectiousness_fun == 'gaussian':
        finf = infectiousness_gaussian(symptom_days)
    elif params.infectiousness_fun == 'skew-logistic':
        finf = infectiousness_skew_logistic(symptom_days)
    else:
        raise ValueError('unknown infectiousness_fun: {}'.format(params.infectiousness_fun))
    doses = durations * fd * finf
    return 1 - np.exp(-params.beta * doses)

def make_curves_batch(attens, durations, symptoms, config, params, Dmin):
    """Evaluate model probabilities (ps) and risk-score approximations (qs)
    on the full cartesian grid durations x attens x symptoms."""
    vals = itertools.product(durations, attens, symptoms)
    X = np.vstack([np.array(v) for v in vals])
    durations_grid = X[:, 0]
    attens_grid = X[:, 1]
    sym_grid = np.array(X[:, 2], dtype=int)
    ps = prob_infection_batch(attens_grid, durations_grid, sym_grid, params, Dmin)
    qs = prob_risk_score_batch(attens_grid, durations_grid, sym_grid, config)
    return ps, qs

#@markdown ### Noise in conversion of attenuation to distance
#@markdown Default = 0.01
sigma = 0.01 #@param {type:"slider", min:0, max:0.05, step:0.001}
ble_params = BleParams(sigma = sigma)
#@markdown ---
#@markdown ## Infectiousness
#@markdown ### Functional form
#@markdown Default = skew-logistic
infectiousness_fun = 'skew-logistic' #@param ['skew-logistic', 'gaussian'] {type:"string"}
# NOTE(review): the `distance_fun` selected in the form above is not wired in
# here -- the model is hard-coded to 'quadratic' (the form text says the other
# option is "not implemented yet"); confirm which is intended.
params = ModelParams(distance_fun = 'quadratic', infectiousness_fun = infectiousness_fun)

#@title # Express Notification configuration
#@markdown Make selections and then **click the Play button on the left top
#@markdown corner**.
#@markdown ---
#@markdown ## Attenuation weight (%)
immediate = 129 #@param {type:"slider", min:0, max:200, step:1}
near = 80 #@param {type:"slider", min:0, max:200, step:1}
medium = 27 #@param {type:"slider", min:0, max:200, step:1}
other = 0 #@param {type:"slider", min:0, max:200, step:1}
# Per-bucket weights in np.digitize bucket order (below/between/above thresholds).
atten_weights = [immediate, near, medium, other]
#@markdown ---
#@markdown ## Attenuation thresholds (dB)
#@markdown Please make sure that each successive threshold is equal to or greater
#@markdown than the previous.
immediate_near = 60 #@param {type:"slider", min:0, max:255, step:1}
near_medium = 70 #@param {type:"slider", min:0, max:255, step:1}
medium_far = 80 #@param {type:"slider", min:0, max:255, step:1}
# Bucket edges must be ascending for np.digitize (see attenuation_score_batch).
atten_thresholds = [immediate_near, near_medium, medium_far]
#@markdown ---
#@markdown ## Infectiousness weight (%)
standard = 44 #@param {type:"slider", min:0, max:250, step:1}
high= 75 #@param {type:"slider", min:0, max:250, step:1}
# Index 0 is the 'Drop' level and always carries zero weight.
inf_weights = [0, standard, high]
#@markdown ---
#@markdown ## Symptom onset (days since onset)
minus_14 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
minus_13 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
minus_12 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
minus_11 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
minus_10 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
minus_09 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
minus_08 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
minus_07 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
minus_06 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
minus_05 = 'Standard' #@param ['Drop', 'Standard', 'High'] {type:"string"}
minus_04 = 'Standard' #@param ['Drop', 'Standard', 'High'] {type:"string"}
minus_03 = 'Standard' #@param ['Drop', 'Standard', 'High'] {type:"string"}
minus_02 = 'High' #@param ['Drop', 'Standard', 'High'] {type:"string"}
minus_01 = 'High' #@param ['Drop', 'Standard', 'High'] {type:"string"}
day_zero = 'High' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_01 = 'High' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_02 = 'High' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_03 = 'High' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_04 = 'Standard' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_05 = 'Standard' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_06 = 'Standard' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_07 = 'Standard' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_08 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_09 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_10 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_11 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_12 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_13 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_14 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
plus_15 = 'Drop' #@param ['Drop', 'Standard', 'High'] {type:"string"}
# 29 entries covering day -14 .. +14 relative to symptom onset.
# NOTE(review): plus_15 is defined above but intentionally(?) excluded from
# the list -- confirm whether day +15 should really be dropped.
symptom_onset = [minus_14, minus_13, minus_12, minus_11, minus_10,
                 minus_09, minus_08, minus_07, minus_06, minus_05,
                 minus_04, minus_03, minus_02, minus_01, day_zero,
                 plus_01, plus_02, plus_03, plus_04, plus_05, plus_06,
                 plus_07, plus_08, plus_09, plus_10, plus_11, plus_12,
                 plus_13, plus_14]
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual
from IPython.display import display, clear_output

def infectiousness_levels():
    """Map the symptom-onset dropdown strings to integer levels in day
    order: Drop -> 0, Standard -> 1, anything else ('High') -> 2."""
    levels = []
    for index, level in enumerate(symptom_onset):
        if level == 'Drop':
            levels.append(0)
        elif level == 'Standard':
            levels.append(1)
        else:
            levels.append(2)
    return levels

# Default level-per-day profile taken from the reference config page:
# https://enconfig.storage.googleapis.com/enconfig_fixed.html
levels = [0] * 10 + [1, 0, 2, 1] + [2] * 4 + [1] * 6 + [0, 1, 0, 0, 1, 0]
config_default = RiskConfig(ble_thresholds = np.array([30, 50, 60]),
                            ble_weights = np.array([150, 100, 50, 0]),
                            inf_weights = np.array([0, 100, 100]),
                            inf_levels = np.array(levels),
                            name = 'Default')
# Configuration built from the interactive form values above.
config_custom = RiskConfig(ble_thresholds = np.array(atten_thresholds),
                           ble_weights = np.array(atten_weights),
                           inf_weights = np.array(inf_weights),
                           inf_levels = np.array(infectiousness_levels()),
                           name = 'Custom')
# Reference country configurations, all sharing the v2 (2-level)
# infectiousness parameters.
levels, weights = make_infectiousness_params_v2()
config_swiss = RiskConfig(ble_thresholds = np.array([53, 60]),
                          ble_weights = np.array([1.0, 0.5, 0.0]),
                          inf_weights = weights,
                          inf_levels = levels,
                          name= 'Switzerland')
config_germany = RiskConfig(ble_thresholds = np.array([55, 63]),
                            ble_weights = np.array([1.0, 0.5, 0.0]),
                            inf_weights = weights,
                            inf_levels = levels,
                            name= 'Germany')
config_ireland = RiskConfig(ble_thresholds = np.array([56, 62]),
                            ble_weights = np.array([1.0, 1.0, 0.0]),
                            inf_weights = weights,
                            inf_levels = levels,
                            name= 'Ireland')
config_arizona = RiskConfig(ble_thresholds = np.array([50, 70]),
                            ble_weights = np.array([2.39, 0.6, 0.06]),
                            inf_weights = weights,
                            inf_levels = levels,
                            name = 'Arizona')
# "Mean" configuration: elementwise average of the four reference configs.
ble_thresholds = {}
ble_thresholds['swiss'] = np.array([53, 60])
ble_thresholds['germany'] = np.array([55, 63])
ble_thresholds['ireland'] = np.array([56, 62])
ble_thresholds['arizona'] = np.array([50, 70])
threshold_mean = np.mean(np.array([ble_thresholds[k] for k in ble_thresholds]), axis=0)
ble_weights = {}
ble_weights['swiss'] = np.array([1.0, 0.5, 0.0])
ble_weights['germany'] = np.array([1.0, 0.5, 0.0])
ble_weights['ireland'] = np.array([1.0, 1.0, 0.0])
ble_weights['arizona'] = np.array([2.39, 0.6, 0.06])
weight_mean = np.mean(np.array([ble_weights[k] for k in ble_weights]), axis=0)
config_mean = RiskConfig(ble_thresholds = threshold_mean,
                         ble_weights = weight_mean,
                         inf_weights = weights,
                         inf_levels = levels,
                         name= 'Mean')
# Evaluation grid shared by the comparison plots below.
attens = np.linspace(40, 80, 10, endpoint=True)
symptoms = np.arange(-5, 10) # must be int
durations = np.linspace(5, 1*60, 10, endpoint=True)
config_list = [config_swiss, config_germany, config_ireland, config_arizona, config_mean, config_default, config_custom] # Plot fig, axs = plt.subplots(4,4, figsize=(24,14), sharex=True, sharey=True) for i, config in enumerate(config_list): ps, qs = make_curves_batch(attens, durations, symptoms, config, params, Dmin) yhat = (ps > pthresh) # ROC fpr, tpr, thresholds = metrics.roc_curve(yhat, qs) auc = metrics.auc(fpr, tpr) ax = axs[int(i / 2),2 if i % 2 else 0] fpr_array = np.array(fpr) idx = [np.abs(fpr_array - 0.1).argmin()] ax.plot(fpr, tpr, '-go' if config.name=='Custom' else '-bo', markevery=idx) ax.text(fpr[idx] + 0.03, tpr[idx] - 0.07, ['risk = %.3f' % thresholds[i] for i in idx][0]) ax.set_title('AUC={:0.2f}, config={}'.format(auc, config.name)) ax.set_xlabel('FPR') ax.set_ylabel('TPR') # PR precision, recall, thresholds = metrics.precision_recall_curve(yhat, qs) auc = metrics.auc(recall, precision) frac_pos = np.sum(yhat)/len(yhat) ax = axs[int(i / 2),3 if i % 2 else 1] ax.plot(recall, precision, color='orange' if config.name=='Custom' else 'red') ax.hlines(frac_pos, 0, 1, linestyles='dashed') ax.text(0, frac_pos + 0.03, 'prevalence = %.2f' % frac_pos) ax.set_title('AUC={:0.2f}, config={}'.format(auc, config.name)) ax.set_xlabel('Recall') ax.set_ylabel('Precision') """ Explanation: Interactive End of explanation """
lukemans/Hello-world
t81_558_class10_lstm.ipynb
apache-2.0
from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue)
def encode_text_dummy(df, name):
    """One-hot encode column `name` in place as `name-value` columns and
    drop the original column."""
    dummies = pd.get_dummies(df[name])
    for x in dummies.columns:
        dummy_name = "{}-{}".format(name, x)
        df[dummy_name] = dummies[x]
    df.drop(name, axis=1, inplace=True)

# Encode text values to indexes(i.e. [1],[2],[3] for red,green,blue).
def encode_text_index(df, name):
    """Label-encode column `name` in place.

    Returns the array of class labels so that encoded index i can be
    mapped back to its original string value.
    """
    le = preprocessing.LabelEncoder()
    df[name] = le.fit_transform(df[name])
    return le.classes_

# Encode a numeric column as zscores
def encode_numeric_zscore(df, name, mean=None, sd=None):
    """Replace column `name` with its z-score in place.

    mean/sd may be supplied explicitly to reuse statistics computed on
    another split (e.g. apply training-set statistics to a test set).
    """
    if mean is None:
        mean = df[name].mean()
    if sd is None:
        sd = df[name].std()
    df[name] = (df[name] - mean) / sd

# Convert all missing values in the specified column to the median
def missing_median(df, name):
    """Fill missing values in column `name` with the column median, in place."""
    med = df[name].median()
    df[name] = df[name].fillna(med)

# Convert a Pandas dataframe to the x,y inputs that TensorFlow needs
def to_xy(df, target):
    """Split `df` into (x, y) numpy arrays for TensorFlow.

    x holds every column except `target` as float32; y is int32 for an
    integer target (classification) and float32 otherwise (regression).
    y keeps a 2-D (n, 1) shape.
    """
    result = [x for x in df.columns if x != target]
    # find out the type of the target column. For a single column this is a
    # scalar dtype; it is only iterable when `target` names duplicate columns.
    target_type = df[target].dtypes
    target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type
    print(target_type)  # debug: show the resolved target dtype
    # `.values` replaces the deprecated DataFrame.as_matrix() (removed in
    # pandas 1.0) and works on all pandas versions.
    if target_type in (np.int64, np.int32):
        # Classification
        return df[result].values.astype(np.float32), df[[target]].values.astype(np.int32)
    else:
        # Regression
        return df[result].values.astype(np.float32), df[[target]].values.astype(np.float32)

# Nicely formatted time string
def hms_string(sec_elapsed):
    """Format a number of seconds as H:MM:SS.ss."""
    h = int(sec_elapsed / (60 * 60))
    m = int((sec_elapsed % (60 * 60)) / 60)
    s = sec_elapsed % 60
    return "{}:{:>02}:{:>05.2f}".format(h, m, s)

# Regression chart, we will see more of this chart in the next class.
def chart_regression(pred,y): t = pd.DataFrame({'pred' : pred.flatten(), 'y' : y_test.flatten()}) t.sort_values(by=['y'],inplace=True) a = plt.plot(t['y'].tolist(),label='expected') b = plt.plot(t['pred'].tolist(),label='prediction') plt.ylabel('output') plt.legend() plt.show() """ Explanation: T81-558: Applications of Deep Neural Networks Class 10: Recurrent and LSTM Networks * Instructor: Jeff Heaton, School of Engineering and Applied Science, Washington University in St. Louis * For more information visit the class website. Common Functions Some of the common functions from previous classes that we will use again. End of explanation """ # x = [ [32], [41], [39], [20], [15] ] y = [ 1, -1, 0, -1, 1 ] print(x) print(y) """ Explanation: Data Structure for Recurrent Neural Networks Previously we trained neural networks with input ($x$) and expected output ($y$). $X$ was a matrix, the rows were training examples and the columns were values to be predicted. The definition of $x$ will be expanded and y will stay the same. Dimensions of training set ($x$): * Axis 1: Training set elements (sequences) (must be of the same size as $y$ size) * Axis 2: Members of sequence * Axis 3: Features in data (like input neurons) Previously, we might take as input a single stock price, to predict if we should buy (1), sell (-1), or hold (0). End of explanation """ from IPython.display import display, HTML import pandas as pd import numpy as np x = np.array(x) print(x[:,0]) df = pd.DataFrame({'x':x[:,0], 'y':y}) display(df) """ Explanation: This is essentially building a CSV file from scratch, to see it as a data frame, use the following: End of explanation """ x = [ [32,1383], [41,2928], [39,8823], [20,1252], [15,1532] ] y = [ 1, -1, 0, -1, 1 ] print(x) print(y) Again, very similar to what we did before. The following shows this as a data frame. 
from IPython.display import display, HTML import pandas as pd import numpy as np x = np.array(x) print(x[:,0]) df = pd.DataFrame({'price':x[:,0], 'volume':x[:,1], 'y':y}) display(df) """ Explanation: You might want to put volume in with the stock price. End of explanation """ x = [ [[32,1383],[41,2928],[39,8823],[20,1252],[15,1532]], [[35,8272],[32,1383],[41,2928],[39,8823],[20,1252]], [[37,2738],[35,8272],[32,1383],[41,2928],[39,8823]], [[34,2845],[37,2738],[35,8272],[32,1383],[41,2928]], [[32,2345],[34,2845],[37,2738],[35,8272],[32,1383]], ] y = [ 1, -1, 0, -1, 1 ] print(x) print(y) """ Explanation: Now we get to sequence format. We want to predict something over a sequence, so the data format needs to add a dimension. A maximum sequence length must be specified, but the individual sequences can be of any length. End of explanation """ x = [ [[32],[41],[39],[20],[15]], [[35],[32],[41],[39],[20]], [[37],[35],[32],[41],[39]], [[34],[37],[35],[32],[41]], [[32],[34],[37],[35],[32]], ] y = [ 1, -1, 0, -1, 1 ] print(x) print(y) """ Explanation: Even if there is only one feature (price), the 3rd dimension must be used: End of explanation """ %matplotlib inline import matplotlib import numpy as np import matplotlib.pyplot as plt import math def sigmoid(x): a = [] for item in x: a.append(1/(1+math.exp(-item))) return a def f2(x): a = [] for item in x: a.append(math.tanh(item)) return a x = np.arange(-10., 10., 0.2) y1 = sigmoid(x) y2 = f2(x) print("Sigmoid") plt.plot(x,y1) plt.show() print("Hyperbolic Tangent(tanh)") plt.plot(x,y2) plt.show() """ Explanation: Recurrent Neural Networks So far the neural networks that we’ve examined have always had forward connections. The input layer always connects to the first hidden layer. Each hidden layer always connects to the next hidden layer. The final hidden layer always connects to the output layer. 
This manner to connect layers is the reason that these networks are called “feedforward.” Recurrent neural networks are not so rigid, as backward connections are also allowed. A recurrent connection links a neuron in a layer to either a previous layer or the neuron itself. Most recurrent neural network architectures maintain state in the recurrent connections. Feedforward neural networks don’t maintain any state. A recurrent neural network’s state acts as a sort of short-term memory for the neural network. Consequently, a recurrent neural network will not always produce the same output for a given input. Recurrent neural networks do not force the connections to flow only from one layer to the next, from input layer to output layer. A recurrent connection occurs when a connection is formed between a neuron and one of the following other types of neurons: The neuron itself A neuron on the same level A neuron on a previous level Recurrent connections can never target the input neurons or the bias neurons. The processing of recurrent connections can be challenging. Because the recurrent links create endless loops, the neural network must have some way to know when to stop. A neural network that entered an endless loop would not be useful. To prevent endless loops, we can calculate the recurrent connections with the following three approaches: Context neurons Calculating output over a fixed number of iterations Calculating output until neuron output stabilizes We refer to neural networks that use context neurons as a simple recurrent network (SRN). The context neuron is a special neuron type that remembers its input and provides that input as its output the next time that we calculate the network. For example, if we gave a context neuron 0.5 as input, it would output 0. Context neurons always output 0 on their first call. However, if we gave the context neuron a 0.6 as input, the output would be 0.5. 
We never weight the input connections to a context neuron, but we can weight the output from a context neuron just like any other connection in a network. Context neurons allow us to calculate a neural network in a single feedforward pass. Context neurons usually occur in layers. A layer of context neurons will always have the same number of context neurons as neurons in its source layer, as demonstrated here: As you can see from the above layer, two hidden neurons that are labeled hidden 1 and hidden 2 directly connect to the two context neurons. The dashed lines on these connections indicate that these are not weighted connections. These weightless connections are never dense. If these connections were dense, hidden 1 would be connected to both hidden 1 and hidden 2. However, the direct connection simply joins each hidden neuron to its corresponding context neuron. The two context neurons form dense, weighted connections to the two hidden neurons. Finally, the two hidden neurons also form dense connections to the neurons in the next layer. The two context neurons would form two connections to a single neuron in the next layer, four connections to two neurons, six connections to three neurons, and so on. You can combine context neurons with the input, hidden, and output layers of a neural network in many different ways. In the next two sections, we explore two common SRN architectures. In 1990, Elman introduced a neural network that provides pattern recognition to time series. This neural network type has one input neuron for each stream that you are using to predict. There is one output neuron for each time slice you are trying to predict. A single-hidden layer is positioned between the input and output layer. A layer of context neurons takes its input from the hidden layer output and feeds back into the same hidden layer. 
Consequently, the context layers always have the same number of neurons as the hidden layer, as demonstrated here: The Elman neural network is a good general-purpose architecture for simple recurrent neural networks. You can pair any reasonable number of input neurons to any number of output neurons. Using normal weighted connections, the two context neurons are fully connected with the two hidden neurons. The two context neurons receive their state from the two non-weighted connections (dashed lines) from each of the two hidden neurons. Backpropagation through time works by unfolding the SRN to become a regular neural network. To unfold the SRN, we construct a chain of neural networks equal to how far back in time we wish to go. We start with a neural network that contains the inputs for the current time, known as t. Next we replace the context with the entire neural network, up to the context neuron’s input. We continue for the desired number of time slices and replace the final context neuron with a 0. The following diagram shows an unfolded Elman neural network for two time slices. As you can see, there are inputs for both t (current time) and t-1 (one time slice in the past). The bottom neural network stops at the hidden neurons because you don’t need everything beyond the hidden neurons to calculate the context input. The bottom network structure becomes the context to the top network structure. Of course, the bottom structure would have had a context as well that connects to its hidden neurons. However, because the output neuron above does not contribute to the context, only the top network (current time) has one. Understanding LSTM Some useful resources on LSTM/recurrent neural networks. Understanding LSTM Networks Recurrent Neural Networks in TensorFlow Long Short Term Neural Network (LSTM) are a type of recurrent unit that is often used with deep neural networks. 
For TensorFlow, LSTM can be thought of as a layer type that can be combined with other layer types, such as dense. LSTM makes use two transfer function types internally. The first type of transfer function is the sigmoid. This transfer function type is used form gates inside of the unit. The sigmoid transfer function is given by the following equation: $$ \text{S}(t) = \frac{1}{1 + e^{-t}} $$ The second type of transfer function is the hyperbolic tangent (tanh) function. This function is used to scale the output of the LSTM, similarly to how other transfer functions have been used in this course. The graphs for these functions are shown here: End of explanation """ import numpy as np import pandas import tensorflow as tf from sklearn import metrics from tensorflow.models.rnn import rnn, rnn_cell from tensorflow.contrib import skflow SEQUENCE_SIZE = 6 HIDDEN_SIZE = 20 NUM_CLASSES = 4 def char_rnn_model(X, y): byte_list = skflow.ops.split_squeeze(1, SEQUENCE_SIZE, X) cell = rnn_cell.LSTMCell(HIDDEN_SIZE) _, encoding = rnn.rnn(cell, byte_list, dtype=tf.float32) return skflow.models.logistic_regression(encoding, y) classifier = skflow.TensorFlowEstimator(model_fn=char_rnn_model, n_classes=NUM_CLASSES, steps=100, optimizer='Adam', learning_rate=0.01, continue_training=True) """ Explanation: Both of these two functions compress their output to a specific range. For the sigmoid function, this range is 0 to 1. For the hyperbolic tangent function, this range is -1 to 1. LSTM maintains an internal state and produces an output. The following diagram shows an LSTM unit over three time slices: the current time slice (t), as well as the previous (t-1) and next (t+1) slice: The values $\hat{y}$ are the output from the unit, the values ($x$) are the input to the unit and the values $c$ are the context values. Both the output and context values are always fed to the next time slice. 
The context values allow LSTM is made up of three gates: Forget Gate (f_t) - Controls if/when the context is forgotten. (MC) Input Gate (i_t) - Controls if/when a value should be remembered by the context. (M+/MS) Output Gate (o_t) - Controls if/when the remembered value is allowed to pass from the unit. (RM) Mathematically, the above diagram can be thought of as the following: These are vector values. First, calculate the forget gate value. This gate determines if the short term memory is forgotten. The value $b$ is a bias, just like the bias neurons we saw before. Except LSTM has a bias for every gate: $b_t$, $b_i$, and $b_o$. $$ f_t = S(W_f \cdot [\hat{y}_{t-1}, x_t] + b_f) $$ Next, calculate the input gate value. This gate's value determines what will be remembered. $$ i_t = S(W_i \cdot [\hat{y}_{t-1},x_t] + b_i) $$ Calculate a candidate context value (a value that might be remembered). This value is called $\tilde{c}$. $$ \tilde{C}t = \tanh(W_C \cdot [\hat{y}{t-1},x_t]+b_C) $$ Determine the new context ($C_t$). Do this by remembering the candidate context ($i_t$), depending on input gate. Forget depending on the forget gate ($f_t$). $$ C_t = f_t \cdot C_{t-1}+i_t \cdot \tilde{C}_t $$ Calculate the output gate ($o_t$): $$ o_t = S(W_o \cdot [\hat{y}_{t-1},x_t] + b_o ) $$ Calculate the actual output ($\hat{y}_t$): $$ \hat{y}_t = o_t \cdot \tanh(C_t) $$ Simple TensorFlow LSTM Example The following code creates the LSTM network. 
End of explanation """ x = [ [[0],[1],[1],[0],[0],[0]], [[0],[0],[0],[2],[2],[0]], [[0],[0],[0],[0],[3],[3]], [[0],[2],[2],[0],[0],[0]], [[0],[0],[3],[3],[0],[0]], [[0],[0],[0],[0],[1],[1]] ] x = np.array(x,dtype=np.float32) y = np.array([1,2,3,2,3,1]) classifier.fit(x, y) test = [[[0],[0],[0],[0],[3],[3]]] test = np.array(test) classifier.predict(test) """ Explanation: The following code trains on a data set (x) with a max sequence size of 6 (columns) and 6 training elements (rows) End of explanation """ # How to read data from the stock market. from IPython.display import display, HTML import pandas.io.data as web import datetime start = datetime.datetime(2014, 1, 1) end = datetime.datetime(2014, 12, 31) f=web.DataReader('tsla', 'yahoo', start, end) display(f) import numpy as np prices = f.Close.pct_change().tolist() # to percent changes prices = prices[1:] # skip the first, no percent change SEQUENCE_SIZE = 5 x = [] y = [] for i in range(len(prices)-SEQUENCE_SIZE-1): #print(i) window = prices[i:(i+SEQUENCE_SIZE)] after_window = prices[i+SEQUENCE_SIZE] window = [[x] for x in window] #print("{} - {}".format(window,after_window)) x.append(window) y.append(after_window) x = np.array(x) print(len(x)) from tensorflow.contrib import skflow from tensorflow.models.rnn import rnn, rnn_cell import tensorflow as tf HIDDEN_SIZE = 20 def char_rnn_model(X, y): byte_list = skflow.ops.split_squeeze(1, SEQUENCE_SIZE, X) cell = rnn_cell.LSTMCell(HIDDEN_SIZE) _, encoding = rnn.rnn(cell, byte_list, dtype=tf.float32) return skflow.models.linear_regression(encoding, y) regressor = skflow.TensorFlowEstimator(model_fn=char_rnn_model, n_classes=1, steps=100, optimizer='Adam', learning_rate=0.01, continue_training=True) regressor.fit(x, y) # Try an in-sample prediction from sklearn import metrics # Measure RMSE error. RMSE is common for regression. 
pred = regressor.predict(x) score = np.sqrt(metrics.mean_squared_error(pred,y)) print("Final score (RMSE): {}".format(score)) # Try out of sample start = datetime.datetime(2015, 1, 1) end = datetime.datetime(2015, 12, 31) f=web.DataReader('tsla', 'yahoo', start, end) import numpy as np prices = f.Close.pct_change().tolist() # to percent changes prices = prices[1:] # skip the first, no percent change SEQUENCE_SIZE = 5 x = [] y = [] for i in range(len(prices)-SEQUENCE_SIZE-1): window = prices[i:(i+SEQUENCE_SIZE)] after_window = prices[i+SEQUENCE_SIZE] window = [[x] for x in window] x.append(window) y.append(after_window) x = np.array(x) # Measure RMSE error. RMSE is common for regression. pred = regressor.predict(x) score = np.sqrt(metrics.mean_squared_error(pred,y)) print("Out of sample score (RMSE): {}".format(score)) """ Explanation: Stock Market Example End of explanation """ import os import pandas as pd from sklearn.cross_validation import train_test_split import tensorflow.contrib.learn as skflow import numpy as np from sklearn import metrics path = "./data/" filename = os.path.join(path,"t81_558_train.csv") train_df = pd.read_csv(filename) train_df.drop('id',1,inplace=True) train_x, train_y = to_xy(train_df,'outcome') train_x, test_x, train_y, test_y = train_test_split( train_x, train_y, test_size=0.25, random_state=42) # Create a deep neural network with 3 hidden layers of 50, 25, 10 regressor = skflow.TensorFlowDNNRegressor(hidden_units=[50, 25, 10], steps=5000) # Early stopping early_stop = skflow.monitors.ValidationMonitor(test_x, test_y, early_stopping_rounds=200, print_steps=50) # Fit/train neural network regressor.fit(train_x, train_y, monitor=early_stop) # Measure RMSE error. RMSE is common for regression. 
pred = regressor.predict(test_x) score = np.sqrt(metrics.mean_squared_error(pred,test_y)) print("Final score (RMSE): {}".format(score)) #################### # Build submit file #################### from IPython.display import display, HTML filename = os.path.join(path,"t81_558_test.csv") submit_df = pd.read_csv(filename) ids = submit_df.Id submit_df.drop('Id',1,inplace=True) submit_x = submit_df.as_matrix() pred_submit = regressor.predict(submit_x) submit_df = pd.DataFrame({'Id': ids, 'outcome': pred_submit[:,0]}) submit_filename = os.path.join(path,"t81_558_jheaton_submit.csv") submit_df.to_csv(submit_filename, index=False) display(submit_df) """ Explanation: Assignment 3 Solution Basic neural network solution: End of explanation """ import matplotlib.pyplot as plt from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import RandomForestRegressor # Build a forest and compute the feature importances forest = RandomForestRegressor(n_estimators=50, random_state=0, verbose = True) print("Training random forest") forest.fit(train_x, train_y) importances = forest.feature_importances_ std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0) indices = np.argsort(importances)[::-1] # Print the feature ranking #train_df.drop('outcome',1,inplace=True) bag_cols = train_df.columns.values print("Feature ranking:") for f in range(train_x.shape[1]): print("{}. {} ({})".format(f + 1, bag_cols[indices[f]], importances[indices[f]])) The following code uses engineered features. 
import os import pandas as pd from sklearn.cross_validation import train_test_split import tensorflow.contrib.learn as skflow import numpy as np from sklearn import metrics path = "./data/" filename = os.path.join(path,"t81_558_train.csv") train_df = pd.read_csv(filename) train_df.drop('id',1,inplace=True) #train_df.drop('g',1,inplace=True) #train_df.drop('e',1,inplace=True) train_df.insert(0, "a-b", train_df.a - train_df.b) #display(train_df) train_x, train_y = to_xy(train_df,'outcome') train_x, test_x, train_y, test_y = train_test_split( train_x, train_y, test_size=0.25, random_state=42) # Create a deep neural network with 3 hidden layers of 50, 25, 10 regressor = skflow.TensorFlowDNNRegressor(hidden_units=[50, 25, 10], steps=5000) # Early stopping early_stop = skflow.monitors.ValidationMonitor(test_x, test_y, early_stopping_rounds=200, print_steps=50) # Fit/train neural network regressor.fit(train_x, train_y, monitor=early_stop) # Measure RMSE error. RMSE is common for regression. pred = regressor.predict(test_x) score = np.sqrt(metrics.mean_squared_error(pred,test_y)) print("Final score (RMSE): {}".format(score)) # foxtrot bravo # charlie alpha """ Explanation: The following code uses a random forest to rank the importance of features. This can be used both to rank the origional features and new ones created. End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/ipsl/cmip6/models/sandbox-2/ocean.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'ipsl', 'sandbox-2', 'ocean') """ Explanation: ES-DOC CMIP6 Model Properties - Ocean MIP Era: CMIP6 Institute: IPSL Source ID: SANDBOX-2 Topic: Ocean Sub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing. Properties: 133 (101 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-20 15:02:45 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Seawater Properties 3. Key Properties --&gt; Bathymetry 4. Key Properties --&gt; Nonoceanic Waters 5. Key Properties --&gt; Software Properties 6. Key Properties --&gt; Resolution 7. Key Properties --&gt; Tuning Applied 8. Key Properties --&gt; Conservation 9. Grid 10. Grid --&gt; Discretisation --&gt; Vertical 11. Grid --&gt; Discretisation --&gt; Horizontal 12. Timestepping Framework 13. Timestepping Framework --&gt; Tracers 14. Timestepping Framework --&gt; Baroclinic Dynamics 15. Timestepping Framework --&gt; Barotropic 16. 
Timestepping Framework --&gt; Vertical Physics 17. Advection 18. Advection --&gt; Momentum 19. Advection --&gt; Lateral Tracers 20. Advection --&gt; Vertical Tracers 21. Lateral Physics 22. Lateral Physics --&gt; Momentum --&gt; Operator 23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff 24. Lateral Physics --&gt; Tracers 25. Lateral Physics --&gt; Tracers --&gt; Operator 26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff 27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity 28. Vertical Physics 29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details 30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers 31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum 32. Vertical Physics --&gt; Interior Mixing --&gt; Details 33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers 34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum 35. Uplow Boundaries --&gt; Free Surface 36. Uplow Boundaries --&gt; Bottom Boundary Layer 37. Boundary Forcing 38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction 39. Boundary Forcing --&gt; Momentum --&gt; Lateral Friction 40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration 41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing 1. Key Properties Ocean key properties 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of ocean model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of ocean model code (NEMO 3.6, MOM 5.0,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.model_family') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OGCM" # "slab ocean" # "mixed layer ocean" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Model Family Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of ocean model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.basic_approximations') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Primitive equations" # "Non-hydrostatic" # "Boussinesq" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Basic approximations made in the ocean. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.prognostic_variables') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Potential temperature" # "Conservative temperature" # "Salinity" # "U-velocity" # "V-velocity" # "W-velocity" # "SSH" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.5. Prognostic Variables Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N List of prognostic variables in the ocean component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Linear" # "Wright, 1997" # "Mc Dougall et al." # "Jackett et al. 2006" # "TEOS 2010" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Seawater Properties Physical properties of seawater in ocean 2.1. 
Eos Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EOS for sea water End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Potential temperature" # "Conservative temperature" # TODO - please enter value(s) """ Explanation: 2.2. Eos Functional Temp Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Temperature used in EOS for sea water End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Practical salinity Sp" # "Absolute salinity Sa" # TODO - please enter value(s) """ Explanation: 2.3. Eos Functional Salt Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Salinity used in EOS for sea water End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Pressure (dbars)" # "Depth (meters)" # TODO - please enter value(s) """ Explanation: 2.4. Eos Functional Depth Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Depth or pressure used in EOS for sea water ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "TEOS 2010" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 2.5. 
Ocean Freezing Point Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 2.6. Ocean Specific Heat Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Specific heat in ocean (cpocean) in J/(kg K) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 2.7. Ocean Reference Density Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Boussinesq reference density (rhozero) in kg / m3 End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Present day" # "21000 years BP" # "6000 years BP" # "LGM" # "Pliocene" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Bathymetry Properties of bathymetry in ocean 3.1. Reference Dates Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Reference date of bathymetry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.type') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 3.2. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the bathymetry fixed in time in the ocean ? 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. Ocean Smoothing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe any smoothing or hand editing of bathymetry in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.bathymetry.source') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.4. Source Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe source of bathymetry in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Nonoceanic Waters Non oceanic waters treatement in ocean 4.1. Isolated Seas Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how isolated seas is performed End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. River Mouth Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how river mouth mixing or estuaries specific treatment is performed End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Software Properties Software properties of ocean code 5.1. 
Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Resolution Resolution in the ocean grid 6.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Canonical Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Range Horizontal Resolution Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Range of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 6.4. Number Of Horizontal Gridpoints Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 6.5. Number Of Vertical Levels Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Number of vertical levels resolved on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.6. Is Adaptive Grid Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Default is False. Set true if grid resolution changes during execution. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 6.7. 
Thickness Level 1 Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Thickness of first surface ocean level (in meters) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Tuning Applied Tuning methodology for ocean component 7.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. &amp;Document the relative weight given to climate performance metrics versus process oriented metrics, &amp;and on the possible conflicts with parameterization level tuning. In particular describe any struggle &amp;with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics of the global mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Key Properties --&gt; Conservation Conservation in the ocean component 8.1. Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Brief description of conservation methodology End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.scheme') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Energy" # "Enstrophy" # "Salt" # "Volume of ocean" # "Momentum" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 8.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N Properties conserved in the ocean by the numerical schemes End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Consistency Properties Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Any additional consistency properties (energy conversion, pressure gradient discretisation, ...)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.4. Corrected Conserved Prognostic Variables Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Set of variables which are conserved by more than the numerical scheme alone. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.5. Was Flux Correction Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Does conservation involve flux correction ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Grid Ocean grid 9.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of grid in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Z-coordinate" # "Z*-coordinate" # "S-coordinate" # "Isopycnic - sigma 0" # "Isopycnic - sigma 2" # "Isopycnic - sigma 4" # "Isopycnic - other" # "Hybrid / Z+S" # "Hybrid / Z+isopycnic" # "Hybrid / other" # "Pressure referenced (P)" # "P*" # "Z**" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Grid --&gt; Discretisation --&gt; Vertical Properties of vertical discretisation in ocean 10.1. 
Coordinates Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of vertical coordinates in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 10.2. Partial Steps Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Using partial steps with Z or Z vertical coordinate in ocean ?* End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Lat-lon" # "Rotated north pole" # "Two north poles (ORCA-style)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11. Grid --&gt; Discretisation --&gt; Horizontal Type of horizontal discretisation scheme in ocean 11.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal grid type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Arakawa B-grid" # "Arakawa C-grid" # "Arakawa E-grid" # "N/a" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.2. Staggering Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Horizontal grid staggering type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Finite difference" # "Finite volumes" # "Finite elements" # "Unstructured grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.3. 
Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Horizontal discretisation scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12. Timestepping Framework Ocean Timestepping Framework 12.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of time stepping in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Via coupling" # "Specific treatment" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12.2. Diurnal Cycle Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Diurnal cycle type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Leap-frog + Asselin filter" # "Leap-frog + Periodic Euler" # "Predictor-corrector" # "Runge-Kutta 2" # "AM3-LF" # "Forward-backward" # "Forward operator" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Timestepping Framework --&gt; Tracers Properties of tracers time stepping in ocean 13.1. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Tracers time stepping scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.2. 
Time Step Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Tracers time step (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Preconditioned conjugate gradient" # "Sub cyling" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Timestepping Framework --&gt; Baroclinic Dynamics Baroclinic dynamics in ocean 14.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Baroclinic dynamics type End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Leap-frog + Asselin filter" # "Leap-frog + Periodic Euler" # "Predictor-corrector" # "Runge-Kutta 2" # "AM3-LF" # "Forward-backward" # "Forward operator" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Baroclinic dynamics scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.3. Time Step Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Baroclinic time step (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "split explicit" # "implicit" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15. 
Timestepping Framework --&gt; Barotropic Barotropic time stepping in ocean 15.1. Splitting Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Time splitting method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.2. Time Step Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Barotropic time step (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16. Timestepping Framework --&gt; Vertical Physics Vertical physics time stepping in ocean 16.1. Method Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Details of vertical time stepping in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17. Advection Ocean advection 17.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of advection in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.momentum.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Flux form" # "Vector form" # TODO - please enter value(s) """ Explanation: 18. Advection --&gt; Momentum Properties of lateral momemtum advection scheme in ocean 18.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of lateral momemtum advection scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.2. Scheme Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of ocean momemtum advection scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.momentum.ALE') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 18.3. ALE Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Using ALE for vertical advection ? (if vertical coordinates are sigma) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 19. Advection --&gt; Lateral Tracers Properties of lateral tracer advection scheme in ocean 19.1. Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Order of lateral tracer advection scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 19.2. Flux Limiter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Monotonic flux limiter for lateral tracer advection scheme in ocean ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 19.3. 
Effective Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Effective order of limited lateral tracer advection scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.4. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Descriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Ideal age" # "CFC 11" # "CFC 12" # "SF6" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19.5. Passive Tracers Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Passive tracers advected End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.6. Passive Tracers Advection Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Is advection of passive tracers different than active ? if so, describe. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.vertical_tracers.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20. Advection --&gt; Vertical Tracers Properties of vertical tracer advection scheme in ocean 20.1. Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Descriptive text for vertical tracer advection scheme in ocean (e.g. 
MUSCL, PPM-H5, PRATHER,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 20.2. Flux Limiter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Monotonic flux limiter for vertical tracer advection scheme in ocean ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 21. Lateral Physics Ocean lateral physics 21.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of lateral physics in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Eddy active" # "Eddy admitting" # TODO - please enter value(s) """ Explanation: 21.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of transient eddy representation in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Horizontal" # "Isopycnal" # "Isoneutral" # "Geopotential" # "Iso-level" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22. Lateral Physics --&gt; Momentum --&gt; Operator Properties of lateral physics operator for momentum in ocean 22.1. Direction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Direction of lateral physics momemtum scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Harmonic" # "Bi-harmonic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.2. Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Order of lateral physics momemtum scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Second order" # "Higher order" # "Flux limiter" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22.3. Discretisation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Discretisation of lateral physics momemtum scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Space varying" # "Time + space varying (Smagorinsky)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Lateral Physics --&gt; Momentum --&gt; Eddy Viscosity Coeff Properties of eddy viscosity coeff in lateral physics momemtum scheme in the ocean 23.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Lateral physics momemtum eddy viscosity coeff type in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 23.2. 
Constant Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant, value of eddy viscosity coeff in lateral physics momemtum scheme (in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.3. Variable Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If space-varying, describe variations of eddy viscosity coeff in lateral physics momemtum scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.4. Coeff Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe background eddy viscosity coeff in lateral physics momemtum scheme (give values in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.5. Coeff Backscatter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there backscatter in eddy viscosity coeff in lateral physics momemtum scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 24. Lateral Physics --&gt; Tracers Properties of lateral physics for tracers in ocean 24.1. 
Mesoscale Closure Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there a mesoscale closure in the lateral physics tracers scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 24.2. Submesoscale Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Horizontal" # "Isopycnal" # "Isoneutral" # "Geopotential" # "Iso-level" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25. Lateral Physics --&gt; Tracers --&gt; Operator Properties of lateral physics operator for tracers in ocean 25.1. Direction Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Direction of lateral physics tracers scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Harmonic" # "Bi-harmonic" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.2. Order Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Order of lateral physics tracers scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Second order" # "Higher order" # "Flux limiter" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.3. Discretisation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Discretisation of lateral physics tracers scheme in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant" # "Space varying" # "Time + space varying (Smagorinsky)" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26. Lateral Physics --&gt; Tracers --&gt; Eddy Diffusity Coeff Properties of eddy diffusity coeff in lateral physics tracers scheme in the ocean 26.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Lateral physics tracers eddy diffusity coeff type in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 26.2. Constant Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 26.3. 
Variable Coefficient Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 26.4. Coeff Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 26.5. Coeff Backscatter Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there backscatter in eddy diffusity coeff in lateral physics tracers scheme ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "GM" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27. Lateral Physics --&gt; Tracers --&gt; Eddy Induced Velocity Properties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean 27.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EIV in lateral physics tracers in the ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 27.2. 
Constant Val Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If EIV scheme for tracers is constant, specify coefficient value (M2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.3. Flux Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EIV flux (advective or skew) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.4. Added Diffusivity Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of EIV added diffusivity (constant, flow dependent or none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 28. Vertical Physics Ocean Vertical Physics 28.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of vertical physics in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 29. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Details Properties of vertical physics in ocean 29.1. Langmuir Cells Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there Langmuir cells mixing in upper ocean ? 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure - TKE" # "Turbulent closure - KPP" # "Turbulent closure - Mellor-Yamada" # "Turbulent closure - Bulk Mixed Layer" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 30. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Tracers *Properties of boundary layer (BL) mixing on tracers in the ocean * 30.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of boundary layer mixing for tracers in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 30.2. Closure Order Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If turbulent BL mixing of tracers, specific order of closure (0, 1, 2.5, 3) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 30.3. Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant BL mixing of tracers, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 30.4. 
Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background BL mixing of tracers coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure - TKE" # "Turbulent closure - KPP" # "Turbulent closure - Mellor-Yamada" # "Turbulent closure - Bulk Mixed Layer" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 31. Vertical Physics --&gt; Boundary Layer Mixing --&gt; Momentum *Properties of boundary layer (BL) mixing on momentum in the ocean * 31.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of boundary layer mixing for momentum in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 31.2. Closure Order Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: FLOAT&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If turbulent BL mixing of momentum, specific order of closure (0, 1, 2.5, 3) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 31.3. Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant BL mixing of momentum, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 31.4. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background BL mixing of momentum coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Non-penetrative convective adjustment" # "Enhanced vertical diffusion" # "Included in turbulence closure" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 32. Vertical Physics --&gt; Interior Mixing --&gt; Details *Properties of interior mixing in the ocean * 32.1. Convection Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of vertical convection in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 32.2. Tide Induced Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how tide induced mixing is modelled (barotropic, baroclinic, none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 32.3. Double Diffusion Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there double diffusion End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 32.4. Shear Mixing Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is there interior shear mixing End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure / TKE" # "Turbulent closure - Mellor-Yamada" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 33. Vertical Physics --&gt; Interior Mixing --&gt; Tracers *Properties of interior mixing on tracers in the ocean * 33.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of interior mixing for tracers in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 33.2. Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant interior mixing of tracers, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 33.3. Profile Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ? 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 33.4. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background interior mixing of tracers coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Constant value" # "Turbulent closure / TKE" # "Turbulent closure - Mellor-Yamada" # "Richardson number dependent - PP" # "Richardson number dependent - KT" # "Imbeded as isopycnic vertical coordinate" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 34. Vertical Physics --&gt; Interior Mixing --&gt; Momentum *Properties of interior mixing on momentum in the ocean * 34.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of interior mixing for momentum in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 34.2. Constant Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If constant interior mixing of momentum, specific coefficient (m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 34.3. 
Profile Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 34.4. Background Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Background interior mixing of momentum coefficient, (schema and value in m2/s - may by none) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 35. Uplow Boundaries --&gt; Free Surface Properties of free surface in ocean 35.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of free surface in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Linear implicit" # "Linear filtered" # "Linear semi-explicit" # "Non-linear implicit" # "Non-linear filtered" # "Non-linear semi-explicit" # "Fully explicit" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 35.2. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Free surface scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 35.3. 
Embeded Seaice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the sea-ice embeded in the ocean model (instead of levitating) ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 36. Uplow Boundaries --&gt; Bottom Boundary Layer Properties of bottom boundary layer in ocean 36.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of bottom boundary layer in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Diffusive" # "Acvective" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 36.2. Type Of Bbl Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of bottom boundary layer in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 36.3. Lateral Mixing Coef Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: INTEGER&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 If bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 36.4. 
Sill Overflow Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe any specific treatment of sill overflows End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37. Boundary Forcing Ocean boundary forcing 37.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of boundary forcing in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.2. Surface Pressure Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.3. Momentum Flux Correction Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.4. Tracers Flux Correction Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.5. Wave Effects Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how wave effects are modelled at ocean surface. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.6. River Runoff Budget Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how river runoff from land surface is routed to ocean and any global adjustment done. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 37.7. Geothermal Heating Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how geothermal heating is present at ocean bottom. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Linear" # "Non-linear" # "Non-linear (drag function of speed of tides)" # "Constant drag coefficient" # "None" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 38. Boundary Forcing --&gt; Momentum --&gt; Bottom Friction Properties of momentum bottom friction in ocean 38.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of momentum bottom friction in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "None" # "Free-slip" # "No-slip" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 39. Boundary Forcing --&gt; Momentum --&gt; Lateral Friction Properties of momentum lateral friction in ocean 39.1. Type Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of momentum lateral friction in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "1 extinction depth" # "2 extinction depth" # "3 extinction depth" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 40. Boundary Forcing --&gt; Tracers --&gt; Sunlight Penetration Properties of sunlight penetration scheme in ocean 40.1. Scheme Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of sunlight penetration scheme in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 40.2. Ocean Colour Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the ocean sunlight penetration scheme ocean colour dependent ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 40.3. 
Extinction Depth Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe and list extinctions depths for sunlight penetration scheme (if applicable). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Freshwater flux" # "Virtual salt flux" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 41. Boundary Forcing --&gt; Tracers --&gt; Fresh Water Forcing Properties of surface fresh water forcing in ocean 41.1. From Atmopshere Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of surface fresh water forcing from atmos in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Freshwater flux" # "Virtual salt flux" # "Real salt flux" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 41.2. From Sea Ice Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of surface fresh water forcing from sea-ice in ocean End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 41.3. Forced Mode Restoring Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Type of surface salinity restoring in forced mode (OMIP) End of explanation """
quantumlib/ReCirq
docs/qaoa/binary_paintshop.ipynb
apache-2.0
# @title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2021 The Cirq Developers End of explanation """ from typing import Sequence, Tuple import numpy as np try: import cirq except ImportError: print("installing cirq...") !pip install --quiet cirq print("installed cirq.") import cirq import cirq_ionq as ionq """ Explanation: Binary Paintshop Problem with Quantum Approximate Optimization Algorithm <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://quantumai.google/cirq/experiments/qaoa/binary_paintshop>"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/quantumlib/ReCirq/blob/master/docs/qaoa/binary_paintshop.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/quantumlib/ReCirq/blob/master/docs/qaoa/binary_paintshop"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/ReCirq/docs/qaoa/binary_paintshop"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a> </td> </table> End of explanation """ CAR_PAIR_COUNT = 10 car_sequence = np.random.permutation([x for x in range(CAR_PAIR_COUNT)] * 2) 
print(car_sequence) """ Explanation: Binary Paintshop Problem Assume an automotive paint shop and a random, but fixed sequence of 2*n cars. Each car has a identical partner that only differs in the color it has to be painted. End of explanation """ def color_changes(paint_bitstring: Sequence[int], car_sequence: Sequence[int]) -> int: """Count the number of times the color changes if the robots paint each car in car_sequence according to paint_bitstring, which notes the color for the first car in each pair. Args: paint_bitstring: A sequence that determines the color to paint the first car in pair i. For example, 0 for blue and nonzero for red. car_sequence: A sequence that determines which cars are paired together Returns: Count of the number of times the robots change the color """ color_sequence = [] painted_once = set() for car in car_sequence: if car in painted_once: # paint the other color for the second car in the pair color_sequence.append(not paint_bitstring[car]) else: # paint the noted color for the first car in the pair color_sequence.append(paint_bitstring[car]) painted_once.add(car) paint_change_counter = 0 # count the number of times two adjacent cars differ in color for color0, color1 in zip(color_sequence, color_sequence[1:]): if color0 != color1: paint_change_counter += 1 return paint_change_counter """ Explanation: The task is to paint the cars such that in the end for every pair of cars one is painted in red and the other in blue. The objective of the following minimization procedure is to minimize the number of color changes in the paintshop. End of explanation """ def spin_glass(car_sequence: Sequence[int]) -> Sequence[Tuple[int, int, int]]: """Assign interactions between adjacent cars. Assign a ferromagnetic(1) interaction if both elements of the pair are the first/second in their respective pairs. Otheriwse, assign an antiferromagnetic(-1) interaction. Yield a tuple with the two paired cars followed by the chosen interaction. 
""" ferromagnetic = -1 antiferromagnetic = 1 appeared_already = set() for car0, car1 in zip(car_sequence, car_sequence[1:]): if car0 == car1: continue if car0 in appeared_already: appeared_already.add(car0) if car1 in appeared_already: yield car0, car1, ferromagnetic else: yield car0, car1, antiferromagnetic else: appeared_already.add(car0) if car1 in appeared_already: yield car0, car1, antiferromagnetic else: yield car0, car1, ferromagnetic """ Explanation: If two consecutive cars in the sequence are painted in different colors the robots have to rinse the old color, clean the nozzles and flush in the new color. This color change procedure costs time, paint, water and ultimately costs money, which is why we want to minimize the number of color changes. However, a rearrangement of the car sequence is not at our disposal (because of restrictions that are posed by the remainig manufacturing processes), but we can decide once we reach the first car of each car pair which color to paint the pair first. When we have chosen the color for the first car the other car has to be painted in the other respective color. Obvious generalizations exist, for example more than two colors and groups of cars with more than 2 cars where it is permissible to exchange colors, however for demonstration purposes it suffices to consider the here presented binary version of the paintshop problem. It is NP-hard to solve the binary paintshop problem exactly as well as approximately with an arbitrary performance guarantee. A performance guarantee in this context would be a proof that an approximation algorithm never gives us a solution with a number of color changes that is more than some factor times the optimal number of color changes. This is the situation where substantial quantum speedup can be assumed (c.f. Quantum Computing in the NISQ era and beyond). 
The quantum algorithm presented here can deliver, on average, better solutions than all polynomial runtime heuristics specifically developed for the paintshop problem in constant time (constant query complexity) (c.f. Beating classical heuristics for the binary paint shop problem with the quantum approximate optimization algorithm). Spin Glass To be able to solve the binary paintshop problem with the Quantum Approximate Optimization Algorithm (QAOA) we need to translate the problem to a spin glass problem. Interestingly, that is possible with no spatial overhead, i.e. the spin glass has as many spins as the sequence has car pairs. The state of every spin represents the color we paint the respective first car in the seqence of every car pair. Every second car is painted with the repsective other color. The interactions of the spin glass can be deduced proceeding through the fixed car sequence: If two cars are adjacent to each other and both of them are either the first or the second car in their respective car pairs we can add a ferromagnetic interaction to the spin glass in order to penalize the color change between these two cars. If two cars are next to each other and one of the cars is the first and the other the second in their respective car pairs we have to add a antiferromagnetic interaction to the spin glass in order to penalize the color change because in this case the color for the car that is the second car in its car pair is exactly the opposite. All color changes in the car sequence are equivalent which is why we have equal magnitude ferromagnetic and antiferromagnetic interactions and additionally we choose unit magnitude interactions. 
End of explanation """ def phase_separator( gamma: float, qubit_register: Sequence[cirq.Qid], car_sequence: Sequence[int] ) -> Sequence[cirq.Operation]: """Yield a sequence of Molmer Sorensen gates to implement a phase separator over the ferromagnetic/antiferromagnetic interactions between adjacent cars, as defined by spin_glass """ for car_pair0, car_pair1, interaction in spin_glass(car_sequence): yield cirq.ms(interaction * gamma).on( qubit_register[car_pair0], qubit_register[car_pair1] ) qubit_register = cirq.LineQubit.range(CAR_PAIR_COUNT) circuit = cirq.Circuit([phase_separator(0.1, qubit_register, car_sequence)]) """ Explanation: Quantum Approximate Optimization Algorithm We want to execute a one block version of the QAOA circuit for the binary paintshop instance with p = 1 on a trapped-ion quantum computer of IonQ. This device is composed of 11 fully connected qubits with average single- and two-qubit fidelities of 99.5% and 97.5% respectively (Benchmarking an 11-qubit quantum computer). As most available quantum hardware, trapped ion quantum computers only allow the application of gates from a restricted native gate set predetermined by the physics of the quantum processor. To execute an arbitrary gate, compilation of the desired gate into available gates is required. For trapped ions, a generic native gate set consists of a parameterized two-qubit rotation, the Molmer Sorensen gate, $R_\mathrm{XX}(\alpha)=\mathrm{exp}[-\mathrm{i}\alpha \sigma_\mathrm{x}^{(i)}\sigma_\mathrm{x}^{(j)}/2]$ and a parametrized single qubit rotation: $R(\theta,\phi)=\begin{pmatrix} \cos{(\theta/2)} & -\mathrm{i}\mathrm{e}^{-\mathrm{i}\phi}\sin{(\theta/2)} \-\mathrm{i}\mathrm{e}^{\mathrm{i}\phi}\sin{(\theta/2)} & \cos{(\theta/2)} \end{pmatrix}$ QAOA circuits employ parametrized two body $\sigma_z$ rotations, $R_\mathrm{ZZ}(\gamma)=\mathrm{exp}[-i\gamma \sigma_\mathrm{z}^{(i)}\sigma_\mathrm{z}^{(j)}]$. 
To circumvent a compilation overhead and optimally leverage the Ion Trap, we inject pairs of Hadamard gates $H H^{\dagger} = 1$ for every qubit in between the two body $\sigma_z$ rotations. This means we are able to formulate the phase separator entirely with Molmer Sorensen gates. To support this, the QAOA circuit starts in the state where all qubits are in the groundstate $\left| 0\right\rangle$ instead of the superposition of all computational basis states $\left| + \right\rangle$, End of explanation """ def mixer(beta: float, qubit_register: Sequence[cirq.Qid]) -> Iterator[cirq.Operation]: """Yield a QAOA mixer of RX gates, modified by adding RY gates first, to account for the additional Hadamard gates. """ yield cirq.ry(np.pi / 2).on_each(qubit_register) yield cirq.rx(beta - np.pi).on_each(qubit_register) """ Explanation: Because we replaced the two body $\sigma_z$ rotations with Molmer Sorensen gates we also have to adjust the mixer slightly to account for the injected Hadamard gates. End of explanation """ def average_color_changes( parameters: Tuple[float, float], qubit_register: Sequence[cirq.Qid], car_sequence: Sequence[int], ) -> float: """Calculate the average number of color changes over all measurements of the QAOA circuit, aross `repetitions` many runs, for provided parameters beta and gamma. Args: parameters: tuple of (`beta`, `gamma`), the two parameters for the QAOA circuit qubit_register: A sequence of qubits for the circuit to use. car_sequence: A sequence that determines which cars are paired together. Returns: A float average number of color changes over all measurements. 
""" beta, gamma = parameters repetitions = 100 circuit = cirq.Circuit() circuit.append(phase_separator(gamma, qubit_register, car_sequence)) circuit.append(mixer(beta, qubit_register)) circuit.append(cirq.measure(*qubit_register, key="z")) results = service.run(circuit, repetitions=repetitions) avg_cc = 0 for paint_bitstring in results.measurements["z"]: avg_cc += color_changes(paint_bitstring, car_sequence) / repetitions return avg_cc """ Explanation: To find the right parameters for the QAOA circuit, we have to assess the quality of the solutions for a given set of parameters. To this end, we execute the QAOA circuit with fixed parameters 100 times and calculate the average number of color changes. End of explanation """ from scipy.optimize import minimize service = cirq.Simulator() beta, gamma = np.random.rand(2) average_cc = average_color_changes([beta, gamma], qubit_register, car_sequence) optimization_function = lambda x: average_color_changes(x, qubit_register, car_sequence) for _ in range(10): initial_guess = np.random.rand(2) optimization_result = minimize( optimization_function, initial_guess, method="SLSQP", options={"eps": 0.1} ) average_cc_temp = average_color_changes( optimization_result.x, qubit_register, car_sequence ) if average_cc > average_cc_temp: beta, gamma = optimization_result.x average_cc = average_cc_temp average_cc """ Explanation: We optimize the average number of color changes by adjusting the parameters with scipy.optimzes function minimize. The results of these optimsation runs strongly depend on the random starting values we choose for the parameters, which is why we restart the optimization procedure for different starting parameters 10 times and take the best performing optimized parameters. 
End of explanation """ repetitions = 100 circuit = cirq.Circuit() circuit.append(phase_separator(gamma, qubit_register, car_sequence)) circuit.append(mixer(beta, qubit_register)) circuit.append(cirq.measure(*qubit_register, key="z")) service = ionq.Service( remote_host="<remote host>", api_key="<your key>", default_target="qpu" ) results = service.run(circuit, repetitions=repetitions) best_result = CAR_PAIR_COUNT for paint_bitstring in results.measurements["z"]: result = color_changes(paint_bitstring, car_sequence) if result < best_result: best_result = result best_paint_bitstring = paint_bitstring print(f"The minimal number of color changes found by level-1 QAOA is: {best_result}") print( f"The car pairs have to be painted according to {best_paint_bitstring}, with index i representing the paint of the first car of pair i." ) print(f" The other car in pair i is painted the second color.") """ Explanation: Note here that the structure of the problem graphs of the binary paintshop problem allow for an alternative technique to come up with good parameters independent of the specifics of the respective instance of the problem: Training the quantum approximate optimization algorithm without access to a quantum processing unit Once the parameters are optimised, we execute the optimised QAOA circuit 100 times and output the solution with the least color changes. Please replace &lt;your key&gt; with your IonQ API key and &lt;remote host&gt; with the API endpoint. End of explanation """
srodriguex/coursera_data_management_and_visualization
Week_4.ipynb
mit
%pylab inline # This package is very useful to data analysis in Python. import pandas as pd # This package makes nice looking graphics. import seaborn as sn # Read the csv file to a dataframe object. df = pd.read_csv('data/gapminder.csv') # Convert all number values to float. df = df.convert_objects(convert_numeric=True) # Define the Country as the unique id of the dataframe. df.index = df.country del df['country'] # List of the variables selected. vars_sel = ['incomeperperson', 'relectricperperson', 'employrate', 'urbanrate'] # Dataframe with only the variables selected. df = df[vars_sel] new_size = array((6,4))*1.3 figsize(*new_size) sn.set(color_codes=True) """ Explanation: Table of Contents 1. Week 4 assignment 1.1 Univariate graphs 1.1.1 Variable incomeperperson 1.1.2 Variable relectricperperson 1.1.3 Variable employrate 1.1.4 Variable urbanrate 1.2 Bivariate graphs 1.2.1 Income per person vs Electric per person 1.2.2 Income per person vs Employ rate 1.2.3 Income per person vs Employ rate 1. Week 4 assignment The dataset chosen is Gapminder. You can check the codebook clicking here. We chose incomeperperson as the response variable and relectricperperson, employrate, and urbanrate as the explanatory variables. To use this program you must have Python 3.3+ and IPython Notebook 1.0+ installed. End of explanation """ # Helper function to plots a histogram for the variable given. def plot_hist(var_name): g = sn.distplot(df[var_name].dropna(), kde=False, rug=True, label=var_name); g.set_title('Variable {}'.format(var_name), weight='bold', size=13); axvline(df[var_name].mean(), label='Mean') axvline(df[var_name].median(), label='Median', color='r') legend(loc='best'); """ Explanation: 1.1 Univariate graphs In this section we plot histograms to see the distribuition of the values for the variables chosen and plot the mean and median values to ease the understanding of the distribution. 
End of explanation """ plot_hist('incomeperperson') """ Explanation: 1.1.1 Variable incomeperperson The histogram shows a deep concentratation, clearly half of the world in the first bin, with low income per capita. End of explanation """ plot_hist('relectricperperson') """ Explanation: 1.1.2 Variable relectricperperson Another concentration of electric consumption in the low bins, but this is not surprising as the most developed countries are not great in numbers, mostly in Europe with United States, Canada, e some others exceptions. End of explanation """ plot_hist('employrate') """ Explanation: 1.1.3 Variable employrate This variable is the only one who follows a Gaussian distribution, where the mean and median are almost the same. The average of the world in nearly 60% of employ rate, with slightly more countries in the upper extreme than the lower. End of explanation """ plot_hist('urbanrate') """ Explanation: 1.1.4 Variable urbanrate An distribution slightly Gaussian than Uniform. End of explanation """ sn.jointplot(y=df.incomeperperson, x=df.relectricperperson, kind='reg'); """ Explanation: 1.2 Bivariate graphs For this task I've chosen the incomeperperson as the response variable and the others as explanatory variable. Please, ignore the red Warning messages, it's not an error, just an alert. 1.2.1 Income per person vs Electric per person The graph below clearly shows that the incompe per person follows electrict consumption, although a good number of observations are clustered in the lower left of the graph. End of explanation """ zoom = (df.relectricperperson>-100) & (df.relectricperperson < 2000); y_zoom = df.incomeperperson[zoom]; x_zoom = df.relectricperperson[zoom]; sn.jointplot(y=y_zoom, x=x_zoom, kind='reg'); """ Explanation: Here we filter the data to the region clustered to see in more details the relationship. It also shows that the income per peson rise as the eletric consumption rise, in general. 
There are a few observations that with electric consumption near 1400 not followed by an increase in income. This may be the cause of some countries with electric power subsidized by the state. End of explanation """ sn.jointplot(y=df.incomeperperson, x=df.employrate, kind='reg'); """ Explanation: 1.2.2 Income per person vs Employ rate Here we can there isn't a correlation between Income and Employ rates. Countries with high employ rate but low income per capita may be a sign of labor with low aggregated value as agriculture, or huge concentration of income leading to social inequality. End of explanation """ sn.jointplot(y=df.incomeperperson, x=df.urbanrate, kind='reg'); """ Explanation: 1.2.3 Income per person vs Urban rate For this last graph, it shows the more people live in urban areas, the more income per capita rises, there are a concentration of low income throughout the urban rate, though. End of explanation """
sjsrey/giddy
notebooks/RankMarkov.ipynb
bsd-3-clause
import libpysal as ps import numpy as np import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns import pandas as pd import geopandas as gpd """ Explanation: Full Rank Markov and Geographic Rank Markov Author: Wei Kang &#119;&#101;&#105;&#107;&#97;&#110;&#103;&#57;&#48;&#48;&#57;&#64;&#103;&#109;&#97;&#105;&#108;&#46;&#99;&#111;&#109; End of explanation """ from giddy.markov import FullRank_Markov income_table = pd.read_csv(ps.examples.get_path("usjoin.csv")) income_table.head() pci = income_table[list(map(str,range(1929,2010)))].values pci m = FullRank_Markov(pci) m.ranks m.transitions """ Explanation: Full Rank Markov End of explanation """ m.p """ Explanation: Full rank Markov transition probability matrix End of explanation """ m.mfpt m.sojourn_time df_fullrank = pd.DataFrame(np.c_[m.p.diagonal(),m.sojourn_time], columns=["Staying Probability","Sojourn Time"], index = np.arange(m.p.shape[0])+1) df_fullrank.head() df_fullrank.plot(subplots=True, layout=(1,2), figsize=(15,5)) sns.distplot(m.mfpt.flatten(),kde=False) """ Explanation: Full rank mean first passage times End of explanation """ from giddy.markov import GeoRank_Markov, Markov, sojourn_time gm = GeoRank_Markov(pci) gm.transitions gm.p gm.sojourn_time[:10] gm.sojourn_time gm.mfpt income_table["geo_sojourn_time"] = gm.sojourn_time i = 0 for state in income_table["Name"]: income_table["geo_mfpt_to_" + state] = gm.mfpt[:,i] income_table["geo_mfpt_from_" + state] = gm.mfpt[i,:] i = i + 1 income_table.head() geo_table = gpd.read_file(ps.examples.get_path('us48.shp')) # income_table = pd.read_csv(libpysal.examples.get_path("usjoin.csv")) complete_table = geo_table.merge(income_table,left_on='STATE_NAME',right_on='Name') complete_table.head() complete_table.columns """ Explanation: Geographic Rank Markov End of explanation """ fig, axes = plt.subplots(nrows=2, ncols=2,figsize = (15,7)) target_states = ["California","Mississippi"] directions = ["from","to"] for i, direction in 
enumerate(directions): for j, target in enumerate(target_states): ax = axes[i,j] col = direction+"_"+target complete_table.plot(ax=ax,column = "geo_mfpt_"+ col,cmap='OrRd', scheme='quantiles', legend=True) ax.set_title("Mean First Passage Time "+direction+" "+target) ax.axis('off') leg = ax.get_legend() leg.set_bbox_to_anchor((0.8, 0.15, 0.16, 0.2)) plt.tight_layout() """ Explanation: Visualizing mean first passage time from/to California/Mississippi: End of explanation """ fig, axes = plt.subplots(nrows=1, ncols=2,figsize = (15,7)) schemes = ["Quantiles","Equal_Interval"] for i, scheme in enumerate(schemes): ax = axes[i] complete_table.plot(ax=ax,column = "geo_sojourn_time",cmap='OrRd', scheme=scheme, legend=True) ax.set_title("Rank Sojourn Time ("+scheme+")") ax.axis('off') leg = ax.get_legend() leg.set_bbox_to_anchor((0.8, 0.15, 0.16, 0.2)) plt.tight_layout() """ Explanation: Visualizing sojourn time for each US state: End of explanation """
rbiswas4/ObsCond
examples/CheckFiltCalc.ipynb
gpl-3.0
from brightness import mCalcs, atmTransName df.head() """ Explanation: def atmTransName(airmass): """ return filename for atmospheric transmission with aerosols for airmass closest to input Parameters ---------- airmass : airmass """ l = np.arange(1.0, 2.51, 0.1) idx = np.abs(l - airmass).argmin() a = np.int(10*l[idx]) baseline = getPackageDir('THROUGHPUTS') fname = os.path.join(baseline, 'atmos', 'atmos_{}_aerosol.dat'.format(a)) return fname def mCalcs(airmass, bandName, ra, dec, expMJD, FWHMeff, hwbpdict, photparams=None, sm=None): """ sm : """ if photparams is None: photparams = PhotometricParameters() if sm is None: sm = SkyModel(observatory='LSST', mags=False, preciseAltAz=True) # Obtain full sky transmission at airmass # Note that this method is not interpolating but choosing the atmospheric transmission from # Modtran simulations of the closest airmass in a sequence of np.arange(1., 2.51, 0.1) fname = atmTransName(airmass) print(fname) atmTrans = np.loadtxt(fname) wave, trans = hwbpdict[bandName].multiplyThroughputs(atmTrans[:, 0], atmTrans[:, 1]) bp = Bandpass(wavelen=wave, sb=trans) # Set the observing condition sm.setRaDecMjd(lon=[ra], lat=[dec], filterNames=[bandName], mjd=expMJD, degrees=False, azAlt=False) # Get the sky sed wave, spec = sm.returnWaveSpec() sed = Sed(wavelen=wave, flambda=spec[0]) m5 = calcM5(sed, bp, hwbpdict[bandName], photparams, FWHMeff) # Get the sky magnitude only in the band concerned m = sm.returnMags(bandpasses=hwbpdict)[bandName][0] return m5, m End of explanation """ tot, hwbpdict = BandpassDict.loadBandpassesFromFiles() from lsst.sims.skybrightness import SkyModel mCalcs(1.464, 'y', 1.676483, -1.082473, 59580.033829, 1.263038, hwbpdict) mCalcs(1.454958, 'y', 1.69412, -1.033972, 59580.034275, 1.258561, hwbpdict) df['diffmsky'] = df['skymags'] - df['filtSkyBrightness'] df.query('diffmsky > 3.3') mCalcs(1.008652, 'g', 0.925184, -0.4789, 61044.077855, 1.086662, hwbpdict=hwbpdict) from opsimsummary import OpSimOutput opsout = 
OpSimOutput.fromOpSimDB('/Users/rbiswas/data/LSST/OpSimData/minion_1016_sqlite.db') row = opsout.summary.ix[994149] mCalcs(row.airmass, row['filter'], row.fieldRA, row.fieldDec, row.expMJD, row.FWHMeff, hwbpdict=hwbpdict) sb.__version__ np.__version__ Sed.writeSED() laptop = np.loadtxt('skySED_laptop.csv') lsstuw = np.loadtxt('skySED.csv') laptop.shape import healpy as hp hp.__file__ fig, ax = plt.subplots(1, 2) ax[0].plot(laptop[:, 0], laptop[:, 1] - lsstuw[:,1], 'k-') #ax[0].plot(lsstuw[:, 0], lsstuw[:, 1]) ax[1].plot(laptop[:, 0], laptop[:, 1]/lsstuw[:, 1]) """ Explanation: Examples I copied the values of expMJD, FWHMeff, fieldRA, fieldDec from a sqlite query by hand (will change in a later iteration). Comparison of m5 and msky values from the dataframe for the first two rows is quite good. End of explanation """
rasbt/pattern_classification
machine_learning/scikit-learn/ensemble_classifier.ipynb
gpl-3.0
from sklearn import datasets

iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target

from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
import numpy as np

np.random.seed(123)

clf1 = LogisticRegression()
clf2 = RandomForestClassifier()
clf3 = GaussianNB()

print('5-fold cross validation:\n')

# Baseline: score each of the three conceptually different models on its own.
for clf, label in zip([clf1, clf2, clf3],
                      ['Logistic Regression', 'Random Forest', 'naive Bayes']):
    scores = cross_validation.cross_val_score(clf, X, y, cv=5, scoring='accuracy')
    print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))
"""
Explanation: Implementing a Weighted Majority Rule Ensemble Classifier in scikit-learn
<br>
<br>
<div style="width:500px;height:70px;border:1px solid #000;padding:10px;background-color:#e5ffe5;"><p>If you are interested in using the <code>EnsembleClassifier</code>, please note that it is now also available through scikit learn (>0.17) as <a href="http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.VotingClassifier.html"><code>VotingClassifier</code></a>.</p></div>
Here, I want to present a simple and conservative approach of implementing a weighted majority rule ensemble classifier in scikit-learn that yielded remarkably good results when I tried it in a kaggle competition. For me personally, kaggle competitions are just a nice way to try out and compare different approaches and ideas -- basically an opportunity to learn in a controlled environment with nice datasets.
Of course, there are other implementations of more sophisticated ensemble methods in scikit-learn, such as bagging classifiers, random forests, or the famous AdaBoost algorithm. However, as far as I am concerned, they all require the usage of a common "base classifier."
In contrast, my motivation for the following approach was to combine conceptually different machine learning classifiers and use a majority vote rule. The reason for this was that I had trained a set of equally well performing models, and I wanted to balance out their individual weaknesses.
<br>
<br>
Sections
Classifying Iris Flowers Using Different Classification Models
Implementing the Majority Voting Rule Ensemble Classifier
Additional Note About the EnsembleClassifier Implementation: Class Labels vs. Probabilities
EnsembleClassifier - Tuning Weights
EnsembleClassifier - Pipelines
Some Final Words
<br>
<br>
Classifying Iris Flowers Using Different Classification Models
[back to top]
For a simple example, let us use three different classification models to classify the samples in the Iris dataset: Logistic regression, a naive Bayes classifier with a Gaussian kernel, and a random forest classifier -- an ensemble method itself. At this point, let's not worry about preprocessing the data and training and test sets. Also, we will only use 2 feature columns (sepal width and petal height) to make the classification problem harder.
End of explanation
"""
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
import numpy as np


class EnsembleClassifier(BaseEstimator, ClassifierMixin):
    """Ensemble classifier for scikit-learn estimators.

    Predicts by plain majority vote over the class labels when ``weights``
    is None, or by the (weighted) average of the per-classifier
    ``predict_proba`` outputs otherwise.

    Parameters
    ----------
    clfs : `iterable`
        A list of scikit-learn classifier objects.
    weights : `list` (default: `None`)
        If `None`, the majority rule voting will be applied to the
        predicted class labels. If a list of weights (`float` or `int`)
        is provided, the averaged raw probabilities (via `predict_proba`)
        will be used to determine the most confident class label.
    """

    def __init__(self, clfs, weights=None):
        self.clfs = clfs
        self.weights = weights

    def fit(self, X, y):
        """Fit every underlying scikit-learn estimator on the same data.

        Parameters
        ----------
        X : numpy array, shape = [n_samples, n_features]
            Training data
        y : list or numpy array, shape = [n_samples]
            Class labels

        Returns
        -------
        self : EnsembleClassifier
            Returning ``self`` follows the scikit-learn estimator
            convention (the original implementation returned None), which
            allows chaining and use inside meta-estimators.
        """
        for clf in self.clfs:
            clf.fit(X, y)
        return self

    def predict(self, X):
        """Predict class labels by (weighted) majority rule.

        Parameters
        ----------
        X : numpy array, shape = [n_samples, n_features]

        Returns
        -------
        maj : list or numpy array, shape = [n_samples]
            Predicted class labels by majority rule
        """
        self.classes_ = np.asarray([clf.predict(X) for clf in self.clfs])
        if self.weights:
            avg = self.predict_proba(X)
            # np.argmax returns the first index of the row maximum, which is
            # exactly what the original apply_along_axis +
            # max(enumerate(x), key=operator.itemgetter(1)) computed -- just
            # without a per-row Python lambda.
            maj = np.argmax(avg, axis=1)
        else:
            # Majority vote over the predicted labels, one column per sample.
            maj = np.asarray([np.argmax(np.bincount(self.classes_[:, c]))
                              for c in range(self.classes_.shape[1])])
        return maj

    def predict_proba(self, X):
        """Average (optionally weighted) the class probabilities of all clfs.

        Parameters
        ----------
        X : numpy array, shape = [n_samples, n_features]

        Returns
        -------
        avg : list or numpy array, shape = [n_samples, n_probabilities]
            Weighted average probability for each class per sample.
        """
        self.probas_ = [clf.predict_proba(X) for clf in self.clfs]
        avg = np.average(self.probas_, axis=0, weights=self.weights)
        return avg


np.random.seed(123)
eclf = EnsembleClassifier(clfs=[clf1, clf2, clf3], weights=[1, 1, 1])
for clf, label in zip([clf1, clf2, clf3, eclf],
                      ['Logistic Regression', 'Random Forest',
                       'naive Bayes', 'Ensemble']):
    scores = cross_validation.cross_val_score(clf, X, y, cv=5, scoring='accuracy')
    print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))
"""
Explanation: As we can see from the cross-validation results above, the performance of the three models is almost equal.
<br>
<br>
Implementing the Majority Voting Rule Ensemble Classifier
[back to top]
Now, we will implement a simple EnsembleClassifier class that allows us to combine the three different classifiers. We define a predict method that let's us simply take the majority rule of the predictions by the classifiers.
E.g., if the prediction for a sample is
classifier 1 -> class 1
classifier 2 -> class 1
classifier 3 -> class 2
we would classify the sample as "class 1."
Furthermore, we add a weights parameter, which let's us assign a specific weight to each classifier. In order to work with the weights, we collect the predicted class probabilities for each classifier, multiply it by the classifier weight, and take the average. Based on these weighted average probabilties, we can then assign the class label.
To illustrate this with a simple example, let's assume we have 3 classifiers and a 3-class classification problems where we assign equal weights to all classifiers (the default): w1=1, w2=1, w3=1.
The weighted average probabilities for a sample would then be calculated as follows:
| classifier | class 1 | class 2 | class 3 |
|-----------------|----------|----------|----------|
| classifier 1 | w1 * 0.2 | w1 * 0.5 | w1 * 0.3 |
| classifier 2 | w2 * 0.6 | w2 * 0.3 | w2 * 0.1 |
| classifier 3 | w3 * 0.3 | w3 * 0.4 | w3 * 0.3 |
| weighted average| 0.37 | 0.4 | 0.3 |
We can see in the table above that class 2 has the highest weighted average probability, thus we classify the sample as class 2.
Now, let's put it into code and apply it to our Iris classification.
End of explanation
"""
np.random.seed(123)
eclf = EnsembleClassifier(clfs=[clf1, clf2, clf3], weights=[1, 1, 1])
for clf, label in zip([clf1, clf2, clf3, eclf],
                      ['Logistic Regression', 'Random Forest',
                       'naive Bayes', 'Ensemble']):
    scores = cross_validation.cross_val_score(clf, X, y, cv=5, scoring='accuracy')
    print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))
"""
Explanation: <br>
<br>
Additional Note About the EnsembleClassifier Implementation: Class Labels vs. Probabilities
[back to top]
You might be wondering why I implemented the EnsembleClassifier class so that it applies the majority voting purely on the class labels if no weights are provided and is the predicted probability values otherwise.
Let's consider the following scenario:
1) Prediction based on majority class labels:
| classifier | class 1 | class 2 |
|-----------------|----------|----------|
| classifier 1 | 1 | 0|
| classifier 2 | 0 | 1|
| classifier 3 | 0 | 1|
| prediction | - | 1|
To achieve this behavior, initialize the EnsembleClassifier like this:
eclf = EnsembleClassifier(clfs=[clf1, clf2, clf3])
2) Prediction based on predicted probabilities (equal weights, weights=[1,1,1])
| classifier | class 1 | class 2 |
|-----------------|----------|----------|
| classifier 1 | 0.99 | 0.01|
| classifier 2 | 0.49 | 0.51|
| classifier 3 | 0.49 | 0.51|
| weighted average| 0.66 | 0.18 |
| prediction | 1 | - |
To achieve this behavior, initialize the EnsembleClassifier like this:
eclf = EnsembleClassifier(clfs=[clf1, clf2, clf3], weights=[1,1,1])
<br>
<br>
As we can see, the results are different depending on whether we apply a majority vote based on the class labels or take the average of the predicted probabilities. In general, I think it makes more sense to use the predicted probabilities (scenario 2). Here, the "very confident" classifier 1 overules the very unconfident classifiers 2 and 3.
The reason for the different behaviors is that not all classifiers in scikit-learn support the predict_proba method. In this case, the EnsembleClassifier can still be used just based on the class labels if no weights are provided as parameter.
<br>
<br>
EnsembleClassifier - Tuning Weights
[back to top]
Let's get back to our weights parameter. Here, we will use a naive brute-force approach to find the optimal weights for each classifier to increase the prediction accuracy.
End of explanation
"""
import pandas as pd

np.random.seed(123)

df = pd.DataFrame(columns=('w1', 'w2', 'w3', 'mean', 'std'))

i = 0
for w1 in range(1, 4):
    for w2 in range(1, 4):
        for w3 in range(1, 4):
            if len(set((w1, w2, w3))) == 1:  # skip if all weights are equal
                continue
            eclf = EnsembleClassifier(clfs=[clf1, clf2, clf3], weights=[w1, w2, w3])
            scores = cross_validation.cross_val_score(
                estimator=eclf,
                X=X,
                y=y,
                cv=5,
                scoring='accuracy',
                n_jobs=1)
            df.loc[i] = [w1, w2, w3, scores.mean(), scores.std()]
            i += 1

# DataFrame.sort() was deprecated and subsequently removed from pandas;
# sort_values() is the supported, behaviorally equivalent replacement.
df.sort_values(by=['mean', 'std'], ascending=False)


class ColumnSelector(object):
    """
    A feature selector for scikit-learn's Pipeline class that returns
    specified columns from a numpy array.
    """

    def __init__(self, cols):
        self.cols = cols

    def transform(self, X, y=None):
        return X[:, self.cols]

    def fit(self, X, y=None):
        return self


from sklearn.pipeline import Pipeline
from sklearn.lda import LDA
from sklearn.grid_search import GridSearchCV  # used below but was never imported

pipe1 = Pipeline([
    ('sel', ColumnSelector([1])),     # use only the 1st feature
    ('clf', GaussianNB())])

pipe2 = Pipeline([
    ('sel', ColumnSelector([0, 1])),  # use the 1st and 2nd feature
    ('dim', LDA(n_components=1)),     # Dimensionality reduction via LDA
    ('clf', LogisticRegression())])

eclf = EnsembleClassifier([pipe1, pipe2])

scores = cross_validation.cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
# NOTE(review): 'label' below is left over from the previous loop and prints
# the last label from it -- kept as in the original.
print("Accuracy: %0.2f (+/- %0.2f) [%s]" % (scores.mean(), scores.std(), label))

pipe1 = Pipeline([
    ('sel', ColumnSelector([1])),     # use only the 1st feature
    ('clf', RandomForestClassifier())])

pipe2 = Pipeline([
    ('sel', ColumnSelector([0, 1])),  # use the 1st and 2nd feature
    ('dim', LDA(n_components=1)),     # Dimensionality reduction via LDA
    ('clf', LogisticRegression())])

pipe3 = Pipeline([
    ('eclf', EnsembleClassifier([pipe1, pipe2])),
])

parameters = {
    'eclf__clfs__dim__n_components': (1, 1),
}

grid_search = GridSearchCV(pipe3, parameters, n_jobs=-1, cv=5,
                           verbose=5, refit=True, scoring=None)
grid_search.fit(X, y)
"""
Explanation: <br>
<br>
EnsembleClassifier - Pipelines
[back to top]
Of course, we can also use the EnsembleClassifier in Pipelines. This is especially useful if a certain classifier does a pretty good job on a certain feature subset or requires different preprocessing steps. For demonstration purposes, let us implement a simple ColumnSelector class.
End of explanation
"""
google/data-pills
pills/CM/[DATA_PILL]_[CM]_Frequency_Analysis_(ADH).ipynb
apache-2.0
# Credentials for reaching the (non-public) Ads Data Hub API.
#
# The Developer Key retrieves a discovery document containing the non-public
# Full Circle Query v2 API; the service built from it is what the samples
# below use for their API requests. See the README for how to configure your
# Google Cloud Project for access to the Full Circle Query v2 API.
DEVELOPER_KEY = 'yourkey'  # 'INSERT_DEVELOPER_KEY_HERE'

# The client secrets file can be downloaded from the Google Cloud Console.
CLIENT_SECRETS_FILE = 'adh-key.json'  # 'Make sure you have correctly renamed this file and you have uploaded it in this colab'
"""
Explanation: PLEASE MAKE A COPY BEFORE CHANGING
Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
Important
This content are intended for educational and informational purposes only.
Configuration
ADH APIs Configuration Steps
Enable the ADH v1 API in the Google Cloud Storage account you use to access the API. When searching for the API in your GCP Console API Library, use the search term “adsdatahub”.
Go to the Google Developers Console and verify that you have access to your Google Cloud project via the drop-down menu at the top of the page. If you don't see the right Google Cloud project, you should reach out to your Ads Data Hub team to get access.
From the project drop-down menu, select your Big Query project.
Click on the hamburger button on the top left corner of the page and click APIs & services > Credentials.
If you have not done so already, create an API key by clicking the Create credentials drop-down menu and select API key. This will create an API key that you will need for a later step.
If you have not done so already, create a new OAuth 2.0 client ID by clicking the Create credentials button and select OAuth client ID. For the Application type select Other and optionally enter a name to be associated with the client ID. Click Create to create the new Client ID and a dialog will appear to show you your client ID and secret.
On the Credentials page for your project, find your new client ID listed under OAuth 2.0 client IDs, and click the corresponding download icon. The downloaded file will contain your credentials, which will be needed to step through the OAuth 2.0 installed application flow.
update the DEVELOPER_KEY field to match the API key you retrieved earlier.
Rename the credentials file you downloaded earlier to adh-key.json and upload the file in this colab (on the left menu click on the "Files" tab and then click on the "upload" button
End of explanation
"""
If you have not done so already, create an API key by clicking the Create credentials drop-down menu and select API key. This will create an API key that you will need for a later step. If you have not done so already, create a new OAuth 2.0 client ID by clicking the Create credentials button and select OAuth client ID. For the Application type select Other and optionally enter a name to be associated with the client ID. Click Create to create the new Client ID and a dialog will appear to show you your client ID and secret. On the Credentials page for your project, find your new client ID listed under OAuth 2.0 client IDs, and click the corresponding download icon. The downloaded file will contain your credentials, which will be needed to step through the OAuth 2.0 installed application flow. update the DEVELOPER_KEY field to match the API key you retrieved earlier. Rename the credentials file you downloaded earlier to adh-key.json and upload the file in this colab (on the left menu click on the "Files" tab and then click on the "upload" button End of explanation """ import json import sys import argparse import pprint import random import datetime import pandas as pd import plotly.plotly as py import plotly.graph_objs as go from google_auth_oauthlib.flow import InstalledAppFlow from googleapiclient import discovery from oauthlib.oauth2.rfc6749.errors import InvalidGrantError from google.auth.transport.requests import AuthorizedSession from google.auth.transport.requests import Request from google.oauth2.credentials import Credentials from plotly.offline import iplot from plotly.graph_objs import Contours, Histogram2dContour, Marker, Scatter from googleapiclient.errors import HttpError from google.colab import auth auth.authenticate_user() print('Authenticated') """ Explanation: Install Dependencies End of explanation """ # Allow plot images to be displayed %matplotlib inline # Functions def enable_plotly_in_cell(): import IPython from plotly.offline import 
init_notebook_mode display(IPython.core.display.HTML(''' <script src="/static/components/requirejs/require.js"></script> ''')) init_notebook_mode(connected=False) """ Explanation: Define function to enable charting library End of explanation """ #!/usr/bin/python # # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities used to step through OAuth 2.0 flow. These are intended to be used for stepping through samples for the Full Circle Query v2 API. """ _APPLICATION_NAME = 'ADH Campaign Overlap' _CREDENTIALS_FILE = 'fcq-credentials.json' _SCOPES = 'https://www.googleapis.com/auth/adsdatahub' _DISCOVERY_URL_TEMPLATE = 'https://%s/$discovery/rest?version=%s&key=%s' _FCQ_DISCOVERY_FILE = 'fcq-discovery.json' _FCQ_SERVICE = 'adsdatahub.googleapis.com' _FCQ_VERSION = 'v1' _REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob' _SCOPE = ['https://www.googleapis.com/auth/adsdatahub'] _TOKEN_URI = 'https://accounts.google.com/o/oauth2/token' MAX_PAGE_SIZE = 50 def _GetCredentialsFromInstalledApplicationFlow(): """Get new credentials using the installed application flow.""" flow = InstalledAppFlow.from_client_secrets_file( CLIENT_SECRETS_FILE, scopes=_SCOPE) flow.redirect_uri = _REDIRECT_URI # Set the redirect URI used for the flow. 
auth_url, _ = flow.authorization_url(prompt='consent') print ('Log into the Google Account you use to access the adsdatahub Query ' 'v1 API and go to the following URL:\n%s\n' % auth_url) print 'After approving the token, enter the verification code (if specified).' code = raw_input('Code: ') try: flow.fetch_token(code=code) except InvalidGrantError as ex: print 'Authentication has failed: %s' % ex sys.exit(1) credentials = flow.credentials _SaveCredentials(credentials) return credentials def _LoadCredentials(): """Loads and instantiates Credentials from JSON credentials file.""" with open(_CREDENTIALS_FILE, 'rb') as handler: stored_creds = json.loads(handler.read()) creds = Credentials(client_id=stored_creds['client_id'], client_secret=stored_creds['client_secret'], token=None, refresh_token=stored_creds['refresh_token'], token_uri=_TOKEN_URI) return creds def _SaveCredentials(creds): """Save credentials to JSON file.""" stored_creds = { 'client_id': getattr(creds, '_client_id'), 'client_secret': getattr(creds, '_client_secret'), 'refresh_token': getattr(creds, '_refresh_token') } with open(_CREDENTIALS_FILE, 'wb') as handler: handler.write(json.dumps(stored_creds)) def GetCredentials(): """Get stored credentials if they exist, otherwise return new credentials. If no stored credentials are found, new credentials will be produced by stepping through the Installed Application OAuth 2.0 flow with the specified client secrets file. The credentials will then be saved for future use. Returns: A configured google.oauth2.credentials.Credentials instance. """ try: creds = _LoadCredentials() creds.refresh(Request()) except IOError: creds = _GetCredentialsFromInstalledApplicationFlow() return creds def GetDiscoveryDocument(): """Downloads the adsdatahub v1 discovery document. Downloads the adsdatahub v1 discovery document to fcq-discovery.json if it is accessible. If the file already exists, it will be overwritten. 
Raises: ValueError: raised if the discovery document is inaccessible for any reason. """ credentials = GetCredentials() discovery_url = _DISCOVERY_URL_TEMPLATE % ( _FCQ_SERVICE, _FCQ_VERSION, DEVELOPER_KEY) auth_session = AuthorizedSession(credentials) discovery_response = auth_session.get(discovery_url) if discovery_response.status_code == 200: with open(_FCQ_DISCOVERY_FILE, 'wb') as handler: handler.write(discovery_response.text) else: raise ValueError('Unable to retrieve discovery document for api name "%s"' 'and version "%s" via discovery URL: %s' % _FCQ_SERVICE, _FCQ_VERSION, discovery_url) def GetService(): """Builds a configured adsdatahub v1 API service. Returns: A googleapiclient.discovery.Resource instance configured for the adsdatahub v1 service. """ credentials = GetCredentials() discovery_url = _DISCOVERY_URL_TEMPLATE % ( _FCQ_SERVICE, _FCQ_VERSION, DEVELOPER_KEY) service = discovery.build( 'adsdatahub', _FCQ_VERSION, credentials=credentials, discoveryServiceUrl=discovery_url) return service def GetServiceFromDiscoveryDocument(): """Builds a configured Full Circle Query v2 API service via discovery file. Returns: A googleapiclient.discovery.Resource instance configured for the Full Circle Query API v2 service. """ credentials = GetCredentials() with open(_FCQ_DISCOVERY_FILE, 'rb') as handler: discovery_doc = handler.read() service = discovery.build_from_document( service=discovery_doc, credentials=credentials) return service try: full_circle_query = GetService() except IOError as ex: print ('Unable to create ads data hub service - %s' % ex) print ('Did you specify the client secrets file in samples_util.py?') sys.exit(1) try: # Execute the request. 
response = full_circle_query.customers().list().execute() except HttpError as e: print (e) sys.exit(1) if 'customers' in response: print ('ADH API Returned {} Ads Data Hub customers for the current user!'.format(len(response['customers']))) for customer in response['customers']: print(json.dumps(customer)) else: print ('No customers found for current user.') """ Explanation: Authenticate against the ADH API ADH documentation End of explanation """ #@title Define ADH configuration parameters customer_id = 000000001 #@param dataset_id = 000000001 #@param query_name = "query_name" #@param {type:"string"} big_query_project = 'bq_project_id' #@param Destination Project ID {type:"string"} big_query_dataset = 'dataset_name' #@param Destination Dataset {type:"string"} big_query_destination_table = 'table_name' #@param Destination Table {type:"string"} start_date = '2019-09-01' #@param {type:"date", allow-input: true} end_date = '2019-09-30' #@param {type:"date", allow-input: true} max_freq = 100 #@param {type:"integer", allow-input: true} cpm = 5 #@param {type:"number", allow-input: true} id_type = "campaign_id" #@param ["", "advertiser_id", "campaign_id", "placement_id", "ad_id"] {type: "string", allow-input: false} IDs = "" #@param {type: "string", allow-input: true} """ Explanation: Frequency Analysis <b>Purpose:</b> This tool should be used to guide you defining an optimal frequency cap considering the CTR curve. Due to that it is more useful in awareness use cases. Key notes For some campaings the user ID will be <b>zeroed</b> (e.g. Googel Data, ITP browsers and YouTube Data), therefore <b>excluded</b> from the analysis. For more information click <a href="https://support.google.com/dcm/answer/9006418" > here</a>; It will be only included in the analysis campaigns which clicks and impressions were tracked. 
Instructions * First of all: <b>MAKE A COPY</b> =); * Fulfill the query parameters in the Box 1; * In the menu above click in Runtime > Run All; * Authorize your credentials; * Go to the end of the colab and your figures will be ready; * After defining what should be the optimal frequency cap fill it in the Box 2 and press play. Step 1 - Instructions - Defining parameters to find the optimal frequency <b>max_freq:</b> Stands for the amount of frequency you want to plot the graphics (e.g. if you put 50, you will look for impressions that was shown up to 50 times for users); <b>id_type:</b> How do you want to filter your data (if you don't want to filter leave it blank); <b>IDs:</b> Accordingly to the id_type chosen before, fill in this field following this patterns: 'id-1111', 'id-2222', ... End of explanation """ def df_calc_fields(df): df['ctr'] = df.clicks / df.impressions df['cost'] = (df.impressions / 1000 ) * cpm df['cpc'] = df.cost / df.clicks df['cumulative_clicks'] = df.clicks.cumsum() df['cumulative_impressions'] = df.impressions.cumsum() df['cumulative_reach'] = df.reach.cumsum() df['cumulative_cost'] = df.cost.cumsum() df['coverage_clicks'] = df.cumulative_clicks / df.clicks.sum() df['coverage_impressions'] = df.cumulative_impressions / df.impressions.sum() df['coverage_reach'] = df.cumulative_reach / df.reach.sum() return df """ Explanation: Step 2 - Create a function for the final calculations From DT data Calculate metrics using pandas Pass through the pandas dataframe when you call this function End of explanation """ # Build the query dc = {} if (IDs == ""): dc['ID_filters'] = "" else: dc['id_type'] = id_type dc['IDs'] = IDs dc['ID_filters'] = '''AND event.{id_type} IN ({IDs})'''.format(**dc) """ Explanation: Step 3 - Build the query Set up the vairables End of explanation """ q1 = """ WITH imp_u_clicks AS ( SELECT User_ID, event.event_time AS interaction_time, 'imp' AS interaction_type FROM adh.cm_dt_impressions WHERE user_id != '0' {ID_filters} 
""" """ Explanation: Part 1 - Find all impressions from the impression table: * Select all user IDs from the impression table * Select the event_time * Mark the interaction type as 'imp' for all of these rows * Filter for the dates set in Step 1 using the partition files to reduce bigQuery costs by only searching in files within a 2 day interval of the set date range * Filter out any user IDs that are 0 * If specific ID filters were applied in Step 1 filter the data for those IDs End of explanation """ q2 = """ UNION ALL ( SELECT User_ID, event.event_time AS interaction_time, 'click' AS interaction_type FROM adh.cm_dt_clicks WHERE user_id != '0' {ID_filters} ) ), """ """ Explanation: Part 2 - Find all clicks from the clicks table: Select all User IDs from the click table Select the event_time Mark the interaction type as 'click' for all of these rows Filter for the dates set in Step 1 using the partition files to reduce BigQuery costs by only searching in files within a 2 day interval of the set date range If specific ID filters were applied in Step 2 filter the data for those IDs Use a union to create a single table with both impressions and clicks End of explanation """ q3 = """ user_level_data AS ( SELECT user_id, SUM(IF(interaction_type = 'imp', 1, 0)) AS impressions, SUM(IF(interaction_type = 'click', 1, 0)) AS clicks FROM imp_u_clicks GROUP BY user_id) """ """ Explanation: output example: <table> <tr> <th>USER_ID</th> <th>interaction_time</th> <th>interaction_type</th> </tr> <tr> <td>001</td> <td>timestamp</td> <td>impression</td> </tr> <tr> <td>001</td> <td>timestamp</td> <td>impression</td> </tr> <tr> <td>001</td> <td>timestamp</td> <td>click</td> </tr> <tr> <td>002</td> <td>timestamp</td> <td>impression</td> </tr> </tr> <tr> <td>002</td> <td>timestamp</td> <td>click</td> </tr> </tr> <tr> <td>003</td> <td>timestamp</td> <td>impression</td> </tr> <tr> <td>001</td> <td>timestamp</td> <td>impression</td> </tr> </table> Part 3 - Calculate impressions and clicks 
per user: For each user, calculate the number of impressions and clicks using the table created in Part 1 and 2 End of explanation """ q4 = """ SELECT impressions AS frequency, SUM(clicks) AS clicks, SUM(impressions) AS impressions, COUNT(*) AS reach FROM user_level_data GROUP BY 1 ORDER BY frequency ASC """ """ Explanation: output example: <table> <tr> <th>USER_ID</th> <th>impressions</th> <th>clicks</th> </tr> <tr> <td>001</td> <td>3</td> <td>1</td> </tr> <tr> <td>002</td> <td>1</td> <td>1</td> </tr> <tr> <td>003</td> <td>1</td> <td>0</td> </tr> </table> Part 4 - Calculate metrics per frequency: Use the table created in Part 3 with metrics at user level to calculate metrics per each frequency Frequency: The number of impressions served to each user Clicks: The sum of clicks that occured at each frequency Impressions: The sum of all impressions that occured at each frequency Reach: The total number of unique users (the count of all user ids) Group by Frequency End of explanation """ query_text = (q1 + q2 + q3 + q4).format(**dc) print(query_text) """ Explanation: output example: <table> <tr> <th>frequency</th> <th>clicks</th> <th>impression</th> <th>reach</th> </tr> <tr> <td>1</td> <td>1</td> <td>2</td> <td>2</td> </tr> <tr> <td>2</td> <td>0</td> <td>0</td> <td>0</td> </tr> <tr> <td>3</td> <td>1</td> <td>3</td> <td>1</td> </tr> </table> Join the query and use pythons format method to pass in your parameters set in step 1 End of explanation """ try: full_circle_query = GetService() except IOError, ex: print 'Unable to create ads data hub service - %s' % ex print 'Did you specify the client secrets file?' sys.exit(1) query_create_body = { 'name': query_name, 'title': query_name, 'queryText': query_text } try: # Execute the request. 
new_query = full_circle_query.customers().analysisQueries().create(body=query_create_body, parent='customers/' + str(customer_id)).execute() new_query_name = new_query["name"] except HttpError as e: print e sys.exit(1) print 'New query %s created for customer ID "%s":' % (new_query_name, customer_id) print(json.dumps(new_query)) """ Explanation: Create the query required for ADH * When working with ADH the standard BigQuery query needs to be adapted to run in ADH * This can be done bia the API End of explanation """ # Build the query dc = {} if (IDs == ""): dc['ID_filters'] = "" else: dc['id_type'] = id_type dc['IDs'] = IDs dc['ID_filters'] = '''AND event.{id_type} IN ({IDs})'''.format(**dc) query_text = """ WITH imp_u_clicks AS ( SELECT User_ID, event.event_time AS interaction_time, 'imp' AS interaction_type FROM adh.cm_dt_impressions WHERE user_id != '0' {ID_filters} UNION ALL ( SELECT User_ID, event.event_time AS interaction_time, 'click' AS interaction_type FROM adh.cm_dt_clicks WHERE user_id != '0' {ID_filters} ) ), user_level_data AS ( SELECT user_id, SUM(IF(interaction_type = 'imp', 1, 0)) AS impressions, SUM(IF(interaction_type = 'click', 1, 0)) AS clicks FROM imp_u_clicks GROUP BY user_id) SELECT impressions AS frequency, SUM(clicks) AS clicks, SUM(impressions) AS impressions, COUNT(*) AS reach FROM user_level_data GROUP BY 1 ORDER BY frequency ASC """.format(**dc) print(query_text) try: full_circle_query = GetService() except IOError, ex: print 'Unable to create ads data hub service - %s' % ex print 'Did you specify the client secrets file?' sys.exit(1) query_create_body = { 'name': query_name, 'title': query_name, 'queryText': query_text } try: # Execute the request. 
new_query = full_circle_query.customers().analysisQueries().create(body=query_create_body, parent='customers/'+ str(customer_id)).execute() new_query_name = new_query["name"] except HttpError as e: print e sys.exit(1) print 'New query %s for customer ID "%s":' % (new_query_name, customer_id) print(json.dumps(new_query)) """ Explanation: Full Query End of explanation """ destination_table_full_path = big_query_project + '.' + big_query_dataset + '.' + big_query_destination_table CUSTOMER_ID = customer_id DATASET_ID = dataset_id QUERY_NAME = query_name DEST_TABLE = destination_table_full_path #Dates format_str = '%Y-%m-%d' # The format start_date_obj = datetime.datetime.strptime(start_date, format_str) end_date_obj = datetime.datetime.strptime(end_date, format_str) START_DATE = { "year": start_date_obj.year, "month": start_date_obj.month, "day": start_date_obj.day } END_DATE = { "year": end_date_obj.year, "month": end_date_obj.month, "day": end_date_obj.day } try: full_circle_query = GetService() except IOError, ex: print('Unable to create ads data hub service - %s' % ex) print('Did you specify the client secrets file?') sys.exit(1) query_start_body = { 'spec': { 'startDate': START_DATE, 'endDate': END_DATE, 'adsDataCustomerId': DATASET_ID }, 'destTable': DEST_TABLE, 'customerId': CUSTOMER_ID } try: # Execute the request. 
operation = full_circle_query.customers().analysisQueries().start(body=query_start_body, name=new_query_name).execute() except HttpError as e: print(e) sys.exit(1) print('Running query with name "%s" via the following operation:' % query_name) print(json.dumps(operation)) """ Explanation: Check your query exists https://adsdatahub.google.com/u/0/#/jobs Find your query in the my queries tab Check and ensure your query is valid (there will be a green tick in the top right corner) If your query is not valid hover over the red exclamation mark to see issues that need to be resolved Step 4 - Run the query Start the query Pass the query in to ADH using the full_circle_query method set at the start Pass in the dates, the destination table name in BigQuery and the customer ID End of explanation """ import time statusDone = False while statusDone is False: print("waiting for the job to complete...") updatedOperation = full_circle_query.operations().get(name=operation['name']).execute() if updatedOperation.has_key('done') and updatedOperation['done'] == True: statusDone = True time.sleep(5) print("Job completed... Getting results") #run bigQuery query dc = {} dc['table'] = big_query_dataset + '.' + big_query_destination_table q1 = ''' select * from {table} '''.format(**dc) """ Explanation: Retrieve the results from BigQuery Check to make sure the query has finished running and is saved in the new BigQuery TAble When it is done we cane retrieve it End of explanation """ # Run query as save as a table (also known as dataframe) df = pd.io.gbq.read_gbq(q1, project_id=big_query_project, dialect='standard', reauth=True) print(df) """ Explanation: We are using the pandas library to run the query. 
We pass in the query (q), the project id and set the SQL language to 'standard' (as opposed to legacy SQL) End of explanation """ # Save the original dataframe as a csv file in case you need to recover the original data df.to_csv('base_final_user.csv', index=False) """ Explanation: Save the output as a CSV End of explanation """ df = df[1:max_freq+1] # Reduces the dataframe to have the size you set as the maximum frequency (max_freq) df = df_calc_fields(df) df2=df.copy() # Copy the dataframe you calculated the fields in case you need to recover it graphs = [] # Variable to save all graphics """ Explanation: Step 6 - Set up the data and all the charts that will be plotted 6.1 Transform data Use the calculation function created to calculate all the values based off your data End of explanation """ # Save all data into a list to plot the graphics impressions = dict(type='bar', x=df.frequency, y=df.impressions, name='impressions', marker=dict(color='rgb(0, 29, 255)', line=dict(width=1))) ctr = dict( type='scatter', x=df.frequency, y=df.ctr, name='ctr', marker=dict(color='rgb(255, 148, 0)', line=dict(width=1)), xaxis='x1', yaxis='y2', ) layout = dict( title='Impressions and CTR Comparison on Each Frequency', autosize=True, legend=dict(x=1.15, y=1), hovermode='x', xaxis=dict(tickangle=-45, autorange=True, tickfont=dict(size=10), title='frequency', type='category'), yaxis=dict(showgrid=True, title='impressions'), yaxis2=dict(overlaying='y', anchor='x', side='right', showgrid=False, title='ctr'), ) fig = dict(data=[impressions, ctr], layout=layout) graphs.append(fig) clicks = dict(type='bar', x= df.frequency, y= df.clicks, name='Clicks', marker=dict(color= 'rgb(0, 29, 255)', line= dict(width= 1)) ) ctr = dict(type='scatter', x= df.frequency, y= df.cpc, name='cpc', marker=dict(color= 'rgb(255, 148, 0)', line= dict(width= 1)), xaxis='x1', yaxis='y2' ) layout = dict(autosize= True, title='Clicks and CPC Comparison on Each Frequency', legend= dict(x= 1.15, y= 1 ), 
hovermode='x', xaxis=dict(tickangle= -45, autorange=True, tickfont=dict(size= 10), title= 'frequency', type= 'category' ), yaxis=dict( showgrid=True, title= 'clicks' ), yaxis2=dict( overlaying= 'y', anchor= 'x', side= 'right', showgrid= False, title= 'cpc' ) ) fig = dict(data=[clicks, ctr], layout=layout) graphs.append(fig) ctr = dict(type='scatter', x= df.frequency, y= df.ctr, name='ctr', marker=dict(color= 'rgb(0, 29, 255)', line= dict(width= 1)) ) cpc = dict(type='scatter', x= df.frequency, y= df.cpc, name='cpc', marker=dict(color= 'rgb(255, 148, 0)', line= dict(width= 1)), xaxis='x1', yaxis='y2' ) layout = dict(autosize= True, title='CTR and CPC Comparison on Each Frequency', legend= dict(x= 1.15, y= 1 ), hovermode='x', xaxis=dict(tickangle= -45, autorange=True, tickfont=dict(size= 10), title= 'frequency', type= 'category', showgrid =False ), yaxis=dict( showgrid=False, title= 'ctr' ), yaxis2=dict( overlaying= 'y', anchor= 'x', side= 'right', showgrid= False, title= 'cpc' ) ) fig = dict(data=[ctr, cpc], layout=layout) graphs.append(fig) pareto = dict(type='scatter', x= df.frequency, y= df.coverage_clicks, name='Cumulative % Clicks', marker=dict(color= 'rgb(0, 29, 255)', line= dict(width= 1)) ) cpc = dict(type='scatter', x= df.frequency, y= df.cpc, name='cpc', marker=dict(color= 'rgb(255, 148, 0)', line= dict(width= 1)), xaxis='x1', yaxis='y2' ) layout = dict(autosize= True, title='Cumulative Clicks and CPC Comparison on Each Frequency', legend= dict(x= 1.15, y= 1 ), hovermode='x', xaxis=dict(tickangle= -45, autorange=True, tickfont=dict(size= 10), title= 'frequency', type= 'category' ), yaxis=dict( showgrid=True, title= 'cum clicks' ), yaxis2=dict( overlaying= 'y', anchor= 'x', side= 'right', showgrid= False, title= 'cpc' ) ) fig = dict(data=[pareto, cpc], layout=layout) graphs.append(fig) pareto = dict(type='scatter', x= df.frequency, y= df.coverage_clicks, name='Cumulative % Clicks', marker=dict(color= 'rgb(0, 29, 255)', line= dict(width= 1)) ) cpc = 
dict(type='scatter', x= df.frequency, y= df.ctr, name='ctr', marker=dict(color= 'rgb(255, 148, 0)', line= dict(width= 1)), xaxis='x1', yaxis='y2' ) layout = dict(autosize= True, title='Cumulative Clicks and CTR Comparison on Each Frequency', legend= dict(x= 1.15, y= 1 ), hovermode='x', xaxis=dict(tickangle= -45, autorange=True, tickfont=dict(size= 10), title= 'frequency', type= 'category' ), yaxis=dict( showgrid=True, title= 'cum clicks' ), yaxis2=dict( overlaying= 'y', anchor= 'x', side= 'right', showgrid= False, title= 'ctr' ) ) fig = dict(data=[pareto, cpc], layout=layout) graphs.append(fig) pareto = dict(type='scatter', x= df.frequency, y= df.coverage_reach, name='Cumulative % Reach', marker=dict(color= 'rgb(0, 29, 255)', line= dict(width= 1)) ) cpc = dict(type='scatter', x= df.frequency, y= df.cost, name='cost', marker=dict(color= 'rgb(255, 148, 0)', line= dict(width= 1)), xaxis='x1', yaxis='y2' ) layout = dict(autosize= True, title='Cumulative Reach and Cost Comparison on Each Frequency', legend= dict(x= 1.15, y= 1 ), hovermode='x', xaxis=dict(tickangle= -45, autorange=True, tickfont=dict(size= 10), title= 'frequency', type= 'category' ), yaxis=dict( showgrid=True, title= 'cummulative reach' ), yaxis2=dict( overlaying= 'y', anchor= 'x', side= 'right', showgrid= False, title= 'cost' ) ) """ Explanation: Analysis 1: Frequency Analysis by user Step 1: Set up graphs End of explanation """ # Show the first 5 rows of the dataframe (data matrix) with the final data df.head() # Export the whole dataframe to a csv file that can be used in an external environment df.to_csv('freq_analysis.csv', index=False) """ Explanation: Step 2: Export all the data (optional) End of explanation """ enable_plotly_in_cell() iplot(graphs[0]) """ Explanation: Output: Visualise the data Impression and CTR on each frequency Clicks and CPC Comparison on Each Frequency CTR and CPC Comparison on Each Frequency Cumulative Clicks and CPC Comparison on Each Frequency Cumulative Clicks and CTR 
Comparison on Each Frequency Impression and CTR on each frequency Consider your frequency range, ensure frequency management is in place. Where is your CTR floor? At what point does your CTR drop below a level that you care about. Determine what the wasted impressions is if you don't change your frequency. End of explanation """ enable_plotly_in_cell() iplot(graphs[1]) """ Explanation: Clicks and CPC Comparison on Each Frequency What is your CPC ceiling Understand what the frequency is at that level Determine what impact changing your frequency will have on clicks End of explanation """ enable_plotly_in_cell() iplot(graphs[2]) """ Explanation: CTR and CPC Comparison on Each Frequency How does your CTR and CPC impact each other Make an informed decision regarding suitable goals End of explanation """ enable_plotly_in_cell() iplot(graphs[3]) """ Explanation: Cumulative Clicks and CPC Comparison on Each Frequency Understand what a suitable CPC goal might be 1. What is the change in cost for increased clicks 2. 
What is the incremental gains for an increased cost End of explanation """ enable_plotly_in_cell() iplot(graphs[4]) """ Explanation: Cumulative Clicks and CTR Comparison on Each Frequency At what frequency does your CTR drop below an acceptable value End of explanation """ #Understand the logic behind calculation graphs2 = [] pareto = dict(type='scatter', x= df.frequency, y= df.coverage_reach, name='Cummulative % Reach', marker=dict(color= 'rgb(0, 29, 255)', line= dict(width= 1)) ) ccm_imp = dict(type='scatter', x= df.frequency, y= df.coverage_impressions, name='Cummulative % Impressions', marker=dict(color= 'rgb(255, 148, 0)', line= dict(width= 1)), xaxis='x1', yaxis='y' ) layout = dict(autosize= True, title='Cummulative Impressions and Cummulative Reach on Each Frequency', legend= dict(x= 1.15, y= 1 ), hovermode='x', xaxis=dict(tickangle= -45, autorange=True, tickfont=dict(size= 10), title= 'frequency', type= 'category' ), yaxis=dict( showgrid=True, title= 'cummulative %' ) ) fig = dict(data=[pareto, ccm_imp], layout=layout) graphs2.append(fig) pareto = dict(type='scatter', x= df.frequency, y= df.coverage_clicks, name='Cummulative % Clicks', marker=dict(color= 'rgb(0, 29, 255)', line= dict(width= 1)) ) ccm_imp = dict(type='scatter', x= df.frequency, y= df.coverage_impressions, name='Cummulative % Impressions', marker=dict(color= 'rgb(255, 148, 0)', line= dict(width= 1)), xaxis='x1', yaxis='y' ) layout = dict(autosize= True, title='Cumulative Impressions and Cummulative Clicks on Each Frequency', legend= dict(x= 1.15, y= 1 ), hovermode='x', xaxis=dict(tickangle= -45, autorange=True, tickfont=dict(size= 10), title= 'frequency', type= 'category' ), yaxis=dict( showgrid=True, title= 'cummulative %' ) ) fig = dict(data=[pareto, ccm_imp], layout=layout) graphs2.append(fig) """ Explanation: Analysis 2: Understanding optimal frequency Step 1: Set up charts End of explanation """ enable_plotly_in_cell() iplot(graphs2[0]) """ Explanation: Output: Visualise the results 
Cummulative Impressions and Cummulative Reach on Each Frequency How do you maximise your reach without drastically increasing your impressions? To obtain my reach goals, what frequency do I need at what impression cost? With higher frequency caps you will need more impressions to maximise your reach End of explanation """ enable_plotly_in_cell() iplot(graphs2[1]) """ Explanation: Cummulative Impressions and Cummulative Clicks on Each Frequency To obtain my goals in terms of clicks, what frequency do I need, at what impression cost? End of explanation """ #@title 1.1 - Optimal Frequency optimal_freq = 3 #@param {type:"integer", allow-input: true} """ Explanation: Analysis 3: Determine impressions outside optimal frequency Step 1: Define parameter to be the Optimal Frequency This parameter below will guide the analysis of media loss talking about impressions. We will calculate the percentage of impressions that are out of the number you set as the optimal frequency. End of explanation """ from __future__ import division df2 = df_calc_fields(df2) df_opt, df_not_opt = df[1:optimal_freq+1], df[optimal_freq+1:] total_impressions = list(df2.cumulative_impressions)[-1] total_imp_not_opt = list(df_not_opt.cumulative_impressions)[-1] - list(df_opt.cumulative_impressions)[-1] imp_not_opt_ratio = total_imp_not_opt / total_impressions total_clicks = list(df2.cumulative_clicks)[-1] total_clicks_not_opt = list(df_not_opt.cumulative_clicks)[-1] - list(df_opt.cumulative_clicks)[-1] clicks_within_opt_ratio = 1-(total_clicks_not_opt / total_clicks) print("{:.1f}% of your total impressions are out of the optimal frequency.".format(imp_not_opt_ratio*100)) print("{:,} of your impressions are out of the optimal frequency".format(total_imp_not_opt)) print("At a CPM of {} - preventing these would result in a cost saving of {:,}".format(cpm, cpm*total_imp_not_opt)) print("") print("If you limited frequency to {}, you would still achieve {:.1f}% of your clicks").format(optimal_freq, 
clicks_within_opt_ratio*100) """ Explanation: Output: Calculate impression loss End of explanation """
GoogleCloudPlatform/ml-design-patterns
02_data_representation/embeddings.ipynb
apache-2.0
import shutil import os import pandas as pd import tensorflow as tf from tensorflow import keras from tensorflow.keras import callbacks, layers, models, utils from tensorflow.keras.preprocessing.text import Tokenizer from tensorflow.keras.preprocessing.sequence import pad_sequences from tensorflow_hub import KerasLayer """ Explanation: Embeddings An embedding is a low-dimensional, vector representation of a (typically) high-dimensional feature which maintains the semantic meaning of the feature in a such a way that similar features are close in the embedding space. End of explanation """ !head ./data/babyweight_sample.csv df = pd.read_csv("./data/babyweight_sample.csv") df.plurality.head(5) df.plurality.unique() CLASSES = { 'Single(1)': 0, 'Multiple(2+)': 1, 'Twins(2)': 2, 'Triplets(3)': 3, 'Quadruplets(4)': 4, 'Quintuplets(5)': 5 } N_CLASSES = len(CLASSES) """ Explanation: Embedding layer for categorical data End of explanation """ plurality_class = [CLASSES[plurality] for plurality in df.plurality] print(df.plurality[:5]) print(plurality_class[:5]) """ Explanation: Convert the plurality to a numeric index. End of explanation """ EMBED_DIM = 2 embedding_layer = layers.Embedding(input_dim=N_CLASSES, output_dim=EMBED_DIM) embeds = embedding_layer(tf.constant(plurality_class)) """ Explanation: Create an embedding layer. Supply arguments input_dim and output_dim - input_dim indicates the size of the vocabulary. For plurality this is 6. - ouptut_dim indicates the dimension of the dense embedding. End of explanation """ embeds.shape embeds[:5] """ Explanation: The variable embeds contains the two-dimensional for each plurality class. End of explanation """ LOGDIR = "./text_models" DATA_DIR = "./data" """ Explanation: Embedding Layers in a Keras model In this section, we will implement text models to recognize the probable source (Github, Tech-Crunch, or The New-York Times) of the titles we have in the title dataset we constructed in the previous lab. 
In a first step, we will load and pre-process the texts and labels so that they are suitable to be fed to a Keras model. For the texts of the titles we will learn how to split them into a list of tokens, and then how to map each token to an integer using the Keras Tokenizer class. What will be fed to our Keras models will be batches of padded list of integers representing the text. For the labels, we will learn how to one-hot-encode each of the 3 classes into a 3 dimensional basis vector. Then we will explore a few possible models to do the title classification. All models will be fed padded list of integers, and all models will start with a Keras Embedding layer that transforms the integer representing the words into dense vectors. Our model will be a simple bag-of-words DNN model that averages up the word vectors and feeds the tensor that results to further dense layers. Doing so means that we forget the word order (and hence that we consider sentences as a “bag-of-words”). Using an RNN or a 1-dimensional CNN would allow us to maintain the order of word embeddings in our model. Load dataset Let's start by specifying where the information about the trained models will be saved as well as where our dataset is located: End of explanation """ DATASET_NAME = "titles_full.csv" TITLE_SAMPLE_PATH = os.path.join(DATA_DIR, DATASET_NAME) COLUMNS = ['title', 'source'] titles_df = pd.read_csv(TITLE_SAMPLE_PATH, header=None, names=COLUMNS) titles_df.head() """ Explanation: Our dataset consists of titles of articles along with the label indicating from which source these articles have been taken from (GitHub, Tech-Crunch, or the New-York Times). 
End of explanation """ tokenizer = Tokenizer() tokenizer.fit_on_texts(titles_df.title) integerized_titles = tokenizer.texts_to_sequences(titles_df.title) """ Explanation: First, we'll find how many words we have in our dataset (VOCAB_SIZE), how many titles we have (DATASET_SIZE), and what the maximum length of the titles we have (MAX_LEN) is. Keras offers the Tokenizer class in its keras.preprocessing.text module to help us with this. End of explanation """ integerized_titles[:3] """ Explanation: The variable 'integerized_titles' contains the integer representation of each article title in out dataset. End of explanation """ VOCAB_SIZE = len(tokenizer.index_word) VOCAB_SIZE DATASET_SIZE = tokenizer.document_count DATASET_SIZE MAX_LEN = max(len(sequence) for sequence in integerized_titles) MAX_LEN """ Explanation: From this and the tokenizer we can extract the VOCAB_SIZE, DATASET_SIZE and MAX_LEN. End of explanation """ def create_sequences(texts, max_len=MAX_LEN): sequences = tokenizer.texts_to_sequences(texts) padded_sequences = pad_sequences(sequences, max_len, padding='post') return padded_sequences sample_titles = create_sequences(["holy cash cow batman - content is back", "close look at a flu outbreak upends some common wisdom"]) sample_titles """ Explanation: Preprocess data We'll need to pad the elements of our title to feed into the model. Keras has the helper functions pad_sequence for that on the top of the tokenizer methods. The function create_sequences will * take as input our titles as well as the maximum sentence length and * returns a list of the integers corresponding to our tokens padded to the sentence maximum length End of explanation """ CLASSES = { 'github': 0, 'nytimes': 1, 'techcrunch': 2 } N_CLASSES = len(CLASSES) def encode_labels(sources): classes = [CLASSES[source] for source in sources] one_hots = utils.to_categorical(classes) return one_hots """ Explanation: Next, we'll convert our label to numeric, categorical variable. 
End of explanation """ N_TRAIN = int(DATASET_SIZE * 0.80) titles_df = pd.read_csv(TITLE_SAMPLE_PATH, header=None, names=COLUMNS) titles_train, sources_train = ( titles_df.title[:N_TRAIN], titles_df.source[:N_TRAIN]) titles_valid, sources_valid = ( titles_df.title[N_TRAIN:], titles_df.source[N_TRAIN:]) sources_train.value_counts() """ Explanation: Create train/validation split End of explanation """ X_train, Y_train = create_sequences(titles_train), encode_labels(sources_train) X_valid, Y_valid = create_sequences(titles_valid), encode_labels(sources_valid) X_train[:3], Y_train[:3] """ Explanation: Then, prepare the data for the model. End of explanation """ def build_dnn_model(embed_dim): model = models.Sequential([ layers.Embedding(VOCAB_SIZE + 1, embed_dim, input_shape=[MAX_LEN]), layers.Lambda(lambda x: tf.reduce_mean(x, axis=1)), layers.Dense(N_CLASSES, activation='softmax') ]) model.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] ) return model Y_train.shape %%time tf.random.set_seed(33) MODEL_DIR = os.path.join(LOGDIR, 'dnn') shutil.rmtree(MODEL_DIR, ignore_errors=True) BATCH_SIZE = 300 EPOCHS = 100 EMBED_DIM = 10 PATIENCE = 0 dnn_model = build_dnn_model(embed_dim=EMBED_DIM) dnn_history = dnn_model.fit( X_train, Y_train, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(X_valid, Y_valid), callbacks=[callbacks.EarlyStopping(patience=PATIENCE), callbacks.TensorBoard(MODEL_DIR)], ) pd.DataFrame(dnn_history.history)[['loss', 'val_loss']].plot() pd.DataFrame(dnn_history.history)[['accuracy', 'val_accuracy']].plot() dnn_model.summary() """ Explanation: Build a DNN model The build_dnn_model function below returns a compiled Keras model that implements a simple embedding layer transforming the word integers into dense vectors, followed by a Dense softmax layer that returns the probabilities for each class. 
Note that we need to put a custom Keras Lambda layer in between the Embedding layer and the Dense softmax layer to do an average of the word vectors returned by the embedding layer. This is the average that's fed to the dense softmax layer. By doing so, we create a model that is simple but that loses information about the word order, creating a model that sees sentences as "bag-of-words". End of explanation """ NNLM = "https://tfhub.dev/google/nnlm-en-dim50/2" nnlm_module = KerasLayer( handle=NNLM, output_shape=[50], input_shape=[], dtype=tf.string, trainable=True) """ Explanation: Transfer Learning with Pre-trained Embedding We can also use a word embedding from a pre-trained modle using a Neural Probabilistic Language Model. TF-Hub has a 50-dimensional one called nnlm-en-dim50-with-normalization, which also normalizes the vectors produced. Once loaded from its url, the TF-hub module can be used as a normal Keras layer in a sequential or functional model. Since we have enough data to fine-tune the parameters of the pre-trained embedding itself, we will set trainable=True in the KerasLayer that loads the pre-trained embedding: End of explanation """ nnlm_module(tf.constant(["holy cash cow batman - content is back", "close look at a flu outbreak upends some common wisdom"])) """ Explanation: With this module, we do not need to pad our inputs. The NNLM module returns a 50-dimensional vector given a word or sentence. End of explanation """ X_train, Y_train = titles_train.values, encode_labels(sources_train) X_valid, Y_valid = titles_valid.values, encode_labels(sources_valid) X_train[:3] """ Explanation: With this in mind, we can simplify our data inputs since do not need to integerize or pad. 
End of explanation """ def build_hub_model(): model = models.Sequential([ KerasLayer(handle=NNLM, output_shape=[50], input_shape=[], dtype=tf.string, trainable=True), layers.Dense(N_CLASSES, activation='softmax') ]) model.compile( optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] ) return model %%time tf.random.set_seed(33) MODEL_DIR = os.path.join(LOGDIR, 'hub') shutil.rmtree(MODEL_DIR, ignore_errors=True) BATCH_SIZE = 300 EPOCHS = 100 EMBED_DIM = 10 PATIENCE = 3 hub_model = build_hub_model() hub_history = hub_model.fit( X_train, Y_train, epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(X_valid, Y_valid), callbacks=[callbacks.EarlyStopping(patience=PATIENCE), callbacks.TensorBoard(MODEL_DIR)], ) pd.DataFrame(hub_history.history)[['loss', 'val_loss']].plot() pd.DataFrame(hub_history.history)[['accuracy', 'val_accuracy']].plot() hub_model.summary() """ Explanation: Build DNN model using TF-Hub Embedding layer Next, we can add this TF-Hub module to our DNN model. End of explanation """
bashtage/statsmodels
examples/notebooks/distributed_estimation.ipynb
bsd-3-clause
import numpy as np from scipy.stats.distributions import norm from statsmodels.base.distributed_estimation import DistributedModel def _exog_gen(exog, partitions): """partitions exog data""" n_exog = exog.shape[0] n_part = np.ceil(n_exog / partitions) ii = 0 while ii < n_exog: jj = int(min(ii + n_part, n_exog)) yield exog[ii:jj, :] ii += int(n_part) def _endog_gen(endog, partitions): """partitions endog data""" n_endog = endog.shape[0] n_part = np.ceil(n_endog / partitions) ii = 0 while ii < n_endog: jj = int(min(ii + n_part, n_endog)) yield endog[ii:jj] ii += int(n_part) """ Explanation: Distributed Estimation This notebook goes through a couple of examples to show how to use distributed_estimation. We import the DistributedModel class and make the exog and endog generators. End of explanation """ X = np.random.normal(size=(1000, 25)) beta = np.random.normal(size=25) beta *= np.random.randint(0, 2, size=25) y = norm.rvs(loc=X.dot(beta)) m = 5 """ Explanation: Next we generate some random data to serve as an example. End of explanation """ debiased_OLS_mod = DistributedModel(m) debiased_OLS_fit = debiased_OLS_mod.fit( zip(_endog_gen(y, m), _exog_gen(X, m)), fit_kwds={"alpha": 0.2} ) """ Explanation: This is the most basic fit, showing all of the defaults, which are to use OLS as the model class, and the debiasing procedure. End of explanation """ from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod.families import Gaussian debiased_GLM_mod = DistributedModel( m, model_class=GLM, init_kwds={"family": Gaussian()} ) debiased_GLM_fit = debiased_GLM_mod.fit( zip(_endog_gen(y, m), _exog_gen(X, m)), fit_kwds={"alpha": 0.2} ) """ Explanation: Then we run through a slightly more complicated example which uses the GLM model class. 
End of explanation """ from statsmodels.base.distributed_estimation import _est_regularized_naive, _join_naive naive_OLS_reg_mod = DistributedModel( m, estimation_method=_est_regularized_naive, join_method=_join_naive ) naive_OLS_reg_params = naive_OLS_reg_mod.fit( zip(_endog_gen(y, m), _exog_gen(X, m)), fit_kwds={"alpha": 0.2} ) """ Explanation: We can also change the estimation_method and the join_method. The below example show how this works for the standard OLS case. Here we using a naive averaging approach instead of the debiasing procedure. End of explanation """ from statsmodels.base.distributed_estimation import ( _est_unregularized_naive, DistributedResults, ) naive_OLS_unreg_mod = DistributedModel( m, estimation_method=_est_unregularized_naive, join_method=_join_naive, results_class=DistributedResults, ) naive_OLS_unreg_params = naive_OLS_unreg_mod.fit( zip(_endog_gen(y, m), _exog_gen(X, m)), fit_kwds={"alpha": 0.2} ) """ Explanation: Finally, we can also change the results_class used. The following example shows how this work for a simple case with an unregularized model and naive averaging. End of explanation """
anhaidgroup/py_entitymatching
notebooks/guides/step_wise_em_guides/.ipynb_checkpoints/Performing Blocking Using Rule-Based Blocking-checkpoint.ipynb
bsd-3-clause
# Import py_entitymatching package import py_entitymatching as em import os import pandas as pd """ Explanation: Introduction This IPython notebook illustrates how to perform blocking using rule-based blocker. First, we need to import py_entitymatching package and other libraries as follows: End of explanation """ # Get the datasets directory datasets_dir = em.get_install_path() + os.sep + 'datasets' # Get the paths of the input tables path_A = datasets_dir + os.sep + 'person_table_A.csv' path_B = datasets_dir + os.sep + 'person_table_B.csv' # Read the CSV files and set 'ID' as the key attribute A = em.read_csv_metadata(path_A, key='ID') B = em.read_csv_metadata(path_B, key='ID') A.head() B.head() """ Explanation: Then, read the (sample) input tables for blocking purposes. End of explanation """ block_f = em.get_features_for_blocking(A, B, validate_inferred_attr_types=False) block_f em._block_c['corres'] em._atypes1['birth_year'], em._atypes1['hourly_wage'], em._atypes1['name'], em._atypes1['zipcode'] em._atypes2['birth_year'], em._atypes2['hourly_wage'], em._atypes2['name'], em._atypes2['zipcode'] """ Explanation: Generating Features for Blocking End of explanation """ rb = em.RuleBasedBlocker() # Add rule : block tuples if name_name_lev(ltuple, rtuple) < 0.4 rb.add_rule(['name_name_lev_sim(ltuple, rtuple) < 0.4'], block_f) C = rb.block_tables(A, B, l_output_attrs=['name', 'address'], r_output_attrs=['name', 'address'], show_progress=False) C.head() """ Explanation: Different Ways to Block Using Rule Based Blocker There are three different ways to do overlap blocking: Block two tables to produce a candidate set of tuple pairs. Block a candidate set of tuple pairs to typically produce a reduced candidate set of tuple pairs. Block two tuples to check if a tuple pair would get blocked. 
Block Tables to Produce a Candidate Set of Tuple Pairs End of explanation """ rb = em.RuleBasedBlocker() rb.add_rule(['birth_year_birth_year_exm(ltuple, rtuple) == 0'], block_f) D = rb.block_candset(C, show_progress=False) D.head() """ Explanation: Block Candidate Set End of explanation """ A.loc[[0]] B.loc[[1]] rb = em.RuleBasedBlocker() # Add rule : block tuples if name_name_lev(ltuple, rtuple) < 0.4 rb.add_rule(['name_name_lev_sim(ltuple, rtuple) < 0.4'], block_f) rb.add_rule(['birth_year_birth_year_exm(ltuple, rtuple) == 0'], block_f) status = rb.block_tuples(A.loc[0], B.loc[0]) print(status) """ Explanation: Block Two tuples To Check If a Tuple Pair Would Get Blocked End of explanation """
jaakla/getdelficomments
Welcome_To_Colaboratory.ipynb
unlicense
seconds_in_a_day = 24 * 60 * 60 seconds_in_a_day """ Explanation: <a href="https://colab.research.google.com/github/jaakla/getdelficomments/blob/master/Welcome_To_Colaboratory.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> <p><img alt="Colaboratory logo" height="45px" src="/img/colab_favicon.ico" align="left" hspace="10px" vspace="0px"></p> <h1>What is Colaboratory?</h1> Colaboratory, or "Colab" for short, allows you to write and execute Python in your browser, with - Zero configuration required - Free access to GPUs - Easy sharing Whether you're a student, a data scientist or an AI researcher, Colab can make your work easier. Watch Introduction to Colab to learn more, or just get started below! New Section Getting started The document you are reading is not a static web page, but an interactive environment called a Colab notebook that lets you write and execute code. For example, here is a code cell with a short Python script that computes a value, stores it in a variable, and prints the result: End of explanation """ seconds_in_a_week = 7 * seconds_in_a_day seconds_in_a_week """ Explanation: To execute the code in the above cell, select it with a click and then either press the play button to the left of the code, or use the keyboard shortcut "Command/Ctrl+Enter". To edit the code, just click the cell and start editing. Variables that you define in one cell can later be used in other cells: End of explanation """ import numpy as np from matplotlib import pyplot as plt ys = 200 + np.random.randn(100) x = [x for x in range(len(ys))] plt.plot(x, ys, '-') plt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6) plt.title("Sample Visualization") plt.show() """ Explanation: Colab notebooks allow you to combine executable code and rich text in a single document, along with images, HTML, LaTeX and more. 
When you create your own Colab notebooks, they are stored in your Google Drive account. You can easily share your Colab notebooks with co-workers or friends, allowing them to comment on your notebooks or even edit them. To learn more, see Overview of Colab. To create a new Colab notebook you can use the File menu above, or use the following link: create a new Colab notebook. Colab notebooks are Jupyter notebooks that are hosted by Colab. To learn more about the Jupyter project, see jupyter.org. Data science With Colab you can harness the full power of popular Python libraries to analyze and visualize data. The code cell below uses numpy to generate some random data, and uses matplotlib to visualize it. To edit the code, just click the cell and start editing. End of explanation """
turbomanage/training-data-analyst
courses/machine_learning/deepdive/02_tensorflow/labs/e_traineval.ipynb
apache-2.0
import tensorflow as tf import shutil print(tf.__version__) """ Explanation: Introducing tf.estimator.train_and_evaluate() Learning Objectives - Introduce new type of input function (serving_input_reciever_fn()) which supports remote access to our model via REST API - Use the tf.estimator.train_and_evaluate() method to periodically evaluate during training - Practice using TensorBoard to visualize training and evaluation loss curves Introduction In this notebook, we'll see how to use the train_and_evaluate method within tf.estimator to train and evaluate our machin learning model. Run the following cell and reset the session if needed: End of explanation """ CSV_COLUMN_NAMES = ["fare_amount","dayofweek","hourofday","pickuplon","pickuplat","dropofflon","dropofflat"] CSV_DEFAULTS = [[0.0],[1],[0],[-74.0], [40.0], [-74.0], [40.7]] def parse_row(row): fields = tf.decode_csv(records = row, record_defaults = CSV_DEFAULTS) features = dict(zip(CSV_COLUMN_NAMES, fields)) label = features.pop("fare_amount") return features, label def read_dataset(csv_path): dataset = tf.data.TextLineDataset(filenames = csv_path).skip(count = 1) # skip header dataset = dataset.map(map_func = parse_row) return dataset def train_input_fn(csv_path, batch_size = 128): dataset = read_dataset(csv_path) dataset = dataset.shuffle(buffer_size = 1000).repeat(count = None).batch(batch_size = batch_size) return dataset def eval_input_fn(csv_path, batch_size = 128): dataset = read_dataset(csv_path) dataset = dataset.batch(batch_size = batch_size) return dataset """ Explanation: Train and Evaluate Input Functions We'll use the same train and evaluation input functions that we created before. End of explanation """ FEATURE_NAMES = CSV_COLUMN_NAMES[1:] # all but first column feature_cols = [tf.feature_column.numeric_column(key = k) for k in FEATURE_NAMES] feature_cols """ Explanation: Feature Columns We also create the feature columns for the model the same as before. 
End of explanation """ def serving_input_receiver_fn(): receiver_tensors = # TODO: Your code goes here features = receiver_tensors return tf.estimator.export.ServingInputReceiver(features = features, receiver_tensors = receiver_tensors) """ Explanation: Serving Input Receiver Function In a prior notebook we used the estimator.predict() function to get taxifare predictions. This worked fine because we had done our model training on the same machine. However in a production setting this won't usually be the case. Our clients may be remote web servers, mobile apps and more. Instead of having to ship our model files to every client, it would be better to host our model in one place, and make it remotely accesible for prediction requests using a REST API. The TensorFlow solution for this is a project called TF Serving, which is part of the larger Tensorflow Extended (TFX) platform that extends TensorFlow for production environments. The interface between TensorFlow and TF Serving is a serving_input_receiver_fn(). It has two jobs: - To add tf.placeholders to the graph to specify what type of tensors TF Serving should recieve during inference requests. The placeholders are specified as a dictionary object - To add any additional ops needed to convert data from the client into the tensors expected by the model. The function must return a tf.estimator.export.ServingInputReceiver object, which packages the placeholders and the neccesary transformations together. Exercise 1 In the cell below, implement a serving_input_receiver_fn function that returns an instance of tf.estimator.export.ServingInputReceiver(features, receiver_tensors). Have a look at the documentation for Tensorflow's ServingInputReceiver. Here receiver_tensors is a dictionary describing the JSON object received by the Cloud ML Engine API, and is a dictionary features that has the structure as the feature dictionary accepted by our estimator. 
Here we keep things simple by assuming that the API receives a JSON object that has already the correct structure (i.e. features = receiver_tensors): End of explanation """ OUTDIR = "taxi_trained" config = tf.estimator.RunConfig( # TODO: Your code goes here ) """ Explanation: Train and Evaluate One issue with the previous notebooks is we only evaluate on our validation data once training is complete. This means we can't tell at what point overfitting began. What we really want is to evaluate at specified intervals during the training phase. The Estimator API way of doing this is to replace estimator.train() and estimator.evaluate() with estimator.train_and_evaluate(). This causes an evaluation to be done after every training checkpoint. However by default Tensorflow only checkpoints once every 10 minutes. Since this is less than the length of our total training we'd end up with the same behavior as before which is just one evaluation at the end of training. To remedy this we speciy in the tf.estimator.RunConfig() that TensorFlow should checkpoint every 100 steps. The default evaluation metric average_loss is MSE, but we want RMSE. Previously we just took the square root of the final average_loss. However it would be better if we could calculate RMSE not just at the end, but for every intermediate checkpoint and plot the change over time in TensorBoard. tf.contrib.estimator.add_metrics() allows us to do this. We wrap our estimator with it, and provide a custom evaluation function. train_and_evaluate() also allows us to use our serving_input_receiver_fn() to export our models in the SavedModel format required by TF Serving. Note: Training will be slower than the last notebook because we are now evaluating after every 100 train steps. Previously we didn't evaluate until training finished. 
Exercise 2 In the cell below, create a instance of tf.estimator.RunConfig named config and pass to its constructor information concerning: - the directory where we want the trained model and its checkpoints to be saved - the random seed which we want to be set to 1 - the cadence at which we want the model to create checkpoints (every 100 steps) To remind yourself what arguments tf.estimator.RunConfig takes have a look at the documentation. End of explanation """ model = tf.estimator.DNNRegressor( # TODO: Your code goes here ) """ Explanation: Exercise 3 In the cell below, create a DNNRegressor model with two layers of 10 neurons each using the RunConfig instance and the feature_cols list you just created. Note that we do not need to pass the model directory directly to the estimator constructor, since that info should already be wrapped into the RunConfig instance. End of explanation """ def my_rmse(labels, predictions): pred_values = # TODO: Your code goes here return { "rmse": # TODO: Your code goes here } """ Explanation: Adding custom evaluation metrics If we want to add a custom evaluation metric (one not included automatically with the canned DNNRegressor estimator) we will can do that by wrapping our model with our custom metric function using the contrib function .add_metrics. We will implement a my_rmse function that - takes as input a tensor of labels and a tensor of predictions - returns a dictionary with the single key rmse and with value the root mean square error between the labels and the predictions You can have a look at this blog post by Lak Lakshmanan on "How to extend a canned TensorFlow Estimator" for more information. Exercise 4 Implement a my_rmse function that - takes as input a tensor of labels and a tensor of predictions - returns a dictionary with the single key rmse and with value the root mean square error between the labels and the predictions Hint: Have a look at the Tensorflow documentation for tf.metrics.root_mean_squared_error. 
You will have to do some preliminary step to predictions before you can compute the RMSE. In fact, you may notice that you get a shape error if you try to use the prediction values as is. It may help to use tf.squeeze. Have a closer look at what tf.sqeeze does in the docs here. End of explanation """ model = tf.contrib.estimator.add_metrics(estimator = model, metric_fn = my_rmse) """ Explanation: Run the following cell to add the custom metric you defined above to the model: End of explanation """ train_spec = tf.estimator.TrainSpec( input_fn = # TODO: Your code goes here max_steps = # TODO: Your code goes here ) """ Explanation: Exercise 5 In the cell block below, create an instance of a tf.estimator.TrainSpec using the train_input_fn defined at the top of this file and with a max_steps of 500. Note, the training data should be loaded from ./taxi-train.csv. See the details of how to implement a Tensorflow TrainSpec in the documentation. Hint: You may need to use a lambda function to pass the training input function correctly. End of explanation """ exporter = # TODO: Your code goes here """ Explanation: Exercise 6 Next, create an exporter using the serving_input_receiver_fn defined at the beginning of this notebook. You want to export the trained model and its checkpoints in the './exporter' subdirectory. Use tf.estimator.FinalExporter to create the exporter intance. Have a look at the documentation for FinalExporter to ensure proper usage. Note: You may alternatively use tf.estimator.BestExporter to export at every checkpoint that has lower loss than the previous checkpoint, instead of exporting only the last checkpoint. End of explanation """ eval_spec = # TODO: Your code goes here """ Explanation: Exercise 7 In the cell below, create an instance of an EvalSpec to which you specify that - the data should be loaded from /.taxi-valid.csv during evaluation (use the correct input function!) 
- the exporter you defined above should be used - the first evaluation should start after 1 second of training - and then be repeated every 1 second Look at the documentaiton for tf.estimator.EvalSpec to help. Note: We use the checkpoint setting above because we want to evaluate after every checkpoint. As long as checkpoints are > 1 sec apart this ensures the throttling never kicks in. End of explanation """ tf.logging.set_verbosity(tf.logging.INFO) shutil.rmtree(path = OUTDIR, ignore_errors = True) tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file tf.estimator.train_and_evaluate(# TODO: Your code goes here ) """ Explanation: Exercise 8 Finally we use tf.estimator.train_and_evaluate to start the training and evaluation as you specified them above. Complete the code in the cell below, providing the necessary arguments. Have a look at the documentation for the train_and_evaluate method to make sure you pass everything it needs. End of explanation """ !ls -R taxi_trained/export """ Explanation: Inspect Export Folder Now in the output directory, in addition to the checkpoint files, you'll see a subfolder called 'export'. This contains one or models in the SavedModel format which is compatible with TF Serving. In the next notebook we will deploy the SavedModel behind a production grade REST API. End of explanation """ get_ipython().system_raw( "tensorboard --logdir {} --host 0.0.0.0 --port 6006 &" .format(OUTDIR) ) get_ipython().system_raw("../assets/ngrok http 6006 &") !curl -s http://localhost:4040/api/tunnels | python3 -c \ "import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])" """ Explanation: Monitoring with TensorBoard TensorBoard is a web UI that allows us to visualize various aspects of our model, including the training and evaluation loss curves. 
Although you won't see the loss curves yet, it is best to launch TensorBoard before you start training so that you may see them update during a long running training process. To get Tensorboard to work within a Deep Learning VM or Colab, we need to create a tunnel connection to your local machine. To do this we'll set up a tunnel connection with ngrok. Using ngrok we'll then create a tunnel connection to our virtual machine's port 6006. We can view the Tensorboard results by following the link provided by ngrok after executing the following cell. End of explanation """ # this will kill the processes for Tensorboard !ps aux | grep tensorboard | awk '{print $2}' | xargs kill # this will kill the processes for ngrok !ps aux | grep ngrok | awk '{print $2}' | xargs kill """ Explanation: Tensorboard cleanup To close the tunnel connection to Tensorboard, we can find the PIDs for ngrok and Tensorboard and stop them. End of explanation """
kwinkunks/timefreak
stft.ipynb
apache-2.0
import numpy as np from scipy.fftpack import fft, ifft, rfft, irfft, fftfreq, rfftfreq import scipy.signal import matplotlib.pyplot as plt %matplotlib inline """ Explanation: STFT and ISTFT I'd like to make my own spectrogram, so that I can play with Gabor logons, AKA Heisenberg boxes. End of explanation """ def stft(x, fs, framesz, hop): framesamp = int(framesz*fs) hopsamp = int(hop*fs) w = scipy.signal.hann(framesamp) X = np.array([fft(w*x[i:i+framesamp]) for i in range(0, len(x)-framesamp, hopsamp)]) return X def istft(X, fs, T, hop): x = np.zeros(T*fs) framesamp = X.shape[1] hopsamp = int(hop*fs) for n,i in enumerate(range(0, len(x)-framesamp, hopsamp)): x[i:i+framesamp] += np.real(ifft(X[n])) return x f0 = 440 # Compute the STFT of a 440 Hz sinusoid fs = 8000 # sampled at 8 kHz T = 5. # lasting 5 seconds framesz = 0.050 # with a frame size of 50 milliseconds hop = 0.020 # and hop size of 20 milliseconds. # Create test signal and STFT. t = np.linspace(0, T, T*fs, endpoint=False) x = np.sin(2*scipy.pi*f0*t) X = stft(x, fs, framesz, hop) # Plot the magnitude spectrogram. plt.figure(figsize=(12,8)) plt.subplot(311) plt.imshow(np.absolute(X.T), origin='lower', aspect='auto', interpolation='none') plt.ylabel('Frequency') # Compute the ISTFT. xhat = istft(X, fs, T, hop) # Plot the input and output signals over 0.1 seconds. T1 = int(0.1*fs) plt.subplot(312) plt.plot(t[:T1], x[:T1], t[:T1], xhat[:T1]) plt.ylabel('Amplitude') plt.subplot(313) plt.plot(t[-T1:], x[-T1:], t[-T1:], xhat[-T1:]) plt.xlabel('Time (seconds)') plt.ylabel('Ampltitude') plt.show() f0 = 10 fs = 1000 T = 2 framesz = 0.050 hop = 0.020 delta = int(T*fs/4) y = np.sin(2*np.pi*f0*t) + np.sin(2*np.pi*3*f0*t) y[delta] = 3. y[-delta] = 3. 
Y = stft(y, fs, framesz, hop)
plt.imshow(np.absolute(Y.T), origin='lower', aspect='auto', interpolation='none')
""" Explanation: From Stack Overflow End of explanation """

# Let's make a function to plot a signal and its specgram
def tf(signal, fs, w=256, wtime=False, poverlap=None, xlim=None, ylim=None,
       colorbar=False, vmin=None, vmax=None, filename=None,
       interpolation="bicubic"):
    """Plot a signal (top) and its matplotlib spectrogram (bottom).

    signal    : 1-D array of samples
    fs        : sample rate in Hz
    w         : window length in samples, or in seconds if wtime is True
    wtime     : interpret w as a duration in seconds instead of samples
    poverlap  : window overlap as a percentage; default is w - 1 samples
    xlim/ylim : optional upper limits for the time (s) / frequency axes
    colorbar  : draw a colorbar next to the spectrogram
    vmin/vmax : colour-scale limits passed through to specgram
    filename  : if given, the figure is also saved to this path
    """
    dt = 1./fs
    n = signal.size
    t = np.arange(0.0, n*dt, dt)
    if wtime:
        # Then the window length is time so change to samples
        w *= fs
    w = int(w)
    if poverlap:
        # Then overlap is a percentage
        noverlap = int(w * poverlap/100.)
    else:
        noverlap = w - 1
    plt.figure(figsize=(12,8))
    ax1 = plt.subplot(211)
    ax1.plot(t, signal)
    if xlim:
        ax1.set_xlim((0,xlim))
    ax2 = plt.subplot(212)
    Pxx, freqs, bins, im = plt.specgram(signal, NFFT=w, Fs=fs,
                                        noverlap=noverlap, cmap='Greys',
                                        vmin=vmin, vmax=vmax,
                                        interpolation=interpolation)
    if colorbar:
        plt.colorbar()
    if ylim:
        ax2.set_ylim((0,ylim))
    if xlim:
        ax2.set_xlim((0,xlim))
    if filename:
        plt.savefig(filename)
    plt.show()

synthetic = np.loadtxt('benchmark_signals/synthetic.txt')
tf(synthetic, 800, w=256, xlim=10, ylim=256, vmin=-30,
   filename="/Users/matt/Pictures/stft_interpolated.png")
synthetic.shape
print("length of signal =", 8192/800.)
SYN = stft(synthetic, 800, 0.128, 0.010)
print(SYN.shape)
freqs = fftfreq(128, d=1/800.)
plt.figure(figsize=(12,4))
# Keep only the positive-frequency half of each frame.  shape[1]/2. is a
# float, and float slice indices raise TypeError in Python 3 -- use
# integer division instead.
plt.imshow(np.absolute(SYN.T[:SYN.shape[1]//2, :1000]), origin='lower',
           aspect='auto', interpolation='none', cmap="Greys")
#plt.ylim(freqs[0],freqs[-65])
#plt.colorbar()
plt.savefig("/Users/matt/Pictures/stft_uninterpolated.png")
fftfreq(128, d=1/800.)
# Gabor boxes
fw = 1/.128
tw = .128
print(fw, "Hz ", tw, "s")
""" Explanation: Benchmark signals End of explanation """

import colorsys

# Map each complex STFT coefficient to an HSV colour: hue encodes the
# phase angle, saturation encodes magnitude relative to the maximum.
fm = np.amax(np.abs(SYN))
np.amin(np.angle(SYN))
timesteps = []
for t in SYN.T:
    freqs = []
    for f in t:
        # This is not right, need phase angle and mag:
        #rgb = colorsys.hsv_to_rgb(f.imag, f.real, f.real)
        hue = 0.5 + (np.angle(f) / (2*np.pi))  # angle in [-pi, pi] -> hue in [0, 1]
        rgb = colorsys.hsv_to_rgb(hue, np.abs(f)/fm, 1.0)
        freqs.append(rgb)
    timesteps.append(freqs)
rgb_arr = np.array(timesteps)
rgb_arr.shape
""" Explanation: Complex display End of explanation """

plt.figure(figsize=(12,4))
# shape[0]/2. is a float; float slice indices are a TypeError in
# Python 3, so use integer division to drop the negative frequencies.
plt.imshow(rgb_arr[:rgb_arr.shape[0]//2, ...], aspect="auto",
           origin="lower", interpolation="none")
plt.savefig('/Users/matt/Pictures/stft_complex.png')
plt.show()
""" Explanation: The matplotlib function imshow interprets arrays like this as 3-channel colour images, so we can just display this array directly. We'll chop off the negative frequencies again. End of explanation """

SYN.shape
8192/800.
fs=800
T = 8192/800.
hop = 0.010
syn = istft(SYN, fs, T, hop)
plt.figure(figsize=(12,4))
plt.plot(syn)
plt.show()
""" Explanation: Inverse STFT End of explanation """

import pytfd.stft

N = 256
t_max = 10
t = np.linspace(0, t_max, N)
fs = N/t_max
f = np.linspace(0, fs, N)
plt.figure(figsize=(15,6))
for i, T in enumerate([32, 64, 128]):
    # scipy.signal.boxcar was removed in SciPy 1.13; window functions
    # now live in scipy.signal.windows.
    w = scipy.signal.windows.boxcar(T)  # Rectangular window
    delta1 = np.zeros(N)
    # N/4 is a float in Python 3 and float indices raise TypeError;
    # use integer division for all index arithmetic below.
    delta1[N//4] = 5
    delta2 = np.zeros(N)
    delta2[-N//4] = 5
    y = np.sin(2*np.pi*10*t) + np.sin(2*np.pi*30*t) + delta1 + delta2
    Y = pytfd.stft.stft(y, w)
    plt.subplot(3, 2, 2*i + 1)
    plt.plot(t, y)
    #plt.xlabel("Time")
    #plt.ylabel("Amplitude")
    #plt.title(r"Signal")
    plt.subplot(3, 2, 2*i + 2)
    plt.imshow(np.absolute(Y)[N//2:], interpolation="none", aspect=0.5,
               origin="lower")
    #plt.xlabel("Time")
    #plt.ylabel("Frequency")
    #plt.title(r"STFT T = %d$T_s$"%T)
plt.show()
""" Explanation: PyTFD implementation I found a lightweight library for doing all sorts of time-frequency stuff: pytfd.
https://github.com/endolith/pytfd End of explanation """

w = scipy.signal.windows.hann(256)
plt.plot(w)
plt.show()
W = rfft(w)
plt.plot(np.absolute(W)[:10])
plt.show()
1/0.256
""" Explanation: Nice! But no inverse STFT. Uncertainty principle The time and frequency localization of the window determine the localization of the result. End of explanation """
domino14/macondo
notebooks/deprecated/superleaves.ipynb
gpl-3.0
# Dependencies for enumerating Scrabble "superleaves" (rack subsets)
# and scoring them against a log of simulated games.
from itertools import combinations
import numpy as np
import pandas as pd
import seaborn as sns
from string import ascii_uppercase
import time as time
%matplotlib inline

# Longest leave we enumerate.  Memory-bound: per the notes below, 5 is
# the practical ceiling on a local machine.
maximum_superleave_length = 5
# Longest leave for which expected value is computed from the game log.
ev_calculator_max_length = 5
# CSV of simulated plays with columns: rack, score, tiles_remaining.
log_file = 'log_games.csv'
""" Explanation: How to use maximum_superleave_length indicates the maximum length of superleaves to consider. Right now, maximum runnable on my local machine is 5. ev_calculator_max_length indicates the maximum length of superleave that we want to calculate the EV for based on a log file of games (log_file). To-do Have creation of duplicates go in reverse so that all necessary columns are present. Changelog 1/27/19 - Determined that the speed of creation of the rack dataframes is a function of the length of the dataframe. From that, realized that we should organize leaves by least-frequent to most-frequent letter, such that sub-dataframes are created from the shortest racks possible. End of explanation """
for i in range(1,maximum_superleave_length+1): leaves[i] = [''.join(sorted(leave, key=sort_func)) for leave in leaves[i]] t1 = time.time() print('Calculated superleaves up to length {} in {} seconds'.format( maximum_superleave_length,t1-t0)) """ Explanation: Create a dictionary of all possible 1 to 6-tile leaves. Also, add functionality for sorting by an arbitrary key - allowing us to put rarest letters first End of explanation """ for i in range(1,maximum_superleave_length+1): print(i,len(leaves[i])) column_dict = { 0:'rack', 1:'score', 2:'tiles_remaining' } df = pd.read_csv(log_file, header=None, keep_default_na=False) df.rename(columns=column_dict,inplace=True) tile_limit = 1 df = df.loc[df['tiles_remaining']>=tile_limit] df = df.iloc[:2000000] """ Explanation: The bottom creates the full set of leaves for all lengths from 1-5 (6 breaks on my local machine) End of explanation """ t0 = time.time() df['rack'] = df['rack'].apply(lambda x: ''.join(sorted(x, key=sort_func))) t1 = time.time() print(t1-t0) tb = time.time() df_dict = {'': df} for multiple in range(1,maximum_superleave_length+1): t0 = time.time() # iterate through all 27 tiles for c in leaves[1]: if multiple*c in leaves[multiple]: condition = df_dict[(multiple-1)*c]['rack'].apply(lambda x: multiple*c in x) df_dict[multiple*c] = df_dict[(multiple-1)*c].loc[condition] df[multiple*c] = condition df[multiple*c].fillna(False, inplace=True) t1 = time.time() print('Added columns for all duplicates up to length {} in {} seconds'.format(multiple,t1-t0)) te = time.time() print('Added all necessary columns in {} seconds'.format(te-tb)) """ Explanation: Order rack (originally alphabetical, but now custom key with rarest letters first for maximum efficiency). Note that this is slower than alphabetical organization because it has to use the index function, but should be rewarded with subsequent performance enhancements. 
End of explanation """ all_leaves = [] for i in range(1,ev_calculator_max_length+1): all_leaves += leaves[i] df_dict = {leave: pd.DataFrame() for leave in all_leaves} df_dict[''] = df ev_df = pd.DataFrame(columns=['mean','std','count','ev','synergy'], index=all_leaves) """ Explanation: Set up dataframe for storing EV of all leaves. End of explanation """ def get_columns(leave): letters=list(set(leave)) tags = [] for l in letters: tags += [sum([l==letter for letter in leave])*l] return tags for leave_length in range(3,5): print(leave_length) t0 = time.time() for leave in leaves[leave_length]: print(leave) print(len(df_dict[leave[:-1]])) t2 = time.time() condition = df_dict[leave[:-1]][get_columns(leave)].all(axis=1) t3 = time.time() df_dict[leave] = df_dict[leave[:-1]].loc[condition] t4 = time.time() ev_df.loc[leave]['mean'] = df_dict[leave]['score'].mean() t5 = time.time() ev_df.loc[leave]['std'] = df_dict[leave]['score'].std() t6 = time.time() ev_df.loc[leave]['count'] = len(df_dict[leave]) t7 = time.time() print('condition calc time (ms): {:.5f} ({})'.format(1000*(t3-t2),100*(t3-t2)/(t7-t2))) print('condition calc time (ms): {:.5f} ({})'.format(1000*(t4-t3),100*(t4-t3)/(t7-t2))) print('condition calc time (ms): {:.5f} ({})'.format(1000*(t5-t4),100*(t5-t4)/(t7-t2))) print('condition calc time (ms): {:.5f} ({})'.format(1000*(t6-t5),100*(t6-t5)/(t7-t2))) print('condition calc time (ms): {:.5f} ({})'.format(1000*(t7-t6),100*(t7-t6)/(t7-t2))) t1 = time.time() print('Calculated mean, std and count in {} seconds'.format(t1-t0)) """ Explanation: To find all of the racks corresponding to a particular leave, we have added columns to the dataframe of plays df marking each letter (A, B, C...) and also for duplicates (AA, BB, CC...) and triplicates where possible (AAA, DDD, EEE...). If the letters in a given leave are all different, we can look for rows by using df['A']&df['B']. However, if there are duplicates involved, we have to look for df['AA']. 
The following function gives the correct dataframe columns to be looked up. End of explanation """ for leave_length in range(1,ev_calculator_max_length+1): print(leave_length) t0 = time.time() for leave in leaves[leave_length]: condition = df_dict[leave[:-1]][get_columns(leave)].all(axis=1) df_dict[leave] = df_dict[leave[:-1]].loc[condition] ev_df.loc[leave]['mean'] = df_dict[leave]['score'].mean() ev_df.loc[leave]['std'] = df_dict[leave]['score'].std() ev_df.loc[leave]['count'] = len(df_dict[leave]) t1 = time.time() print('Calculated mean, std and count in {} seconds'.format(t1-t0)) ev_df['pct'] = 100*ev_df['count']/len(df) ev_df['ev'] = ev_df['mean']-df['score'].mean() """ Explanation: Benchmark figures With 2M racks, following amount of time was taken: * 1 - 3s (.11s/leave) * 2 - 15s (.04s/leave) * 3 - 38s (.011s/leave) * 4 - 84s (.0033s/leave) * 5 - 244s (.0016s/leave) -> 383s total Using improvement of non-alphabetical leaves, performance was as follows: * 1 - 3s (.11s/leave) * 2 - 10s (.027s/leave) * 3 - 23s (.0066s/leave) * 4 - 64s (.0025s/leave) * 5 - 226s (.0015s/leave) -> 326 total (15% faster than previous version) End of explanation """ for leave_length in range(2,ev_calculator_max_length+1): for leave in leaves[leave_length]: ev_df.loc[leave]['synergy'] = ev_df.loc[leave]['ev']-\ sum([ev_df.loc[c]['ev'] for c in leave]) ev_df ev_df.to_csv('leave_values_011219_v7.csv') ev_df.sort_values('synergy') """ Explanation: Calculate leave "synergy", in other words the difference between the EV of the rack and what we'd expect just from adding the individual values of the tiles End of explanation """
savioabuga/arrows
arrows.ipynb
mit
from arrows.preprocess import load_df """ Explanation: arrows: Yet Another Twitter/Python Data Analysis Geospatially, Temporally, and Linguistically Analyzing Tweets about Top U.S. Presidential Candidates with Pandas, TextBlob, Seaborn, and Cartopy Hi, I'm Raj. For my internship this summer, I've been using data science and geospatial Python libraries like xray, numpy, rasterio, and cartopy. A week ago, I had a discussion about the relevance of Bernie Sanders among millenials - and so, I set out to get a rough idea by looking at recent tweets. I don't explain any of the code in this document, but you can skip the code and just look at the results if you like. If you're interested in going further with this data, I've posted source code and the dataset at https://github.com/raj-kesavan/arrows. If you have any comments or suggestions (oneither code or analysis), please let me know at rajk@berkeley.edu. Enjoy! First, I used Tweepy to pull down 20,000 tweets for each of Hillary Clinton, Bernie Sanders, Rand Paul, and Jeb Bush [retrieve_tweets.py]. I've also already done some calculations, specifically of polarity, subjectivity, influence, influenced polarity, and longitude and latitude (all explained later) [preprocess.py]. End of explanation """ from textblob import TextBlob import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib import seaborn as sns import cartopy pd.set_option('display.max_colwidth', 200) pd.options.display.mpl_style = 'default' matplotlib.style.use('ggplot') sns.set_context('talk') sns.set_style('whitegrid') plt.rcParams['figure.figsize'] = [12.0, 8.0] % matplotlib inline """ Explanation: Just adding some imports and setting graph display options. End of explanation """ df = load_df('arrows/data/results.csv') df.info() """ Explanation: Let's look at our data! load_df loads it in as a pandas.DataFrame, excellent for statistical analysis and graphing. 
End of explanation """ df[['candidate', 'created_at', 'lang', 'place', 'user_followers_count', 'user_time_zone', 'polarity', 'influenced_polarity', 'text']].head(1) """ Explanation: We'll be looking primarily at candidate, created_at, lang, place, user_followers_count, user_time_zone, polarity, and influenced_polarity, and text. End of explanation """ TextBlob("Tear down this wall!").sentiment """ Explanation: First I'll look at sentiment, calculated with TextBlob using the text column. Sentiment is composed of two values, polarity - a measure of the positivity or negativity of a text - and subjectivity. Polarity is between -1.0 and 1.0; subjectivity between 0.0 and 1.0. End of explanation """ TextBlob("Radix malorum est cupiditas.").sentiment """ Explanation: Unfortunately, it doesn't work too well on anything other than English. End of explanation """ sentence = TextBlob("Radix malorum est cupiditas.").translate() print(sentence) print(sentence.sentiment) """ Explanation: TextBlob has a cool translate() function that uses Google Translate to take care of that for us, but we won't be using it here - just because tweets include a lot of slang and abbreviations that can't be translated very well. End of explanation """ english_df = df[df.lang == 'en'] english_df.sort('polarity', ascending = False).head(3)[['candidate', 'polarity', 'subjectivity', 'text']] """ Explanation: All right - let's figure out the most (positively) polarized English tweets. End of explanation """ candidate_groupby = english_df.groupby('candidate') candidate_groupby[['polarity', 'influence', 'influenced_polarity']].mean() """ Explanation: Extrema don't mean much. We might get more interesting data with mean polarities for each candidate. Let's also look at influenced polarity, which takes into account the number of retweets and followers. 
End of explanation """ jeb = candidate_groupby.get_group('Jeb Bush') jeb_influence = jeb.sort('influence', ascending = False) jeb_influence[['influence', 'polarity', 'influenced_polarity', 'user_name', 'text', 'created_at']].head(5) """ Explanation: So tweets about Jeb Bush, on average, aren't as positive as the other candidates, but the people tweeting about Bush get more retweets and followers. I used the formula influence = sqrt(followers + 1) * sqrt(retweets + 1). You can experiment with different functions if you like [preprocess.py:influence]. We can look at the most influential tweets about Jeb Bush to see what's up. End of explanation """ df[df.user_name == 'Donald J. Trump'].groupby('candidate').size() """ Explanation: Side note: you can see that sentiment analysis isn't perfect - the last tweet is certainly negative toward Jeb Bush, but it was actually assigned a positive polarity. Over a large number of tweets, though, sentiment analysis is more meaningful. As to the high influence of tweets about Bush: it looks like Donald Trump (someone with a lot of followers) has been tweeting a lot about Bush over the other candidates - one possible reason for Jeb's greater influenced_polarity. End of explanation """ language_groupby = df.groupby(['candidate', 'lang']) language_groupby.size() """ Explanation: Looks like our favorite toupéed candidate hasn't even been tweeting about anyone else! What else can we do? We know the language each tweet was (tweeted?) in. End of explanation """ largest_languages = language_groupby.filter(lambda group: len(group) > 10) """ Explanation: That's a lot of languages! Let's try plotting to get a better idea, but first, I'll remove smaller language/candidate groups. By the way, each lang value is an IANA language tag - you can look them up at https://www.iana.org/assignments/language-subtag-registry/language-subtag-registry. 
End of explanation """ non_english = largest_languages[largest_languages.lang != 'en'] non_english_groupby = non_english.groupby(['lang', 'candidate'], as_index = False) sizes = non_english_groupby.text.agg(np.size) sizes = sizes.rename(columns={'text': 'count'}) sizes_pivot = sizes.pivot_table(index='lang', columns='candidate', values='count', fill_value=0) plot = sns.heatmap(sizes_pivot) plot.set_title('Number of non-English Tweets by Candidate', family='Ubuntu') plot.set_ylabel('language code', family='Ubuntu') plot.set_xlabel('candidate', family='Ubuntu') plot.figure.set_size_inches(12, 7) """ Explanation: I'll also remove English, since it would just dwarf all the other languages. End of explanation """ mean_polarities = df.groupby(['candidate', 'created_at']).influenced_polarity.mean() plot = mean_polarities.unstack('candidate').resample('60min').plot() plot.set_title('Influenced Polarity over Time by Candidate', family='Ubuntu') plot.set_ylabel('influenced polarity', family='Ubuntu') plot.set_xlabel('time', family='Ubuntu') plot.figure.set_size_inches(12, 7) """ Explanation: Looks like Spanish and Portuguese speakers mostly tweet about Jeb Bush, while Francophones lean more liberal, and Clinton tweeters span the largest range of languages. We also have the time-of-tweet information - I'll plot influenced polarity over time for each candidate. I'm also going to resample the influenced_polarity values to 1 hour intervals to get a smoother graph. 
End of explanation """ language_sizes = df.groupby('lang').size() threshold = language_sizes.quantile(.75) top_languages_df = language_sizes[language_sizes > threshold] top_languages = set(top_languages_df.index) - {'und'} top_languages df['hour'] = df.created_at.apply(lambda datetime: datetime.hour) for language_code in top_languages: lang_df = df[df.lang == language_code] normalized = lang_df.groupby('hour').size() / lang_df.lang.count() plot = normalized.plot(label = language_code) plot.set_title('Tweet Frequency in non-English Languages by Hour of Day', family='Ubuntu') plot.set_ylabel('normalized frequency', family='Ubuntu') plot.set_xlabel('hour of day (UTC)', family='Ubuntu') plot.legend() plot.figure.set_size_inches(12, 7) """ Explanation: Since I only took the last 20,000 tweets for each candidate, I didn't receive as large a timespan from Clinton (a candidate with many, many tweeters) compared to Rand Paul. But we can still analyze the data in terms of hour-of-day. I'd like to know when tweeters in each language tweet each day, and I'm going to use percentages instead of raw number of tweets so I can compare across different languages easily. By the way, the times in the dataframe are in UTC. End of explanation """ df_of_interest = df[(df.hour == 2) & (df.lang == 'pt')] print('Number of tweets:', df_of_interest.text.count()) print('Number of unique users:', df_of_interest.user_name.unique().size) """ Explanation: Note that English, French, and Spanish are significantly flatter than the other languages - this means that there's a large spread of speakers all over the globe. But why is Portuguese spiking at 11pm Brasilia time / 3 am Lisbon time? Let's find out! My first guess was that maybe there's a single person making a ton of posts at that time. End of explanation """ df_of_interest.text.head(25).unique() """ Explanation: So that's not it. Maybe there was a major event everyone was retweeting? 
End of explanation """ df_of_interest[df_of_interest.text.str.contains('Jeb Bush diz que foi atingido')].text.count() """ Explanation: Seems to be a lot of these 'Jeb Bush diz que foi atingido...' tweets. How many? We can't just count unique ones because they all are different slightly, but we can check for a large-enough substring. End of explanation """ tz_df = english_df.dropna(subset=['user_time_zone']) us_tz_df = tz_df[tz_df.user_time_zone.str.contains("US & Canada")] us_tz_candidate_groupby = us_tz_df.groupby(['candidate', 'user_time_zone']) us_tz_candidate_groupby.influenced_polarity.mean() """ Explanation: That's it! Looks like there was a news article from a Brazilian website (http://jconline.ne10.uol.com.br/canal/mundo/internacional/noticia/2015/07/05/jeb-bush-diz-que-foi-atingido-por-criticas-de-trump-a-mexicanos-188801.php) that happened to get a lot of retweets at that time period. A similar article in English is at http://www.nytimes.com/politics/first-draft/2015/07/04/an-angry-jeb-bush-says-he-takes-donald-trumps-remarks-personally/. Since languages can span across different countries, we might get results if we search by location, rather than just language. We don't have very specific geolocation information other than timezone, so let's try plotting candidate sentiment over the 4 major U.S. timezones (Los Angeles, Denver, Chicago, and New York). This is also be a good opportunity to look at a geographical map. 
End of explanation """ tz_shapes = cartopy.io.shapereader.Reader('arrows/world/tz_world_mp.shp') tz_records = list(tz_shapes.records()) tz_translator = { 'Eastern Time (US & Canada)': 'America/New_York', 'Central Time (US & Canada)': 'America/Chicago', 'Mountain Time (US & Canada)': 'America/Denver', 'Pacific Time (US & Canada)': 'America/Los_Angeles', } american_tz_records = { tz_name: next(filter(lambda record: record.attributes['TZID'] == tz_id, tz_records)) for tz_name, tz_id in tz_translator.items() } """ Explanation: That's our raw data: now to plot it on a map. I got the timezone Shapefile from http://efele.net/maps/tz/world/. First, I read in the Shapefile with Cartopy. End of explanation """ albers_equal_area = cartopy.crs.AlbersEqualArea(-95, 35) plate_carree = cartopy.crs.PlateCarree() states_and_provinces = cartopy.feature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', scale='50m', facecolor='none' ) cmaps = [matplotlib.cm.Blues, matplotlib.cm.Greens, matplotlib.cm.Reds, matplotlib.cm.Purples] norm = matplotlib.colors.Normalize(vmin=0, vmax=30) candidates = df['candidate'].unique() plt.rcParams['figure.figsize'] = [6.0, 4.0] for index, candidate in enumerate(candidates): plt.figure() plot = plt.axes(projection=albers_equal_area) plot.set_extent((-125, -66, 20, 50)) plot.add_feature(cartopy.feature.LAND) plot.add_feature(cartopy.feature.COASTLINE) plot.add_feature(cartopy.feature.BORDERS) plot.add_feature(states_and_provinces, edgecolor='gray') plot.add_feature(cartopy.feature.LAKES, facecolor="#00BCD4") for tz_name, record in american_tz_records.items(): tz_specific_df = us_tz_df[us_tz_df.user_time_zone == tz_name] tz_candidate_specific_df = tz_specific_df[tz_specific_df.candidate == candidate] mean_polarity = tz_candidate_specific_df.influenced_polarity.mean() plot.add_geometries( [record.geometry], crs=plate_carree, color=cmaps[index](norm(mean_polarity)), alpha=.8 ) plot.set_title('Influenced Polarity toward {} by 
U.S. Timezone'.format(candidate), family='Ubuntu') plot.figure.set_size_inches(6, 3.5) plt.show() print() """ Explanation: Next, I have to choose a projection and plot it (again using Cartopy). The Albers Equal-Area is good for maps of the U.S. I'll also download some featuresets from the Natural Earth dataset to display state borders. End of explanation """ american_timezones = ('US & Canada|Canada|Arizona|America|Hawaii|Indiana|Alaska' '|New_York|Chicago|Los_Angeles|Detroit|CST|PST|EST|MST') foreign_tz_df = tz_df[~tz_df.user_time_zone.str.contains(american_timezones)] foreign_tz_groupby = foreign_tz_df.groupby('user_time_zone') foreign_tz_groupby.size().sort(inplace = False, ascending = False).head(25) """ Explanation: My friend Gabriel Wang pointed out that U.S. timezones other than Pacific don't mean much since each timezone covers both blue and red states, but the data is still interesting. As expected, midwestern states lean toward Jeb Bush. I wasn't expecting Jeb Bush's highest polarity-tweets to come from the East; this is probably Donald Trump (New York, New York) messing with our data again. In a few months I'll look at these statistics with the latest tweets and compare. What are tweeters outside the U.S. saying about our candidates? Outside of the U.S., if someone is in a major city, the timezone is often that city itself. Here are the top (by number of tweets) non-American 25 timezones in our dataframe. End of explanation """ foreign_english_tz_df = foreign_tz_df[foreign_tz_df.lang == 'en'] """ Explanation: I also want to look at polarity, so I'll only use English tweets. (Sorry, Central/South Americans - my very rough method of filtering out American timezones gets rid of some of your timezones too. Let me know if there's a better way to do this.) 
End of explanation """ foreign_tz_groupby = foreign_english_tz_df.groupby(['candidate', 'user_time_zone']) top_foreign_tz_df = foreign_tz_groupby.filter(lambda group: len(group) > 40) top_foreign_tz_groupby = top_foreign_tz_df.groupby(['user_time_zone', 'candidate'], as_index = False) mean_influenced_polarities = top_foreign_tz_groupby.influenced_polarity.mean() pivot = mean_influenced_polarities.pivot_table( index='user_time_zone', columns='candidate', values='influenced_polarity', fill_value=0 ) plot = sns.heatmap(pivot) plot.set_title('Influenced Polarity in Major Foreign Cities by Candidate', family='Ubuntu') plot.set_ylabel('city', family='Ubuntu') plot.set_xlabel('candidate', family='Ubuntu') plot.figure.set_size_inches(12, 7) """ Explanation: Now we have a dataframe containing (mostly) world cities as time zones. Let's get the top cities by number of tweets for each candidate, then plot polarities. End of explanation """ df_place = df.dropna(subset=['place']) mollweide = cartopy.crs.Mollweide() plot = plt.axes(projection=mollweide) plot.set_global() plot.add_feature(cartopy.feature.LAND) plot.add_feature(cartopy.feature.COASTLINE) plot.add_feature(cartopy.feature.BORDERS) plot.scatter( list(df_place.longitude), list(df_place.latitude), transform=plate_carree, zorder=2 ) plot.set_title('International Tweeters with Geolocation Enabled', family='Ubuntu') plot.figure.set_size_inches(14, 9) plot = plt.axes(projection=albers_equal_area) plot.set_extent((-125, -66, 20, 50)) plot.add_feature(cartopy.feature.LAND) plot.add_feature(cartopy.feature.COASTLINE) plot.add_feature(cartopy.feature.BORDERS) plot.add_feature(states_and_provinces, edgecolor='gray') plot.add_feature(cartopy.feature.LAKES, facecolor="#00BCD4") candidate_groupby = df_place.groupby('candidate', as_index = False) colors = ['#1976d2', '#7cb342', '#f4511e', '#7b1fa2'] for index, (name, group) in enumerate(candidate_groupby): longitudes = group.longitude.values latitudes = group.latitude.values 
plot.scatter( longitudes, latitudes, transform=plate_carree, color=colors[index], label=name, zorder=2 ) plot.set_title('U.S. Tweeters by Candidate', family='Ubuntu') plt.legend(loc='lower left') plot.figure.set_size_inches(12, 7) """ Explanation: Exercise for the reader: why is Rand Paul disliked in Athens? You can probably guess, but the actual tweets causing this are rather amusing. Greco-libertarian relations aside, the data shows that London and Amsterdam are among the most influential of cities, with the former leaning toward Jeb Bush and the latter about neutral. In India, Clinton-supporters reside in New Delhi while Chennai tweeters back Rand Paul. By contrast, in 2014, New Delhi constituents voted for the conservative Bharatiya Janata Party while Chennai voted for the more liberal All India Anna Dravida Munnetra Kazhagam Party - so there seems to be some kind of cultural difference between the voters of 2014 and the tweeters of today. Last thing I thought was interesting: Athens has the highest mean polarity for Bernie Sanders, the only city for which this is the case. Could this have anything to do with the recent economic crisis, 'no' vote for austerity, and Bernie's social democratic tendencies? Finally, I'll look at specific geolocation (latitude and longitude) data. Since only about 750 out of 80,000 tweets had geolocation enabled, this data can't really be used for sentiment analysis, but we can still get a good idea of international spread. First I'll plot everything on a world map, then break it up by candidate in the U.S. End of explanation """
hpparvi/Parviainen-2017-WASP-80b
notebooks/01_broadband_analysis/E1_data_preparation.ipynb
mit
%pylab inline %run __init__.py import astropy.io.fits as pf import pandas as pd import seaborn as sb from glob import glob from os.path import basename, splitext, join from astropy.table import Table from exotk.utils.misc import fold from src.extcore import TC, P, TZERO, DDATA """ Explanation: WASP-80b broadband analysis 1. Data preparation Hannu Parviainen, Instituto de Astrofísica de Canarias<br> This notebook works as an appendix to Parviainen et al., Ground based transmission spectroscopy of WASP-80b (2017). The paper covers two analyses: a broadband analysis using three previously published datasets, and a transmission spectroscopy analysis using two GTC-observed spectroscopic time series, and this notebook covers a part of the broadband analysis. Last (significant) revision: 11.08.2017 The broadband analysis uses three broadband datasets by Triaud et al. (T13, 2013), Fukui et al. (F14, 2014), and Mancini et al. (M14, 2014), containing 27 light curves observed in g', r', i', z', I, J, H, K. The T13 and M14 datasets contain only the time, flux, and flux uncertainty estimates, while the F14 dataset contains also a set of useful covariates, such as the airmass and FWHM estimates. We start the broadband analysis by homogenising the three datasets into a common format, and store the data in a HDF5-file DDATA/external_lcs.h5. The F14 data is stored as Pandas DataFrames, and the T13, and M14 light curves as Pandas Series. 
The structure of the file is /lc/fukui2014/H/irsf_130716 frame (shape-&gt;[537,7]) /lc/fukui2014/H/irsf_130822 frame (shape-&gt;[576,7]) /lc/fukui2014/H/irsf_131007 frame (shape-&gt;[597,7]) /lc/fukui2014/J/irsf_130716 frame (shape-&gt;[541,7]) /lc/fukui2014/J/irsf_130822 frame (shape-&gt;[566,7]) /lc/fukui2014/J/irsf_131007 frame (shape-&gt;[597,7]) /lc/fukui2014/J/isle_130813 frame (shape-&gt;[324,7]) /lc/fukui2014/J/isle_130922 frame (shape-&gt;[241,7]) /lc/fukui2014/K/irsf_130716 frame (shape-&gt;[539,7]) /lc/fukui2014/K/irsf_130822 frame (shape-&gt;[582,7]) /lc/fukui2014/K/irsf_131007 frame (shape-&gt;[603,7]) /lc/fukui2014/g/oao50_130813 frame (shape-&gt;[562,7]) /lc/fukui2014/g/oao50_130922 frame (shape-&gt;[268,7]) /lc/fukui2014/i/oao50_130813 frame (shape-&gt;[563,7]) /lc/fukui2014/i/oao50_130922 frame (shape-&gt;[295,7]) /lc/fukui2014/r/oao50_130813 frame (shape-&gt;[566,7]) /lc/fukui2014/r/oao50_130922 frame (shape-&gt;[290,7]) /lc/mancini2014/H/grond series (shape-&gt;[508]) /lc/mancini2014/J/grond series (shape-&gt;[508]) /lc/mancini2014/K/grond series (shape-&gt;[508]) /lc/mancini2014/g/grond series (shape-&gt;[162]) /lc/mancini2014/i/dfosc series (shape-&gt;[156]) /lc/mancini2014/i/grond series (shape-&gt;[156]) /lc/mancini2014/r/grond series (shape-&gt;[156]) /lc/mancini2014/z/grond series (shape-&gt;[157]) /lc/triaud2013/r/eulercam_1 series (shape-&gt;[208]) /lc/triaud2013/z/trappist_1 series (shape-&gt;[721]) /lc/triaud2013/z/trappist_2 series (shape-&gt;[704]) /transmission frame (shape-&gt;[2151,7]) Note: We also store the transmission as a function of wavelength for each passband in the HDF5-file, under /transmission, and simplify the analysis a bit by merging the i' and I bands: the average transmission is used for both passbands. 
End of explanation
"""
# Fukui et al. (2014): read every light-curve file, phase-fold it, normalise
# the out-of-transit flux to unity, and store it in the common HDF5 file.
fnames = sorted(glob(join(DDATA,'fukui_2014/[i,o]*dat')))
# Build HDF5 node names 'lc/fukui2014/<band>/<instrument>_<date>' from file
# names of the form '<instrument>-<band>-<date>.dat'.
hnames = list(map(lambda s: 'lc/fukui2014/{t[1]:s}/{t[0]:s}_{t[2]:s}'.format(t=splitext(basename(s))[0].split('-')), fnames))
# Normalise the band letters to the lower-case convention used elsewhere.
hnames = list(map(lambda s: s.replace('/I/','/i/').replace('/G/','/g/').replace('/R/','/r/'), hnames))
dfs = {}
for fname,hname in zip(fnames,hnames):
    df = pd.read_csv(fname, sep=' ')
    # Drop the leading label column and the two trailing columns.
    df = pd.DataFrame(df.values[:,:-2], columns=df.columns[1:-1])
    # The time column stores BJD - 2450000; restore the full BJD as the index.
    stime = pd.Series(df.iloc[:,0].values + 2450000, name='bjd', dtype=np.float64)
    df = pd.DataFrame(df.values[:,1:], index=stime, columns=df.columns[1:], dtype=np.float64)
    # Orbital phase, shifted so that the transit centre sits at phase zero.
    ph = fold(df.index.values, P, TC+TZERO, normalize=True, shift=0.5) - 0.5
    # oe_mask flags the in-transit points (|phase| < 0.015).
    df['oe_mask'] = abs(ph) < 0.015
    # Normalise by the median of the out-of-transit flux.
    df.flux = df.flux / median(df.flux[~df.oe_mask])
    df.to_hdf(join(DDATA,'external_lcs.h5'), hname)
    dfs[hname] = df
# Quick-look plot: one panel per light curve, transit window marked by
# dashed lines at phase +-0.015.
fig,axs = subplots(6,3,figsize=(14,16), sharey=True, sharex=True)
for k,ax in zip(sorted(dfs.keys()),axs.flat):
    df = dfs[k]
    ph = fold(df.index.values, P, TC+TZERO, normalize=True, shift=0.5) - 0.5
    fl = df.flux.values
    ax.plot(ph, fl, '.')
    ax.text(0.5,0.9,k.split('2014/')[1].replace('/',' - '), ha='center', transform=ax.transAxes)
    [ax.axvline(v, ls='--', c='k', lw=1, alpha=0.5) for v in [-0.015,0.015]]
setp(axs, ylim=(0.93,1.03), xlim=(-0.04,0.04), xticks=linspace(-0.03,0.03,5), yticks=linspace(0.95,1.01,7))
fig.tight_layout()
# The 6x3 grid has one more panel than there are light curves; hide the last.
axs.flat[-1].set_visible(False)
"""
Explanation: Fukui et al. 
2013 End of explanation """ df = Table.read(join(DDATA,'triaud_2013_trappist.fits')).to_pandas() df.columns = map(str.lower, df.columns) mz1 = df.bjd < 6070 mr1 = (df.bjd < 6150) & (~mz1) mz2 = (~mz1) & (~mr1) df.bjd += 2450000 fz1 = pd.Series(df[mz1].flux.values, index=df[mz1].bjd.values, name='z_trappist_1') fz2 = pd.Series(df[mz2].flux.values, index=df[mz2].bjd.values, name='z_trappist_2') fr1 = pd.Series(df[mr1].flux.values, index=df[mr1].bjd.values, name='r_eulercam_') with pd.HDFStore(join(DDATA,'external_lcs.h5')) as f: f.put('lc/triaud2013/z/trappist_1', fz1) f.put('lc/triaud2013/z/trappist_2', fz2) f.put('lc/triaud2013/r/eulercam_1', fr1) fig, axs = subplots(1,3,figsize=(14,4), sharey=True) fz1.plot(ax=axs[0], style='.') fz2.plot(ax=axs[1], style='.') fr1.plot(ax=axs[2], style='.') fig.tight_layout() """ Explanation: Triaud et al. 2013 Here we prepare the TRAPPIST and EulerCam light curves by Triaud et al. (2013) (Obtained from Vizier, http://vizier.u-strasbg.fr/viz-bin/VizieR?-source=J/A+A/551/A80&-to=3) The data contains two TRAPPIST z' light curves and an Euler r' light curve. 
End of explanation """ DFOSC = ['I'] GROND = 'g r i z J H K'.split() dfm = Table.read(join(DDATA, 'mancini_2014.fits')).to_pandas() dfm.columns = map(str.lower, dfm.columns) dfm.bjd += 2400000 dfm['flux'] = 10**(-dfm.mag/2.5) dfm.drop(['mag','e_mag'], inplace=True, axis=1) dfm.sort_values(['band','bjd'], inplace=True) fig,axs = subplots(3,3, figsize=(14,10), sharey=True, sharex=True) for i,band in enumerate('g r i I z J H K'.split()): dft = dfm[dfm.band==band] axs.flat[i].plot(dft.bjd, dft.flux, '.') setp(axs, xlim=[dfm.bjd.min(),dfm.bjd.max()], ylim=(0.95, 1.03)) fig.tight_layout() with pd.HDFStore(join(DDATA, 'external_lcs.h5'), 'a') as f: for band in dfm.band.unique(): inst = 'grond' if band in GROND else 'dfosc' band = band.replace('I','i') name = 'lc/mancini2014/{band:s}/{name:s}'.format(band=band,name=inst) dft = dfm[dfm.band==band] f.put(name, pd.Series(dft.flux.values, dft.bjd.values, name=name)) """ Explanation: Mancini et al. 2014 Mancini et al. Observe a single transit simultaneously with the Danish Telescope (Bessel I) and GROND (g r i z J H K). 
End of explanation """ def N(a): return a/a.max() def favg(filters): return N(sum(list(map(N, filters)), 0)) dff = pd.read_csv(join(DDATA, 'fukui_2014', 'TM_all.dat'), sep=' ') dff = pd.DataFrame(dff.values[:,1:-1], columns=dff.columns[2:], index=pd.Series(dff.values[:,0], name='Wavelength')) dfg = pd.read_csv(join(DDATA, 'GROND_filters.txt'), sep='\t', index_col=0) dfg.index.name = 'Wavelength' filter_names = 'g r i z J H K'.split() transmission = array([favg([dfg.gBand, dff.MITSuME_g]), favg([dfg.rBand, dff.MITSuME_Rc]), favg([dfg.iBand, dff.MITSuME_Ic]), N(dfg.zBand), favg([dfg.JBand, dff['2MASS_J'], dff.IRSF_J]), favg([dfg.HBand, dff.IRSF_H]), favg([dfg.KBand, dff.IRSF_Ks])]).T ddf = pd.DataFrame(data=transmission, columns=filter_names, index=dfg.index) ddf.to_hdf(join(DDATA, 'external_lcs.h5'), 'transmission') ddf.plot(); with pd.HDFStore(join(DDATA, 'external_lcs.h5')) as f: print(f) """ Explanation: Save the transmission information End of explanation """
jorisvandenbossche/DS-python-data-analysis
notebooks/visualization_02_seaborn.ipynb
bsd-3-clause
import numpy as np import pandas as pd import matplotlib.pyplot as plt """ Explanation: <p><font size="6"><b>Visualisation: Seaborn </b></font></p> © 2021, Joris Van den Bossche and Stijn Van Hoey (&#106;&#111;&#114;&#105;&#115;&#118;&#97;&#110;&#100;&#101;&#110;&#98;&#111;&#115;&#115;&#99;&#104;&#101;&#64;&#103;&#109;&#97;&#105;&#108;&#46;&#99;&#111;&#109;, &#115;&#116;&#105;&#106;&#110;&#118;&#97;&#110;&#104;&#111;&#101;&#121;&#64;&#103;&#109;&#97;&#105;&#108;&#46;&#99;&#111;&#109;). Licensed under CC BY 4.0 Creative Commons End of explanation """ import seaborn as sns """ Explanation: Seaborn Seaborn is a Python data visualization library: Built on top of Matplotlib, but providing High level functions. Support for tidy data, which became famous due to the ggplot2 R package. Attractive and informative statistical graphics out of the box. Interacts well with Pandas End of explanation """ titanic = pd.read_csv('data/titanic.csv') titanic.head() """ Explanation: Introduction We will use the Titanic example data set: End of explanation """ age_stat = titanic.groupby(["Pclass", "Sex"])["Age"].mean().reset_index() age_stat """ Explanation: Let's consider following question: For each class at the Titanic and each gender, what was the average age? Hence, we should define the mean of the male and female groups of column Survived in combination with the groups of the Pclass column. In Pandas terminology: End of explanation """ age_stat.plot(kind='bar') ## A possible other way of plotting this could be using groupby again: #age_stat.groupby('Pclass').plot(x='Sex', y='Age', kind='bar') # (try yourself by uncommenting) """ Explanation: Providing this data in a bar chart with pure Pandas is still partly supported: End of explanation """ sns.catplot(data=age_stat, x="Sex", y="Age", col="Pclass", kind="bar") """ Explanation: but with mixed results. 
Seaborn provides another level of abstraction to visualize such grouped plots with different categories: End of explanation """ # A relation between variables in a Pandas DataFrame -> `relplot` sns.relplot(data=titanic, x="Age", y="Fare") """ Explanation: Check <a href="#this_is_tidy">here</a> for a short recap about tidy data. <div class="alert alert-info"> **Remember** - Seaborn is especially suitbale for these so-called <a href="http://vita.had.co.nz/papers/tidy-data.pdf">tidy</a> dataframe representations. - The [Seaborn tutorial](https://seaborn.pydata.org/tutorial/data_structure.html#long-form-vs-wide-form-data) provides a very good introduction to tidy (also called _long-form_) data. - You can use __Pandas column names__ as input for the visualisation functions of Seaborn. </div> Interaction with Matplotlib Seaborn builds on top of Matplotlib/Pandas, adding an additional layer of convenience. Topic-wise, Seaborn provides three main modules, i.e. type of plots: relational: understanding how variables in a dataset relate to each other distribution: specialize in representing the distribution of datapoints categorical: visualize a relationship involving categorical data (i.e. plot something for each category) The organization looks like this: We first check out the top commands of each of the types of plots: relplot, displot, catplot, each returning a Matplotlib Figure: Figure level functions Let's start from: What is the relation between Age and Fare? End of explanation """ sns.relplot(data=titanic, x="Age", y="Fare", hue="Survived") """ Explanation: Extend to: Is the relation between Age and Fare different for people how survived? End of explanation """ age_fare = sns.relplot(data=titanic, x="Age", y="Fare", hue="Survived", col="Sex") """ Explanation: Extend to: Is the relation between Age and Fare different for people how survived and/or the gender of the passengers? 
End of explanation """ type(age_fare), type(age_fare.fig) """ Explanation: The function returns a Seaborn FacetGrid, which is related to a Matplotlib Figure: End of explanation """ age_fare.axes, type(age_fare.axes.flatten()[0]) """ Explanation: As we are dealing here with 2 subplots, the FacetGrid consists of two Matplotlib Axes: End of explanation """ scatter_out = sns.scatterplot(data=titanic, x="Age", y="Fare", hue="Survived") type(scatter_out) """ Explanation: Hence, we can still apply all the power of Matplotlib, but start from the convenience of Seaborn. <div class="alert alert-info"> **Remember** The `Figure` level Seaborn functions: - Support __faceting__ by data variables (split up in subplots using a categorical variable) - Return a Matplotlib `Figure`, hence the output can NOT be part of a larger Matplotlib Figure </div> Axes level functions In 'technical' terms, when working with Seaborn functions, it is important to understand which level they operate, as Axes-level or Figure-level: axes-level functions plot data onto a single matplotlib.pyplot.Axes object and return the Axes figure-level functions return a Seaborn object, FacetGrid, which is a matplotlib.pyplot.Figure Remember the Matplotlib Figure, axes and axis anatomy explained in visualization_01_matplotlib? Each plot module has a single Figure-level function (top command in the scheme), which offers a unitary interface to its various Axes-level functions (. We can ask the same question: Is the relation between Age and Fare different for people how survived? End of explanation """ # sns.scatterplot(data=titanic, x="Age", y="Fare", hue="Survived", col="Sex") # uncomment to check the output """ Explanation: But we can't use the col/row options for facetting: End of explanation """ fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(10, 6)) sns.scatterplot(data=titanic, x="Age", y="Fare", hue="Survived", ax=ax0) sns.violinplot(data=titanic, x="Survived", y="Fare", ax=ax1) # boxplot, stripplot,.. 
as alternative to represent distribution per category """ Explanation: We can use these functions to create custom combinations of plots: End of explanation """ sns.catplot(data=titanic, x="Survived", col="Pclass", kind="count") """ Explanation: Note! Check the similarity with the best of both worlds approach: Prepare with Matplotlib Plot using Seaborn Further adjust specific elements with Matplotlib if needed <div class="alert alert-info"> **Remember** The `Axes` level Seaborn functions: - Do NOT support faceting by data variables - Return a Matplotlib `Axes`, hence the output can be used in combination with other Matplotlib `Axes` in the same `Figure` </div> Summary statistics Aggregations such as count, mean are embedded in Seaborn (similar to other 'Grammar of Graphics' packages such as ggplot in R and plotnine/altair in Python). We can do these operations directly on the original titanic data set in a single coding step: End of explanation """ sns.catplot(data=titanic, x="Sex", y="Age", col="Pclass", kind="bar", estimator=np.mean) """ Explanation: To use another statistical function to apply on each of the groups, use the estimator: End of explanation """ # %load _solutions/visualization_02_seaborn1.py """ Explanation: Exercises <div class="alert alert-success"> **EXERCISE 1** - Make a histogram of the age, split up in two subplots by the `Sex` of the passengers. - Put both subplots underneath each other. - Use the `height` and `aspect` arguments of the plot function to adjust the size of the figure. <details><summary>Hints</summary> - When interested in a histogram, i.e. the distribution of data, use the `displot` module - A split into subplots is requested using a variable of the DataFrame (facetting), so use the `Figure`-level function instead of the `Axes` level functions. - Link a column name to the `row` argument for splitting into subplots row-wise. 
</details> End of explanation """ # %load _solutions/visualization_02_seaborn2.py # %load _solutions/visualization_02_seaborn3.py """ Explanation: <div class="alert alert-success"> **EXERCISE 2** Make a violin plot showing the `Age` distribution in each of the `Pclass` categories comparing for `Sex`: - Use the `Pclass` column to create a violin plot for each of the classes. To do so, link the `Pclass` column to the `x-axis`. - Use a different color for the `Sex`. - Check the behavior of the `split` argument and apply it to compare male/female. - Use the `sns.despine` function to remove the boundaries around the plot. <details><summary>Hints</summary> - Have a look at https://seaborn.pydata.org/examples/grouped_violinplots.html for inspiration. </details> End of explanation """ # joined distribution plot sns.jointplot(data=titanic, x="Fare", y="Age", hue="Sex", kind="scatter") # kde sns.pairplot(data=titanic[["Age", "Fare", "Sex"]], hue="Sex") # Also called scattermatrix plot """ Explanation: Some more Seaborn functionalities to remember Whereas the relplot, catplot and displot represent the main components of the Seaborn library, more useful functions are available. You can check the gallery yourself, but let's introduce a few rof them: jointplot() and pairplot() jointplot() and pairplot() are Figure-level functions and create figures with specific subplots by default: End of explanation """ titanic_age_summary = titanic.pivot_table(columns="Pclass", index="Sex", values="Age", aggfunc="mean") titanic_age_summary sns.heatmap(data=titanic_age_summary, cmap="Reds") """ Explanation: heatmap() Plot rectangular data as a color-encoded matrix. 
End of explanation """ g = sns.lmplot( data=titanic, x="Age", y="Fare", hue="Survived", col="Survived", # hue="Pclass" ) """ Explanation: lmplot() regressions Figure level function to generate a regression model fit across a FacetGrid: End of explanation """ # RUN THIS CELL TO PREPARE THE ROAD CASUALTIES DATA SET %run ./data/load_casualties.py 2005 2020 """ Explanation: Exercises data set road casualties The Belgian road casualties data set contains data about the number of victims involved in road accidents. The script load_casualties.py in the data folder contains the routine to download the individual years of data, clean up the data and concatenate the individual years. The %run is an 'IPython magic' function to run a Python file as if you would run it from the command line. Run %run ./data/load_casualties.py --help to check the input arguments required to run the script. As data is available since 2005, we download 2005 till 2020. Note As the scripts downloads the individual files, it can take a while to run the script the first time. End of explanation """ casualties = pd.read_csv("./data/casualties.csv", parse_dates=["datetime"]) """ Explanation: When succesfull, the casualties.csv data is available in the data folder: End of explanation """ # %load _solutions/visualization_02_seaborn4.py # %load _solutions/visualization_02_seaborn5.py """ Explanation: The data contains the following columns (in bold the main columns used in the exercises): datetime: Date and time of the casualty. week_day: Weekday of the datetime. n_victims: Number of victims n_victims_ok: Number of victims without injuries n_slightly_injured: Number of slightly injured victims n_seriously_injured: Number of severely injured victims n_dead_30days: Number of victims that died within 30 days road_user_type: Road user type (passenger car, motorbike, bicycle, pedestrian, ...) victim_type: Type of victim (driver, passenger, ...) 
gender age road_type: Regional road, Motorway or Municipal road build_up_area: Outside or inside built-up area light_conditions: Day or night (with or without road lights), or dawn refnis_municipality: Postal reference ID number of municipality municipality: Municipality name refnis_region: Postal reference ID number of region region: Flemish Region, Walloon Region or Brussels-Capital Region Each row of the dataset does not represent a single accident, but a number of victims for a set of characteristics (for example, how many victims for accidents that happened between 8-9am at a certain day and at a certain road type in a certain municipality with the given age class and gender, ...). Thus, in practice, the victims of one accidents might be split over multiple rows (and one row might in theory also come from multiple accidents). Therefore, to get meaningful numbers in the exercises, we will each time sum the number of victims for a certain aggregation level (a subset of those characteristics). <div class="alert alert-success"> **EXERCISE 3** Create a barplot with the number of victims ("n_victims") for each hour of the day. Before plotting, calculate the total number of victims for each hour of the day with pandas and assign it to the variable `victims_hour_of_day`. Update the column names to respectively "Hour of the day" and "Number of victims". Use the `height` and `aspect` to adjust the figure width/height. <details><summary>Hints</summary> - The sum of victims _for each_ hour of the day requires `groupby`. One can create a new column with the hour of the day or pass the hour directly to `groupby`. - The `.dt` accessor provides access to all kinds of datetime information. - `rename` requires a dictionary with a mapping of the old vs new names. - A bar plot is in seaborn one of the `catplot` options. 
</details> End of explanation """ # %load _solutions/visualization_02_seaborn6.py # %load _solutions/visualization_02_seaborn7.py """ Explanation: <div class="alert alert-success"> **EXERCISE 4** Create a barplot with the number of victims ("n_victims") for each hour of the day for each category in the gender column. Before plotting, calculate the total number of victims for each hour of the day and each gender with Pandas and assign it to the variable `victims_gender_hour_of_day`. Create a separate subplot for each gender category in a separate row and apply the `rocket` color palette. Make sure to include the `NaN` values of the "gender" column as a separate subplot, called _"unknown"_ without changing the `casualties` DataFrame data. <details><summary>Hints</summary> - The sum of victims _for each_ hour of the day requires `groupby`. Groupby accepts multiple inputs to group on multiple categories together. - `groupby` also accepts a parameter `dropna=False` and/or using `fillna` is a useful function to replace the values in the gender column with the value "unknown". - The `.dt` accessor provides access to all kinds of datetime information. - Link the "gender" column with the `row` parameter to create a facet of rows. - Use the `height` and `aspect` to adjust the figure width/height. 
</details> End of explanation """ # Convert weekday to Pandas categorical data type casualties["week_day"] = pd.Categorical( casualties["week_day"], categories=["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"], ordered=True ) casualties_motorway_trucks = casualties[ (casualties["road_type"] == "Motorway") & casualties["road_user_type"].isin(["Light truck", "Truck"]) ] sns.catplot(data=casualties_motorway_trucks, x="week_day", y="n_victims", estimator=np.sum, ci=None, kind="bar", color="#900C3F", height=3, aspect=4) """ Explanation: <div class="alert alert-success"> **EXERCISE 5** Compare the number of victims for each day of the week for casualties that happened on a "Motorway" (`road_type` column) for trucks ("Truck" and "Light truck" in the `road_user_type` column). Use a bar plot to compare the victims for each day of the week with Seaborn directly (do not use the `groupby`). __Note__ The `week_day` is converted to an __ordered__ categorical variable. This ensures the days are sorted correctly in Seaborn. <details><summary>Hints</summary> - The first part of the exercise is filtering the data. Combine the statements with `&` and do not forget to provide the necessary brackets. The `.isin()`to create a boolean condition might be useful for the road user type selection. - Whereas using `groupby` to get to the counts is perfectly correct, using the `estimator` in Seaborn gives the same result. __Note__ The `estimator=np.sum` is less performant than using pandas `groupby`. After filtering the data set, the summation with Seaborn is a feasible option. 
</details> End of explanation """ # %load _solutions/visualization_02_seaborn8.py # %load _solutions/visualization_02_seaborn9.py """ Explanation: <div class="alert alert-success"> **EXERCISE 6** Compare the relative number of deaths within 30 days (in relation to the total number of victims) in between the following "road_user_type"s: "Bicycle", "Passenger car", "Pedestrian", "Motorbike" for the year 2019 and 2020: - Filter the data for the years 2019 and 2020. - Filter the data on the road user types "Bicycle", "Passenger car", "Pedestrian" and "Motorbike". Call the new variable `compare_dead_30`. - Count for each combination of year and road_user_type the total victims and the total deaths within 30 days victims. - Calculate the percentage deaths within 30 days (add a new column "dead_prop"). - Use a horizontal bar chart to plot the results with the "road_user_type" on the y-axis and a separate color for each year. <details><summary>Hints</summary> - By setting `datetime` as the index, slicing time series can be done using strings to filter data on the years 2019 and 2020. - Use `isin()` to filter "road_user_type" categories used in the exercise. - Count _for each_... Indeed, use `groupby` with 2 inputs, "road_user_type" and the year of `datetime`. - Deriving the year from the datetime: When having an index, use `compare_dead_30.index.year`, otherwise `compare_dead_30["datetime"].dt.year`. - Dividing columns works element-wise in Pandas. - A horizontal bar chart in seaborn is a matter of defining `x` and `y` inputs correctly. 
</details> End of explanation """ # %load _solutions/visualization_02_seaborn10.py # %load _solutions/visualization_02_seaborn11.py # %load _solutions/visualization_02_seaborn12.py # %load _solutions/visualization_02_seaborn13.py """ Explanation: <div class="alert alert-success"> **EXERCISE 7** Create a line plot of the __monthly__ number of victims for each of the categories of victims ('n_victims_ok', 'n_dead_30days', 'n_slightly_injured' and 'n_seriously_injured') as a function of time: - Create a new variable `monthly_victim_counts` that contains the monthly sum of 'n_victims_ok', 'n_dead_30days', 'n_slightly_injured' and 'n_seriously_injured'. - Create a line plot of the `monthly_victim_counts` using Seaborn. Choose any [color palette](https://seaborn.pydata.org/tutorial/color_palettes.html). - Create an `area` plot (line plot with the individual categories stacked on each other) using Pandas. What happens with the data registration since 2012? <details><summary>Hints</summary> - Monthly statistics from a time series requires `resample` (with - in this case - `sum`), which also takes the `on` parameter to specify the datetime column (instead of using the index of the DataFrame). - Apply the resampling on the `["n_victims_ok", "n_slightly_injured", "n_seriously_injured", "n_dead_30days"]` columns only. - Seaborn line plots works without tidy data when NOT providing `x` and `y` argument. It also works using tidy data. To 'tidy' the data set, `.melt()` can be used, see [pandas_08_reshaping.ipynb](pandas_08_reshaping.ipynb). - Pandas plot method works on the non-tidy data set with `plot.area()` . __Note__ Seaborn does not have an area plot. </details> End of explanation """ # %load _solutions/visualization_02_seaborn14.py # %load _solutions/visualization_02_seaborn15.py """ Explanation: <div class="alert alert-success"> **EXERCISE 8** Make a line plot of the daily victims (column "n_victims") in 2020. Can you explain the counts from March till May? 
<details><summary>Hints</summary> - To get the line plot of 2020 with daily counts, the data preparation steps are: - Filter data on 2020. By defining `datetime` as the index, slicing time series can be done using strings. - Resample to daily counts. Use `resample` with the sum on column "n_victims". - Create a line plot. Do you prefer Pandas or Seaborn? </details> End of explanation """ # %load _solutions/visualization_02_seaborn16.py # %load _solutions/visualization_02_seaborn17.py """ Explanation: <div class="alert alert-success"> **EXERCISE 9** Combine the following two plots in a single Matplotlib figure: - (left) The empirical cumulative distribution of the _weekly_ proportion of victims that died (`n_dead_30days` / `n_victims`) with a separate color for each "light_conditions". - (right) The empirical cumulative distribution of the _weekly_ proportion of victims that died (`n_dead_30days` / `n_victims`) with a separate color for each "road_type". Prepare the data for both plots separately with Pandas and use the variable `weekly_victim_dead_lc` and `weekly_victim_dead_rt`. <details><summary>Hints</summary> - The plot can not be made by a single Seaborn Figure-level plot. Create a Matplotlib figure first and use the __axes__ based functions of Seaborn to plot the left and right Axes. - The data for both subplots need to be prepared separately, by `groupby` once on "light_conditions" and once on "road_type". - Weekly sums (`resample`) _for each_ (`groupby`) "light_conditions" or "road_type"?! yes! you need to combine both here. - [`sns.ecdfplot`](https://seaborn.pydata.org/generated/seaborn.ecdfplot.html#seaborn.ecdfplot) creates empirical cumulative distribution plots. 
</details> End of explanation """ # available (see previous exercises) daily_total_counts_2020 = casualties.set_index("datetime")["2020": "2021"].resample("D")["n_victims"].sum() # %load _solutions/visualization_02_seaborn18.py # %load _solutions/visualization_02_seaborn19.py # %load _solutions/visualization_02_seaborn20.py """ Explanation: <div class="alert alert-success"> **EXERCISE 10** You wonder if there is a relation between the number of victims per day and the minimal daily temperature. A data set with minimal daily temperatures for the year 2020 is available in the `./data` subfolder: `daily_min_temperature_2020.csv`. - Read the file `daily_min_temperature_2020.csv` and assign output to the variable `daily_min_temp_2020`. - Combine the daily (minimal) temperatures with the `daily_total_counts_2020` variable - Create a regression plot with Seaborn. Does it make sense to present the data as a regression plot? <details><summary>Hints</summary> - `pd.read_csv` has a `parse_dates` parameter to load the `datetime` column as a Timestamp data type. - `pd.merge` need a (common) key to link the data. - `sns.lmplot` or `sns.jointplot` are both seaborn functions to create scatter plots with a regression. Joint plot adds the marginal distributions. </details> End of explanation """
staeiou/wiki-stat-notebooks
retention_20180712/wiki_edit_counts.ipynb
mit
import pandas as pd import matplotlib import matplotlib.pyplot as plt from matplotlib.ticker import ScalarFormatter %matplotlib inline matplotlib.style.use('ggplot') # Data by Erik Zachte at https://stats.wikimedia.org/EN/TablesWikipediaEN.htm counts = pd.read_csv("edit_counts.tsv", sep="\t") # Convert dates to datetimes counts.date=pd.to_datetime(counts.date,infer_datetime_format=True) # Peek at the dataset counts[0:10] """ Explanation: Visualizations of editing activity in en.wikipedia.org By Stuart Geiger, Berkeley Institute for Data Science (C) 2016, Released under The MIT license. This data is collected and aggregated by Erik Zachte, which is here for the English Wikipedia. I have just copied that data from HTML tables into a CSV (which is not done here), then imported it into Pandas dataframes, and plotted it with matplotlib. Processing and cleaning data End of explanation """ def units_convert(s): """ Convert cells with k and M to times 1,000 and 1,000,000 respectively I got this solution from http://stackoverflow.com/questions/14218728/converting-string-of-numbers-and-letters-to-int-float-in-pandas-dataframe """ powers = {'k': 1000, 'M': 10 ** 6} if(s[-1] == 'k' or s[-1] == 'M'): try: power = s[-1] return float(s[:-1]) * powers[power] except TypeError: return float(s) else: return float(s) # Apply this function to the columns that have 'k' or 'M' units, store them as new _float columns counts['edits_float']=counts.edits.apply(units_convert) counts['article_count_float']=counts['article count'].apply(units_convert) # Make sure we've got data types figured out counts.dtypes # Set date column as index counts.set_index(['date']) # Calculate some ratios counts['highly_active_to_newcomer_ratio']=counts['>100 edits']/counts['new accts'] counts['active_to_newcomer_ratio']=counts['>5 edits']/counts['new accts'] counts['highly_active_to_active_ratio']=counts['>100 edits']/(counts['>5 edits']-counts['>100 edits']) """ Explanation: Some of the columns use 'k' for 
thousands and 'M' for millions, so we need to convert them. End of explanation """ matplotlib.style.use(['bmh']) font = {'weight' : 'regular', 'size' : 16} matplotlib.rc('font', **font) ax1 = counts.plot(x='date',y='>5 edits', figsize=(12,4), label="Users making >5 edits in a month", color="r") ax1.set_xlabel("Year") ax1.set_ylabel("Number of users") ax2 = counts.plot(x='date',y='>100 edits', figsize=(12,4), label="Users making >100 edits in a month",color="g") ax2.set_xlabel("Year") ax2.set_ylabel("Number of editors") ax3 = counts.plot(x='date',y='new accts', figsize=(12,4), label="New users making >10 edits in a month",color="b") ax3.set_xlabel("Year") ax3.set_ylabel("Number of editors") ax3.yaxis.set_major_formatter(ScalarFormatter()) ax1 = counts.plot(x='date',y=['>5 edits','>100 edits','new accts'], figsize=(12,4), label="Users making >5 edits in a month",color=['r','g','b'],logy=True) ax1.set_xlim("2001-01-01","2005-01-01") ax1.set_ylim(0,10000) ax1.set_xlabel("Year") ax1.set_ylabel("Number of users") ax1.yaxis.set_major_formatter(ScalarFormatter()) ax1 = counts.plot(x='date',y=['>5 edits','>100 edits','new accts'], figsize=(12,4), label="Users making >5 edits in a month",color=['r','g','b']) ax1.set_xlabel("Year") ax1.set_ylabel("Number of users") ax1.yaxis.set_major_formatter(ScalarFormatter()) ax1 = counts.plot(x='date',y=['>5 edits','>100 edits','new accts'], figsize=(12,4), label="Users making >5 edits in a month") ax1.set_xlabel("Year") ax1.set_ylabel("Number of users") ax1.yaxis.set_major_formatter(ScalarFormatter()) matplotlib.style.use(['bmh']) font = {'weight' : 'regular', 'size' : 16} matplotlib.rc('font', **font) ax1 = counts.plot(x='date',y='>5 edits', figsize=(12,4), label="Users making >5 edits in a month",logy=True) ax1.set_xlabel("Year") ax1.set_ylabel("Number of users") ax1.yaxis.set_major_formatter(ScalarFormatter()) plt.legend(bbox_to_anchor=(.9, .3), bbox_transform=plt.gcf().transFigure) ax2 = counts.plot(x='date',y='>100 edits', 
figsize=(12,4), label="Users making >100 edits in a month",color="g", logy=True) ax2.set_xlabel("Year") ax2.set_ylabel("Number of editors") ax2.yaxis.set_major_formatter(ScalarFormatter()) ax3 = counts.plot(x='date',y='new accts', figsize=(12,4), label="New users making >10 edits in a month",color="r", logy=True) ax3.set_xlabel("Year") ax3.set_ylabel("Number of editors") ax3.yaxis.set_major_formatter(ScalarFormatter()) ax1 = counts.plot(x='date',y=['>5 edits','>100 edits','new accts'], figsize=(12,4), label="Users making >5 edits in a month",logy=True, color=['r','g','b']) ax1.set_xlabel("Year") ax1.set_ylabel("Number of users") ax1.yaxis.set_major_formatter(ScalarFormatter()) ax3 = counts.plot(x='date',y='highly_active_to_active_ratio', figsize=(12,4), label="Highly active users to active users ratio",color="k") ax3.set_xlabel("Year") ax3.set_ylabel("Ratio") ax3 = counts.plot(x='date',y='highly_active_to_newcomer_ratio', figsize=(12,4), label="Highly active users to newcomers ratio",color="k") ax3.set_xlabel("Year") ax3.set_ylabel("Ratio") ax3 = counts.plot(x='date',y='active_to_newcomer_ratio', figsize=(12,4), label="Active users to newcomers ratio",color="k") ax3.set_xlabel("Year") ax3.set_ylabel("Ratio") ax3 = counts.plot(x='date',y='edits_float', figsize=(12,4), label="Number of edits per month",color="k") ax3.set_xlabel("Year") ax3.set_ylabel("Number of editors") ax3 = counts.plot(x='date',y='new per day', figsize=(12,4), label="New articles written per day",color="k") ax3.set_xlabel("Year") ax3.set_ylabel("Number of articles") ax3 = counts.plot(x='date',y='article_count_float', figsize=(12,4), label="Number of articles",color="k") ax3.set_xlabel("Year") ax3.set_ylabel("Number of articles") ax3 = counts.plot(x='date',y='article_count_float', figsize=(12,4), label="Number of articles",color="k",logy=True) ax3.set_xlabel("Year") ax3.set_ylabel("Number of articles") """ Explanation: Graphs End of explanation """
tensorflow/examples
courses/udacity_intro_to_tensorflow_for_deep_learning/l09c06_nlp_subwords.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 The TensorFlow Authors. End of explanation """ import tensorflow as tf from tensorflow.keras.preprocessing.sequence import pad_sequences """ Explanation: What's in a (sub)word? <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l09c06_nlp_subwords.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l09c06_nlp_subwords.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> In this colab, we'll work with subwords, or words made up of the pieces of larger words, and see how that impacts our network and related embeddings. 
Import TensorFlow and related functions End of explanation """ !wget --no-check-certificate \ https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P \ -O /tmp/sentiment.csv import pandas as pd dataset = pd.read_csv('/tmp/sentiment.csv') # Just extract out sentences and labels first - we will create subwords here sentences = dataset['text'].tolist() labels = dataset['sentiment'].tolist() """ Explanation: Get the original dataset We'll once again use the dataset containing Amazon and Yelp reviews. This dataset was originally extracted from here. End of explanation """ import tensorflow_datasets as tfds vocab_size = 1000 tokenizer = tfds.deprecated.text.SubwordTextEncoder.build_from_corpus(sentences, vocab_size, max_subword_length=5) # Check that the tokenizer works appropriately num = 5 print(sentences[num]) encoded = tokenizer.encode(sentences[num]) print(encoded) # Separately print out each subword, decoded for i in encoded: print(tokenizer.decode([i])) """ Explanation: Create a subwords dataset We can use the existing Amazon and Yelp reviews dataset with tensorflow_datasets's SubwordTextEncoder functionality. SubwordTextEncoder.build_from_corpus() will create a tokenizer for us. You could also use this functionality to get subwords from a much larger corpus of text as well, but we'll just use our existing dataset here. The Amazon and Yelp dataset we are using isn't super large, so we'll create a subword vocab_size of only the 1,000 most common words, as well as cutting off each subword to be at most 5 characters. Check out the related documentation here. End of explanation """ for i, sentence in enumerate(sentences): sentences[i] = tokenizer.encode(sentence) # Check the sentences are appropriately replaced print(sentences[1]) """ Explanation: Replace sentence data with encoded subwords Now, we'll re-create the dataset to be used for training by actually encoding each of the individual sentences. 
This is equivalent to text_to_sequences with the Tokenizer we used in earlier exercises. End of explanation """ import numpy as np max_length = 50 trunc_type='post' padding_type='post' # Pad all sentences sentences_padded = pad_sequences(sentences, maxlen=max_length, padding=padding_type, truncating=trunc_type) # Separate out the sentences and labels into training and test sets training_size = int(len(sentences) * 0.8) training_sentences = sentences_padded[0:training_size] testing_sentences = sentences_padded[training_size:] training_labels = labels[0:training_size] testing_labels = labels[training_size:] # Make labels into numpy arrays for use with the network later training_labels_final = np.array(training_labels) testing_labels_final = np.array(testing_labels) """ Explanation: Final pre-processing Before training, we still need to pad the sequences, as well as split into training and test sets. End of explanation """ embedding_dim = 16 model = tf.keras.Sequential([ tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length), tf.keras.layers.GlobalAveragePooling1D(), tf.keras.layers.Dense(6, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid') ]) model.summary() num_epochs = 30 model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy']) history = model.fit(training_sentences, training_labels_final, epochs=num_epochs, validation_data=(testing_sentences, testing_labels_final)) """ Explanation: Train a Sentiment Model End of explanation """ import matplotlib.pyplot as plt def plot_graphs(history, string): plt.plot(history.history[string]) plt.plot(history.history['val_'+string]) plt.xlabel("Epochs") plt.ylabel(string) plt.legend([string, 'val_'+string]) plt.show() plot_graphs(history, "accuracy") plot_graphs(history, "loss") """ Explanation: Visualize the Training Graph We can visualize the training graph below again. 
Does there appear to be a difference in how validation accuracy and loss is trending compared to with full words? End of explanation """ # First get the weights of the embedding layer e = model.layers[0] weights = e.get_weights()[0] print(weights.shape) # shape: (vocab_size, embedding_dim) import io # Write out the embedding vectors and metadata out_v = io.open('vecs.tsv', 'w', encoding='utf-8') out_m = io.open('meta.tsv', 'w', encoding='utf-8') for word_num in range(0, vocab_size - 1): word = tokenizer.decode([word_num]) embeddings = weights[word_num] out_m.write(word + "\n") out_v.write('\t'.join([str(x) for x in embeddings]) + "\n") out_v.close() out_m.close() # Download the files try: from google.colab import files except ImportError: pass else: files.download('vecs.tsv') files.download('meta.tsv') """ Explanation: Get files for visualizing the network Once again, you can visualize the sentiment related to all of the subwords using the below code and by heading to http://projector.tensorflow.org/ to upload and view the data. Note that the below code does have a few small changes to handle the different way text is encoded in our dataset compared to before with the built in Tokenizer. You may get an error like "Number of tensors (999) do not match the number of lines in metadata (992)." As long as you load the vectors first without error and wait a few seconds after this pops up, you will be able to click outside the file load menu and still view the visualization. End of explanation """
AllenDowney/ModSimPy
notebooks/chap13.ipynb
mit
# Configure Jupyter so figures appear in the notebook %matplotlib inline # Configure Jupyter to display the assigned value after an assignment %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' # import functions from the modsim.py module from modsim import * """ Explanation: Modeling and Simulation in Python Chapter 13 Copyright 2017 Allen Downey License: Creative Commons Attribution 4.0 International End of explanation """ def make_system(beta, gamma): """Make a system object for the SIR model. beta: contact rate in days gamma: recovery rate in days returns: System object """ init = State(S=89, I=1, R=0) init /= np.sum(init) t0 = 0 t_end = 7 * 14 return System(init=init, t0=t0, t_end=t_end, beta=beta, gamma=gamma) def plot_results(S, I, R): """Plot the results of a SIR model. S: TimeSeries I: TimeSeries R: TimeSeries """ plot(S, '--', label='Susceptible') plot(I, '-', label='Infected') plot(R, ':', label='Recovered') decorate(xlabel='Time (days)', ylabel='Fraction of population') def calc_total_infected(results): """Fraction of population infected during the simulation. results: DataFrame with columns S, I, R returns: fraction of population """ return get_first_value(results.S) - get_last_value(results.S) def run_simulation(system, update_func): """Runs a simulation of the system. system: System object update_func: function that updates state returns: TimeFrame """ init, t0, t_end = system.init, system.t0, system.t_end frame = TimeFrame(columns=init.index) frame.row[t0] = init for t in linrange(t0, t_end): frame.row[t+1] = update_func(frame.row[t], t, system) return frame def update_func(state, t, system): """Update the SIR model. 
state: State (s, i, r) t: time system: System object returns: State (sir) """ beta, gamma = system.beta, system.gamma s, i, r = state infected = beta * i * s recovered = gamma * i s -= infected i += infected - recovered r += recovered return State(S=s, I=i, R=r) """ Explanation: Code from previous chapters make_system, plot_results, and calc_total_infected are unchanged. End of explanation """ beta_array = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0 , 1.1] gamma = 0.2 """ Explanation: Sweeping beta Make a range of values for beta, with constant gamma. End of explanation """ for beta in beta_array: system = make_system(beta, gamma) results = run_simulation(system, update_func) print(system.beta, calc_total_infected(results)) """ Explanation: Run the simulation once for each value of beta and print total infections. End of explanation """ def sweep_beta(beta_array, gamma): """Sweep a range of values for beta. beta_array: array of beta values gamma: recovery rate returns: SweepSeries that maps from beta to total infected """ sweep = SweepSeries() for beta in beta_array: system = make_system(beta, gamma) results = run_simulation(system, update_func) sweep[system.beta] = calc_total_infected(results) return sweep """ Explanation: Wrap that loop in a function and return a SweepSeries object. End of explanation """ infected_sweep = sweep_beta(beta_array, gamma) label = 'gamma = ' + str(gamma) plot(infected_sweep, label=label) decorate(xlabel='Contact rate (beta)', ylabel='Fraction infected') savefig('figs/chap13-fig01.pdf') """ Explanation: Sweep beta and plot the results. 
End of explanation """ beta_array """ Explanation: Sweeping gamma Using the same array of values for beta End of explanation """ gamma_array = [0.2, 0.4, 0.6, 0.8] """ Explanation: And now an array of values for gamma End of explanation """ plt.figure(figsize=(7, 4)) for gamma in gamma_array: infected_sweep = sweep_beta(beta_array, gamma) label = 'gamma = ' + str(gamma) plot(infected_sweep, label=label) decorate(xlabel='Contact rate (beta)', ylabel='Fraction infected', loc='upper left') plt.legend(bbox_to_anchor=(1.02, 1.02)) plt.tight_layout() savefig('figs/chap13-fig02.pdf') """ Explanation: For each value of gamma, sweep beta and plot the results. End of explanation """ # Solution goes here # Solution goes here # Solution goes here """ Explanation: Exercise: Suppose the infectious period for the Freshman Plague is known to be 2 days on average, and suppose during one particularly bad year, 40% of the class is infected at some point. Estimate the time between contacts. End of explanation """ def sweep_parameters(beta_array, gamma_array): """Sweep a range of values for beta and gamma. beta_array: array of infection rates gamma_array: array of recovery rates returns: SweepFrame with one row for each beta and one column for each gamma """ frame = SweepFrame(columns=gamma_array) for gamma in gamma_array: frame[gamma] = sweep_beta(beta_array, gamma) return frame """ Explanation: SweepFrame The following sweeps two parameters and stores the results in a SweepFrame End of explanation """ frame = sweep_parameters(beta_array, gamma_array) frame.head() """ Explanation: Here's what the SweepFrame look like. End of explanation """ for gamma in gamma_array: label = 'gamma = ' + str(gamma) plot(frame[gamma], label=label) decorate(xlabel='Contact rate (beta)', ylabel='Fraction infected', title='', loc='upper left') """ Explanation: And here's how we can plot the results. 
End of explanation """ plt.figure(figsize=(7, 4)) for beta in [1.1, 0.9, 0.7, 0.5, 0.3]: label = 'beta = ' + str(beta) plot(frame.row[beta], label=label) decorate(xlabel='Recovery rate (gamma)', ylabel='Fraction infected') plt.legend(bbox_to_anchor=(1.02, 1.02)) plt.tight_layout() savefig('figs/chap13-fig03.pdf') """ Explanation: We can also plot one line for each value of beta, although there are a lot of them. End of explanation """ contour(frame) decorate(xlabel='Recovery rate (gamma)', ylabel='Contact rate (beta)', title='Fraction infected, contour plot') savefig('figs/chap13-fig04.pdf') """ Explanation: It's often useful to separate the code that generates results from the code that plots the results, so we can run the simulations once, save the results, and then use them for different analysis, visualization, etc. After running sweep_parameters, we have a SweepFrame with one row for each value of beta and one column for each value of gamma. End of explanation """
samstav/scipy_2015_sklearn_tutorial
notebooks/04.2 Model Complexity and GridSearchCV.ipynb
cc0-1.0
from figures import plot_kneighbors_regularization plot_kneighbors_regularization() """ Explanation: Parameter selection, Validation & Testing Most models have parameters that influence how complex a model they can learn. Remember using KNeighborsRegressor. If we change the number of neighbors we consider, we get a smoother and smoother prediction: End of explanation """ from sklearn.cross_validation import cross_val_score, KFold from sklearn.neighbors import KNeighborsRegressor # generate toy dataset: x = np.linspace(-3, 3, 100) y = np.sin(4 * x) + x + np.random.normal(size=len(x)) X = x[:, np.newaxis] cv = KFold(n=len(x), shuffle=True) # for each parameter setting do cross_validation: for n_neighbors in [1, 3, 5, 10, 20]: scores = cross_val_score(KNeighborsRegressor(n_neighbors=n_neighbors), X, y, cv=cv) print("n_neighbors: %d, average score: %f" % (n_neighbors, np.mean(scores))) """ Explanation: In the above figure, we see fits for three different values of n_neighbors. For n_neighbors=2, the data is overfit, the model is too flexible and can adjust too much to the noise in the training data. For n_neighbors=20, the model is not flexible enough, and can not model the variation in the data appropriately. In the middle, for n_neighbors = 5, we have found a good mid-point. It fits the data fairly well, and does not suffer from the overfit or underfit problems seen in the figures on either side. What we would like is a way to quantitatively identify overfit and underfit, and optimize the hyperparameters (in this case, the polynomial degree d) in order to determine the best algorithm. We trade off remembering too much about the particularities and noise of the training data vs. not modeling enough of the variability. This is a trade-off that needs to be made in basically every machine learning application and is a central concept, called bias-variance-tradeoff or "overfitting vs underfitting". 
Hyperparameters, Over-fitting, and Under-fitting Unfortunately, there is no general rule how to find the sweet spot, and so machine learning practitioners have to find the best trade-off of model-complexity and generalization by trying several parameter settings. Most commonly this is done using a brute force search, for example over multiple values of n_neighbors: End of explanation """ from sklearn.learning_curve import validation_curve n_neighbors = [1, 3, 5, 10, 20, 50] train_errors, test_errors = validation_curve(KNeighborsRegressor(), X, y, param_name="n_neighbors", param_range=n_neighbors) plt.plot(n_neighbors, train_errors.mean(axis=1), label="train error") plt.plot(n_neighbors, test_errors.mean(axis=1), label="test error") plt.legend(loc="best") """ Explanation: There is a function in scikit-learn, called validation_plot to reproduce the cartoon figure above. It plots one parameter, such as the number of neighbors, against training and validation error (using cross-validation): End of explanation """ from sklearn.cross_validation import cross_val_score, KFold from sklearn.svm import SVR # each parameter setting do cross_validation: for C in [0.001, 0.01, 0.1, 1, 10]: for gamma in [0.001, 0.01, 0.1, 1]: scores = cross_val_score(SVR(C=C, gamma=gamma), X, y, cv=cv) print("C: %f, gamma: %f, average score: %f" % (C, gamma, np.mean(scores))) """ Explanation: Note that many neighbors mean a "smooth" or "simple" model, so the plot is the mirror image of the diagram above. If multiple parameters are important, like the parameters C and gamma in an SVM (more about that later), all possible combinations are tried: End of explanation """ from sklearn.grid_search import GridSearchCV param_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1]} grid = GridSearchCV(SVR(), param_grid=param_grid, cv=cv, verbose=3) """ Explanation: As this is such a very common pattern, there is a built-in class for this in scikit-learn, GridSearchCV. 
GridSearchCV takes a dictionary that describes the parameters that should be tried and a model to train. The grid of parameters is defined as a dictionary, where the keys are the parameters and the values are the settings to be tested. End of explanation """ grid.fit(X, y) """ Explanation: One of the great things about GridSearchCV is that it is a meta-estimator. It takes an estimator like SVR above, and creates a new estimator, that behaves exactly the same - in this case, like a regressor. So we can call fit on it, to train it: End of explanation """ grid.predict(X) """ Explanation: What fit does is a bit more involved then what we did above. First, it runs the same loop with cross-validation, to find the best parameter combination. Once it has the best combination, it runs fit again on all data passed to fit (without cross-validation), to built a single new model using the best parameter setting. Then, as with all models, we can use predict or score: End of explanation """ print(grid.best_score_) print(grid.best_params_) """ Explanation: You can inspect the best parameters found by GridSearchCV in the best_params_ attribute, and the best score in the best_score_ attribute: End of explanation """ from sklearn.cross_validation import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) param_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1]} cv = KFold(n=len(X_train), n_folds=10, shuffle=True) grid = GridSearchCV(SVR(), param_grid=param_grid, cv=cv) grid.fit(X_train, y_train) grid.score(X_test, y_test) """ Explanation: There is a problem with using this score for evaluation, however. You might be making what is called a multiple hypothesis testing error. If you try very many parameter settings, some of them will work better just by chance, and the score that you obtained might not reflect how your model would perform on new unseen data. 
Therefore, it is good to split off a separate test-set before performing grid-search. This pattern can be seen as a training-validation-test split, and is common in machine learning: We can do this very easily by splitting of some test data using train_test_split, training GridSearchCV on the training set, and applying the score method to the test set: End of explanation """ from sklearn.cross_validation import train_test_split, ShuffleSplit X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) param_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1]} single_split_cv = ShuffleSplit(len(X_train), 1) grid = GridSearchCV(SVR(), param_grid=param_grid, cv=single_split_cv, verbose=3) grid.fit(X_train, y_train) grid.score(X_test, y_test) """ Explanation: Some practitioners go for an easier scheme, splitting the data simply into three parts, training, validation and testing. This is a possible alternative if your training set is very large, or it is infeasible to train many models using cross-validation because training a model takes very long. You can do this with scikit-learn for example by splitting of a test-set and then applying GridSearchCV with ShuffleSplit cross-validation with a single iteration: End of explanation """
tensorflow/docs-l10n
site/ko/tutorials/estimator/linear.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2019 The TensorFlow Authors. End of explanation """ !pip install sklearn import os import sys import numpy as np import pandas as pd import matplotlib.pyplot as plt from IPython.display import clear_output from six.moves import urllib """ Explanation: 추정기(Estimator)로 선형 모델 만들기 <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/estimator/linear"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/tutorials/estimator/linear.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/tutorials/estimator/linear.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub) 소스 보기</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/tutorials/estimator/linear.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도 불구하고 공식 영문 문서의 내용과 일치하지 않을 수 있습니다. 이 번역에 개선할 부분이 있다면 tensorflow/docs-l10n 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다. 
문서 번역이나 리뷰에 참여하려면 docs-ko@tensorflow.org로 메일을 보내주시기 바랍니다. 개요 이 문서에서는 tf.estimator API를 사용하여 로지스틱 회귀 모델(logistic regression model)을 훈련합니다. 이 모델은 다른 더 복잡한 알고리즘의 기초로 사용할 수 있습니다. 설정 End of explanation """ import tensorflow.compat.v2.feature_column as fc import tensorflow as tf # 데이터셋 불러오기. dftrain = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv') dfeval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv') y_train = dftrain.pop('survived') y_eval = dfeval.pop('survived') """ Explanation: 타이타닉 데이터셋을 불러오기 타이타닉 데이터셋을 사용할 것입니다. 성별, 나이, 클래스, 기타 등 주어진 정보를 활용하여 승객이 살아남을 것인지 예측하는 것을 목표로 합니다. End of explanation """ dftrain.head() dftrain.describe() """ Explanation: 데이터 탐험하기 데이터셋은 다음의 특성을 가집니다 End of explanation """ dftrain.shape[0], dfeval.shape[0] """ Explanation: 훈련셋은 627개의 샘플로 평가셋은 264개의 샘플로 구성되어 있습니다. End of explanation """ dftrain.age.hist(bins=20) """ Explanation: 대부분의 승객은 20대와 30대 입니다. End of explanation """ dftrain.sex.value_counts().plot(kind='barh') """ Explanation: 남자 승객이 여자 승객보다 대략 2배 많습니다. End of explanation """ dftrain['class'].value_counts().plot(kind='barh') """ Explanation: 대부분의 승객은 "삼등석" 입니다. End of explanation """ pd.concat([dftrain, y_train], axis=1).groupby('sex').survived.mean().plot(kind='barh').set_xlabel('% survive') """ Explanation: 여자는 남자보다 살아남을 확률이 훨씬 높습니다. 이는 명확하게 모델에 유용한 특성입니다. End of explanation """ CATEGORICAL_COLUMNS = ['sex', 'n_siblings_spouses', 'parch', 'class', 'deck', 'embark_town', 'alone'] NUMERIC_COLUMNS = ['age', 'fare'] feature_columns = [] for feature_name in CATEGORICAL_COLUMNS: vocabulary = dftrain[feature_name].unique() feature_columns.append(tf.feature_column.categorical_column_with_vocabulary_list(feature_name, vocabulary)) for feature_name in NUMERIC_COLUMNS: feature_columns.append(tf.feature_column.numeric_column(feature_name, dtype=tf.float32)) """ Explanation: 모델을 위한 특성 공학(feature engineering) 추정기는 특성 열(feature columns)이라는 시스템을 사용하여 모델이 각각의 입력 특성을 어떻게 해석할지 설명합니다. 
추정기가 숫자 입력 벡터를 요구하면, 특성 열은 모델이 어떻게 각 특성을 변환해야하는지 설명합니다. 효과적인 모델 학습에서는 적절한 특성 열을 고르고 다듬는 것이 키포인트 입니다. 하나의 특성 열은 특성 딕셔너리(dict)의 원본 입력으로 만들어진 열(기본 특성 열)이거나 하나 이상의 기본 열(얻어진 특성 열)에 정의된 변환을 이용하여 새로 생성된 열입니다. 선형 추정기는 수치형, 범주형 특성을 모두 사용할 수 있습니다. 특성 열은 모든 텐서플로 추정기와 함께 작동하고 목적은 모델링에 사용되는 특성들을 정의하는 것입니다. 또한 원-핫-인코딩(one-hot-encoding), 정규화(normalization), 버킷화(bucketization)와 같은 특성 공학 방법을 지원합니다. 기본 특성 열 End of explanation """ def make_input_fn(data_df, label_df, num_epochs=10, shuffle=True, batch_size=32): def input_function(): ds = tf.data.Dataset.from_tensor_slices((dict(data_df), label_df)) if shuffle: ds = ds.shuffle(1000) ds = ds.batch(batch_size).repeat(num_epochs) return ds return input_function train_input_fn = make_input_fn(dftrain, y_train) eval_input_fn = make_input_fn(dfeval, y_eval, num_epochs=1, shuffle=False) """ Explanation: input_function은 입력 파이프라인을 스트리밍으로 공급하는 tf.data.Dataset으로 데이터를 변환하는 방법을 명시합니다. tf.data.Dataset은 데이터 프레임, CSV 형식 파일 등과 같은 여러 소스를 사용합니다. End of explanation """ ds = make_input_fn(dftrain, y_train, batch_size=10)() for feature_batch, label_batch in ds.take(1): print('특성 키:', list(feature_batch.keys())) print() print('클래스 배치:', feature_batch['class'].numpy()) print() print('레이블 배치:', label_batch.numpy()) """ Explanation: 다음과 같이 데이터셋을 점검할 수 있습니다: End of explanation """ age_column = feature_columns[7] tf.keras.layers.DenseFeatures([age_column])(feature_batch).numpy() """ Explanation: 또한 tf.keras.layers.DenseFeatures 층을 사용하여 특정한 특성 열의 결과를 점검할 수 있습니다: End of explanation """ gender_column = feature_columns[0] tf.keras.layers.DenseFeatures([tf.feature_column.indicator_column(gender_column)])(feature_batch).numpy() """ Explanation: DenseFeatures는 조밀한(dense) 텐서만 허용합니다. 
범주형 데이터를 점검하려면 우선 범주형 열에 indicator_column 함수를 적용해야 합니다: End of explanation """ linear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns) linear_est.train(train_input_fn) result = linear_est.evaluate(eval_input_fn) clear_output() print(result) """ Explanation: 모든 기본 특성을 모델에 추가한 다음에 모델을 훈련해 봅시다. 모델을 훈련하려면 tf.estimator API를 이용한 메서드 호출 한번이면 충분합니다: End of explanation """ age_x_gender = tf.feature_column.crossed_column(['age', 'sex'], hash_bucket_size=100) """ Explanation: 도출된 특성 열 이제 정확도 75%에 도달했습니다. 별도로 각 기본 특성 열을 사용하면 데이터를 설명하기에는 충분치 않을 수 있습니다. 예를 들면, 성별과 레이블간의 상관관계는 성별에 따라 다를 수 있습니다. 따라서 gender="Male"과 'gender="Female"의 단일 모델가중치만 배우면 모든 나이-성별 조합(이를테면gender="Male" 그리고 'age="30"그리고gender="Male"그리고age="40"`을 구별하는 것)을 포함시킬 수 없습니다. 서로 다른 특성 조합들 간의 차이를 학습하기 위해서 모델에 교차 특성 열을 추가할 수 있습니다(또한 교차 열 이전에 나이 열을 버킷화할 수 있습니다): End of explanation """ derived_feature_columns = [age_x_gender] linear_est = tf.estimator.LinearClassifier(feature_columns=feature_columns+derived_feature_columns) linear_est.train(train_input_fn) result = linear_est.evaluate(eval_input_fn) clear_output() print(result) """ Explanation: 조합 특성을 모델에 추가하고 모델을 다시 훈련합니다: End of explanation """ pred_dicts = list(linear_est.predict(eval_input_fn)) probs = pd.Series([pred['probabilities'][1] for pred in pred_dicts]) probs.plot(kind='hist', bins=20, title='예측 확률') """ Explanation: 이제 정확도 77.6%에 도달했습니다. 기본 특성만 이용한 학습보다는 약간 더 좋았습니다. 더 많은 특성과 변환을 사용해서 더 잘할 수 있다는 것을 보여주세요! 이제 훈련 모델을 이용해서 평가셋에서 승객에 대해 예측을 할 수 있습니다. 텐서플로 모델은 한번에 샘플의 배치 또는 일부에 대한 예측을 하도록 최적화되어있습니다. 앞서, eval_input_fn은 모든 평가셋을 사용하도록 정의되어 있었습니다. 
End of explanation """ from sklearn.metrics import roc_curve from matplotlib import pyplot as plt fpr, tpr, _ = roc_curve(y_eval, probs) plt.plot(fpr, tpr) plt.title('ROC curve') plt.xlabel('오탐률(false positive rate)') plt.ylabel('정탐률(true positive rate)') plt.xlim(0,) plt.ylim(0,) """ Explanation: 마지막으로, 수신자 조작 특성(receiver operating characteristic, ROC)을 살펴보면 정탐률(true positive rate)과 오탐률(false positive rate)의 상충관계에 대해 더 잘 이해할 수 있습니다. End of explanation """
YuriyGuts/kaggle-quora-question-pairs
notebooks/feature-magic-frequencies.ipynb
mit
from pygoose import * """ Explanation: Feature: Question Occurrence Frequencies This is a "magic" (leaky) feature published by Jared Turkewitz that doesn't rely on the question text. Questions that occur more often in the training and test sets are more likely to be duplicates. Imports This utility package imports numpy, pandas, matplotlib and a helper kg module into the root namespace. End of explanation """ project = kg.Project.discover() """ Explanation: Config Automatically discover the paths to various data folders and compose the project structure. End of explanation """ feature_list_id = 'magic_frequencies' """ Explanation: Identifier for storing these features on disk and referring to them later. End of explanation """ tokens_train = kg.io.load(project.preprocessed_data_dir + 'tokens_lowercase_spellcheck_train.pickle') tokens_test = kg.io.load(project.preprocessed_data_dir + 'tokens_lowercase_spellcheck_test.pickle') """ Explanation: Read data Preprocessed and tokenized questions. End of explanation """ df_all_pairs = pd.DataFrame( [ [' '.join(pair[0]), ' '.join(pair[1])] for pair in tokens_train + tokens_test ], columns=['question1', 'question2'], ) df_unique_texts = pd.DataFrame(np.unique(df_all_pairs.values.ravel()), columns=['question']) question_ids = pd.Series(df_unique_texts.index.values, index=df_unique_texts['question'].values).to_dict() """ Explanation: Build features Unique question texts. End of explanation """ df_all_pairs['q1_id'] = df_all_pairs['question1'].map(question_ids) df_all_pairs['q2_id'] = df_all_pairs['question2'].map(question_ids) """ Explanation: Mark every question with its number according to the uniques table. 
End of explanation """ q1_counts = df_all_pairs['q1_id'].value_counts().to_dict() q2_counts = df_all_pairs['q2_id'].value_counts().to_dict() df_all_pairs['q1_freq'] = df_all_pairs['q1_id'].map(lambda x: q1_counts.get(x, 0) + q2_counts.get(x, 0)) df_all_pairs['q2_freq'] = df_all_pairs['q2_id'].map(lambda x: q1_counts.get(x, 0) + q2_counts.get(x, 0)) """ Explanation: Map to frequency space. End of explanation """ df_all_pairs['freq_ratio'] = df_all_pairs['q1_freq'] / df_all_pairs['q2_freq'] df_all_pairs['freq_ratio_inverse'] = df_all_pairs['q2_freq'] / df_all_pairs['q1_freq'] """ Explanation: Calculate ratios. End of explanation """ columns_to_keep = [ 'q1_freq', 'q2_freq', 'freq_ratio', 'freq_ratio_inverse', ] X_train = df_all_pairs[columns_to_keep].values[:len(tokens_train)] X_test = df_all_pairs[columns_to_keep].values[len(tokens_train):] print('X train:', X_train.shape) print('X test :', X_test.shape) """ Explanation: Build final features. End of explanation """ feature_names = [ 'magic_freq_q1', 'magic_freq_q2', 'magic_freq_q1_q2_ratio', 'magic_freq_q2_q1_ratio', ] project.save_features(X_train, X_test, feature_names, feature_list_id) """ Explanation: Save features End of explanation """
amkatrutsa/MIPT-Opt
Spring2017-2019/15-ConjGrad/Seminar15.ipynb
mit
import numpy as np n = 100 # Random # A = np.random.randn(n, n) # A = A.T.dot(A) # Clustered eigenvalues A = np.diagflat([np.ones(n//4), 10 * np.ones(n//4), 100*np.ones(n//4), 1000* np.ones(n//4)]) U = np.random.rand(n, n) Q, _ = np.linalg.qr(U) A = Q.dot(A).dot(Q.T) A = (A + A.T) * 0.5 print("A is normal matrix: ||AA* - A*A|| =", np.linalg.norm(A.dot(A.T) - A.T.dot(A))) b = np.random.randn(n) # Hilbert matrix # A = np.array([[1.0 / (i+j - 1) for i in range(1, n+1)] for j in range(1, n+1)]) # b = np.ones(n) f = lambda x: 0.5 * x.dot(A.dot(x)) - b.dot(x) grad_f = lambda x: A.dot(x) - b x0 = np.zeros(n) """ Explanation: Метод сопряжённых градиентов (Conjugate gradient method): гадкий утёнок На прошлом семинаре... Методы спуска Направление убывания Градиентный метод Правила выбора шага Теоремы сходимости Эксперименты Система линейных уравнений vs. задача безусловной минимизации Рассмотрим задачу $$ \min_{x \in \mathbb{R}^n} \frac{1}{2}x^{\top}Ax - b^{\top}x, $$ где $A \in \mathbb{S}^n_{++}$. Из необходимого условия экстремума имеем $$ Ax^* = b $$ Также обозначим $f'(x_k) = Ax_k - b = r_k$ Как решить систему $Ax = b$? Прямые методы основаны на матричных разложениях: Плотная матрица $A$: для размерностей не больше нескольких тысяч Разреженная (sparse) матрица $A$: для размерностей порядка $10^4 - 10^5$ Итерационные методы: хороши во многих случаях, единственный подход для задач с размерностью $ > 10^6$ Немного истории... M. Hestenes и E. Stiefel предложили метод сопряжённых градиентов для решения систем линейных уравнений в 1952 году как прямой метод. 
Также долгое время считалось, что метод представляет только теоретический интерес поскольку - метод сопряжённых градиентов не работает на логарифмической линейке - метод сопряжённых градиентов имеет небольшое преимущество перед исключением Гаусса при вычислениях на калькуляторе - для вычислений на "human computers" слишком много обменов данными <img src="./human_computer.jpeg"> Метод сопряжённых градиентов необходимо рассматривать как итерационный метод, то есть останавливаться до точной сходимости! Подробнее здесь Метод сопряжённых направлений В градиентном спуске направления убывания - анти-градиенты, но для функций с плохо обусловленным гессианом сходимость медленная. Идея: двигаться вдоль направлений, которые гарантируют сходимость за $n$ шагов. Определение. Множество ненулевых векторов ${p_0, \ldots, p_l}$ называется сопряжённым относительно матрицы $A \in \mathbb{S}^n_{++}$, если $$ p^{\top}_iAp_j = 0, \qquad i \neq j $$ Утверждение. Для любой $x_0 \in \mathbb{R}^n$ последовательность ${x_k}$, генерируемая методом сопряжённых направлений, сходится к решению системы $Ax = b$ максимум за $n$ шагов. python def ConjugateDirections(x0, A, b, p): x = x0 r = A.dot(x) - b for i in range(len(p)): alpha = - (r.dot(p[i])) / (p[i].dot(A.dot(p[i]))) x = x + alpha * p[i] r = A.dot(x) - b return x Примеры сопряжённых направлений Собственные векторы матрицы $A$ Для любого набора из $n$ векторов можно провести аналог ортогонализации Грама-Шмидта и получить сопряжённые направления Вопрос: что такое ортогонализация Грама-Шмидта? 
:) Геометрическая интерпретация (Mathematics Stack Exchange) <center><img src="./cg.png" ></center> Метод сопряжённых градиентов Идея: новое направление $p_k$ ищется в виде $p_k = -r_k + \beta_k p_{k-1}$, где $\beta_k$ выбирается, исходя из требования сопряжённости $p_k$ и $p_{k-1}$: $$ \beta_k = \dfrac{p^{\top}{k-1}Ar_k}{p^{\top}{k-1}Ap^{\top}_{k-1}} $$ Таким образом, для получения следующего сопряжённого направления $p_k$ необходимо хранить только сопряжённое направление $p_{k-1}$ и остаток $r_k$ с предыдущей итерации. Вопрос: как находить размер шага $\alpha_k$? Сопряжённость сопряжённых градиентов Теорема Пусть после $k$ итераций $x_k \neq x^*$. Тогда $\langle r_k, r_i \rangle = 0, \; i = 1, \ldots k - 1$ $\mathtt{span}(r_0, \ldots, r_k) = \mathtt{span}(r_0, Ar_0, \ldots, A^kr_0)$ $\mathtt{span}(p_0, \ldots, p_k) = \mathtt{span}(r_0, Ar_0, \ldots, A^kr_0)$ $p_k^{\top}Ap_i = 0$, $i = 1,\ldots,k-1$ Теоремы сходимости Теорема 1. Если матрица $A$ имеет только $r$ различных собственных значений, то метод сопряжённых градиентов cойдётся за $r$ итераций. Теорема 2. Имеет место следующая оценка сходимости $$ \| x_{k} - x^ \|_A \leq 2\left( \dfrac{\sqrt{\kappa(A)} - 1}{\sqrt{\kappa(A)} + 1} \right)^k \|x_0 - x^\|_A, $$ где $\|x\|_A = x^{\top}Ax$ и $\kappa(A) = \frac{\lambda_1(A)}{\lambda_n(A)}$ - число обусловленности матрицы $A$, $\lambda_1(A) \geq ... \geq \lambda_n(A)$ - собственные значения матрицы $A$ Замечание: сравните коэффициент геометрической прогрессии с аналогом в градиентном спуске. 
Интерпретации метода сопряжённых градиентов Градиентный спуск в пространстве $y = Sx$, где $S = [p_0, \ldots, p_n]$, в котором матрица $A$ становится диагональной (или единичной в случае ортонормированности сопряжённых направлений) Поиск оптимального решения в Крыловском подпространстве $\mathcal{K}_k(A) = {b, Ab, A^2b, \ldots A^{k-1}b}$ $$ x_k = \arg\min_{x \in \mathcal{K}_k} f(x) $$ Однако естественный базис Крыловского пространства неортогональный и, более того, плохо обусловлен. Упражнение Проверьте численно, насколько быстро растёт обусловленность матрицы из векторов ${b, Ab, ... }$ Поэтому его необходимо ортогонализовать, что и происходит в методе сопряжённых градиентов Основное свойство $$ A^{-1}b \in \mathcal{K}_n(A) $$ Доказательство Теорема Гамильтона-Кэли: $p(A) = 0$, где $p(\lambda) = \det(A - \lambda I)$ $p(A)b = A^nb + a_1A^{n-1}b + \ldots + a_{n-1}Ab + a_n b = 0$ $A^{-1}p(A)b = A^{n-1}b + a_1A^{n-2}b + \ldots + a_{n-1}b + a_nA^{-1}b = 0$ $A^{-1}b = -\frac{1}{a_n}(A^{n-1}b + a_1A^{n-2}b + \ldots + a_{n-1}b)$ Сходимость по функции и по аргументу Решение: $x^* = A^{-1}b$ Минимум функции: $$ f^ = \frac{1}{2}b^{\top}A^{-\top}AA^{-1}b - b^{\top}A^{-1}b = -\frac{1}{2}b^{\top}A^{-1}b = -\frac{1}{2}\|x^\|^2_A $$ Оценка сходимости по функции: $$ f(x) - f^ = \frac{1}{2}x^{\top}Ax - b^{\top}x + \frac{1}{2}\|x^\|_A^2 =\frac{1}{2}\|x\|_A^2 - x^{\top}Ax^ + \frac{1}{2}\|x^\|_A^2 = \frac{1}{2}\|x - x^*\|_A^2 $$ Доказательство сходимости $x_k$ лежит в $\mathcal{K}_k$ $x_k = \sum\limits_{i=1}^k c_i A^{i-1}b = p(A)b$, где $p(x)$ некоторый полином степени не выше $k-1$ $x_k$ минимизирует $f$ на $\mathcal{K}_k$, отсюда $$ 2(f_k - f^) = \inf_{x \in \mathcal{K}_k} \|x - x^ \|^2_A = \inf_{\mathrm{deg}(p) < k} \|(p(A) - A^{-1})b\|^2_A $$ Спектральное разложение $A = U\Lambda U^*$ даёт $$ 2(f_k - f^*) = \inf_{\mathrm{deg}(p) < k} \|(p(\Lambda) - \Lambda^{-1})d\|^2_{\Lambda} = \inf_{\mathrm{deg}(p) < k} \sum_{i=1}^n\frac{d_i^2 (\lambda_ip(\lambda_i) - 1)^2}{\lambda_i} = 
\inf_{\mathrm{deg}(q) \leq k, q(0) = 1} \sum_{i=1}^n\frac{d_i^2 q(\lambda_i)^2}{\lambda_i} $$ Сведём задачу к поиску некоторого многочлена $$ f_k - f^ \leq \left(\sum_{i=1}^n \frac{d_i^2}{2\lambda_i}\right) \inf_{\mathrm{deg}(q) \leq k, q(0) = 1}\left(\max_{i=1,\ldots,n} q(\lambda_i)^2 \right) = \frac{1}{2}\|x^\|^2_A \inf_{\mathrm{deg}(q) \leq k, q(0) = 1}\left(\max_{i=1,\ldots,n} q(\lambda_i)^2 \right) $$ Пусть $A$ имеет $m$ различных собственных значений, тогда для $$ r(y) = \frac{(-1)^m}{\lambda_1 \cdot \ldots \cdot \lambda_m}(y - \lambda_i)\cdot \ldots \cdot (y - \lambda_m) $$ выполнено $\mathrm{deg}(r) = m$ и $r(0) = 1$ - Значение для оптимального полинома степени не выше $k$ оценим сверху значением для полинома $r$ степени $m$ $$ 0 \leq f_k - f^ \leq \frac{1}{2}\|x^\|A^2 \max{i=1,\ldots,m} r(\lambda_i) = 0 $$ - Метод сопряжённых градиентов сошёлся за $m$ итераций Улучшенная версия метода сопряжённых градиентов На практике используются следующие формулы для шага $\alpha_k$ и коэффициента $\beta_{k}$: $$ \alpha_k = \dfrac{r^{\top}k r_k}{p^{\top}{k}Ap_{k}} \qquad \beta_k = \dfrac{r^{\top}k r_k}{r^{\top}{k-1} r_{k-1}} $$ Вопрос: чем они лучше базовой версии? Псевдокод метода сопряжённых градиентов python def ConjugateGradientQuadratic(x0, A, b, eps): r = A.dot(x0) - b p = -r while np.linalg.norm(r) &gt; eps: alpha = r.dot(r) / p.dot(A.dot(p)) x = x + alpha * p r_next = r + alpha * A.dot(p) beta = r_next.dot(r_next) / r.dot(r) p = -r_next + beta * p r = r_next return x Метод сопряжённых градиентов для неквадратичной функции Идея: использовать градиенты $f'(x_k)$ неквадратичной функции вместо остатков $r_k$ и линейный поиск шага $\alpha_k$ вместо аналитического вычисления. Получим метод Флетчера-Ривса. 
python def ConjugateGradientFR(f, gradf, x0, eps): x = x0 grad = gradf(x) p = -grad while np.linalg.norm(gradf(x)) &gt; eps: alpha = StepSearch(x, f, gradf, **kwargs) x = x + alpha * p grad_next = gradf(x) beta = grad_next.dot(grad_next) / grad.dot(grad) p = -grad_next + beta * p grad = grad_next if restart_condition: p = -gradf(x) return x Теорема сходимости Теорема. Пусть - множество уровней $\mathcal{L}$ ограничено - существует $\gamma > 0$: $\| f'(x) \|_2 \leq \gamma$ для $x \in \mathcal{L}$ Тогда $$ \lim_{j \to \infty} \| f'(x_{k_j}) \|_2 = 0 $$ Перезапуск (restart) Для ускорения метода сопряжённых градиентов используют технику перезапусков: удаление ранее накопленной истории и перезапуск метода с текущей точки, как будто это точка $x_0$ Существуют разные условия, сигнализирующие о том, что надо делать перезапуск, например $k = n$ $\dfrac{|\langle f'(x_k), f'(x_{k-1}) \rangle |}{\| f'(x_k) \|_2^2} \geq \nu \approx 0.1$ Можно показать (см. Nocedal, Wright Numerical Optimization, Ch. 5, p. 125), что запуск метода Флетчера-Ривза без использования перезапусков на некоторых итерациях может приводить к крайне медленной сходимости! Метод Полака-Рибьера и его модификации лишены подобного недостатка. Комментарии Замечательная методичка "An Introduction to the Conjugate Gradient Method Without the Agonizing Pain" размещена тут Помимо метода Флетчера-Ривса существуют другие способы вычисления $\beta_k$: метод Полака-Рибьера, метод Хестенса-Штифеля... Для метода сопряжённых градиентов требуется 4 вектора: каких? 
def ConjugateGradientQuadratic(x0, A, b, tol=1e-8, callback=None):
    """Solve Ax = b (equivalently, minimize 0.5*x'Ax - b'x) for SPD A by CG.

    Parameters
    ----------
    x0 : ndarray
        Starting point.
    A : ndarray
        Symmetric positive-definite matrix.
    b : ndarray
        Right-hand side vector.
    tol : float
        Stop when the residual norm ||Ax - b|| drops to `tol` or below.
    callback : callable, optional
        Called with the current iterate after every update (used to record
        the convergence history in the plots below).

    Returns
    -------
    ndarray
        Approximate solution of Ax = b.
    """
    x = x0
    r = A.dot(x0) - b   # residual == gradient of the quadratic at x
    p = -r              # first search direction: steepest descent
    while np.linalg.norm(r) > tol:
        # Hoist the matrix-vector product: the original computed A.dot(p)
        # twice per iteration, and the matvec is the dominant cost of CG.
        Ap = A.dot(p)
        alpha = r.dot(r) / p.dot(Ap)
        x = x + alpha * p
        if callback is not None:
            callback(x)
        r_next = r + alpha * Ap
        beta = r_next.dot(r_next) / r.dot(r)  # Fletcher-Reeves-style coefficient
        p = -r_next + beta * p
        r = r_next
    return x
def ConjugateGradientFR(f, gradf, x0, num_iter=100, tol=1e-8, callback=None, restart=False):
    """Fletcher-Reeves nonlinear conjugate gradient method.

    Parameters
    ----------
    f, gradf : callables
        Objective and its gradient.
    x0 : ndarray
        Starting point.
    num_iter : int
        Maximum number of iterations.
    tol : float
        Stop when the gradient norm drops to `tol` or below.
    callback : callable, optional
        Called with the current iterate after every accepted step.
    restart : int or False
        If an integer k, restart (forget history, take a steepest-descent
        direction) every k iterations; False disables restarts.

    Returns
    -------
    ndarray
        The final iterate.
    """
    x = x0
    grad = gradf(x)
    p = -grad
    it = 0
    # Invariant: `grad` always equals gradf(x) at the top of the loop, so we
    # test the stored gradient instead of re-evaluating gradf(x) in the
    # condition (the original called gradf twice per iteration, and the
    # gradient is usually the most expensive quantity to compute).
    while np.linalg.norm(grad) > tol and it < num_iter:
        # NOTE(review): `utils.backtracking` is not imported anywhere in this
        # notebook; presumably it comes from the course's helper module --
        # confirm before running this cell standalone.
        alpha = utils.backtracking(x, p, method="Wolfe", beta1=0.1,
                                   beta2=0.4, rho=0.5, f=f, grad_f=gradf)
        if alpha < 1e-18:
            break  # line search made no progress; stop early
        x = x + alpha * p
        if callback is not None:
            callback(x)
        grad_next = gradf(x)
        beta = grad_next.dot(grad_next) / grad.dot(grad)  # Fletcher-Reeves coefficient
        p = -grad_next + beta * p
        grad = grad_next.copy()
        it += 1
        if restart and it % restart == 0:
            # periodic restart: discard accumulated history, restart from
            # the steepest-descent direction at the current point
            grad = gradf(x)
            p = -grad
    return x
gd.get_convergence()], label=r"$\|f'(x_k)\|^{G}_2$", linewidth=2) plt.legend(loc="best", fontsize=16) plt.xlabel(r"Iteration number, $k$", fontsize=20) plt.ylabel("Convergence rate", fontsize=20) plt.xticks(fontsize=18) _ = plt.yticks(fontsize=18) """ Explanation: График сходимости End of explanation """ %timeit scopt.minimize(f, x0, method="CG", tol=tol, jac=grad_f, options={"maxiter": max_iter}) %timeit cg_fr.solve(x0, tol=tol, max_iter=max_iter) %timeit cg_fr_rest.solve(x0, tol=tol, max_iter=max_iter) %timeit gd.solve(x0, tol=tol, max_iter=max_iter) """ Explanation: Время выполнения End of explanation """
briennakh/BIOF509
Wk08/Wk08_Numpy_model_package_survey_inclass_exercises.ipynb
mit
import matplotlib.pyplot as plt import numpy as np %matplotlib inline n = 20 x = np.random.random((n,1)) y = 5 + 6 * x ** 2 + np.random.normal(0,0.5, size=(n,1)) plt.plot(x, y, 'b.') plt.show() """ Explanation: Week 8 - Implementing a model in numpy and a survey of machine learning packages for python This week we will be looking in detail at how to implement a supervised regression model using the base scientific computing packages available with python. We will also be looking at the different packages available for python that implement many of the algorithms we might want to use. Regression with numpy Why implement algorithms from scratch when dedicated packages already exist? The packages available are very powerful and a real time saver but they can obscure some issues we might encounter if we don't know to look for them. By starting with just numpy these problems will be more obvious. We can address them here and then when we move on we will know what to look for and will be less likely to miss them. The dedicated machine learning packages implement the different algorithms but we are still responsible for getting our data in a suitable format. End of explanation """ intercept_x = np.hstack((np.ones((n,1)), x)) intercept_x """ Explanation: This is a very simple dataset. There is only one input value for each record and then there is the output value. Our goal is to determine the output value or dependent variable, shown on the y-axis, from the input or independent variable, shown on the x-axis. Our approach should scale to handle multiple input, or independent, variables. The independent variables can be stored in a vector, a 1-dimensional array: $$X^T = (X_{1}, X_{2}, X_{3})$$ As we have multiple records these can be stacked in a 2-dimensional array. Each record becomes one row in the array. Our x variable is already set up in this way. 
In linear regression we can compute the value of the dependent variable using the following formula: $$f(X) = \beta_{0} + \sum_{j=1}^p X_j\beta_j$$ The $\beta_{0}$ term is the intercept, and represents the value of the dependent variable when the independent variable is zero. Calculating a solution is easier if we don't treat the intercept as special. Instead of having an intercept co-efficient that is handled separately we can instead add a variable to each of our records with a value of one. End of explanation """ np.linalg.lstsq(intercept_x,y) """ Explanation: Numpy contains the linalg module with many common functions for performing linear algebra. Using this module finding a solution is quite simple. End of explanation """ coeff, residuals, rank, sing_vals = np.linalg.lstsq(intercept_x,y) intercept_x.shape, coeff.T.shape np.sum(intercept_x * coeff.T, axis=1) predictions = np.sum(intercept_x * coeff.T, axis=1) plt.plot(x, y, 'bo') plt.plot(x, predictions, 'ko') plt.show() predictions.shape np.sum((predictions.reshape((20,1)) - y) ** 2), residuals """ Explanation: The values returned are: The least-squares solution The sum of squared residuals The rank of the independent variables The singular values of the independent variables Exercise Calculate the predictions our model would make Calculate the sum of squared residuals from our predictions. Does this match the value returned by lstsq? End of explanation """ our_coeff = np.dot(np.dot(np.linalg.inv(np.dot(intercept_x.T, intercept_x)), intercept_x.T), y) print(coeff, '\n', our_coeff) our_predictions = np.dot(intercept_x, our_coeff) predictions, our_predictions plt.plot(x, y, 'ko', label='True values') plt.plot(x, our_predictions, 'ro', label='Predictions') plt.legend(numpoints=1, loc=4) plt.show() np.arange(12).reshape((3,4)) """ Explanation: Least squares refers to the cost function for this algorithm. The objective is to minimize the residual sum of squares. 
The difference between the actual and predicted values is calculated, it is squared and then summed over all records. The function is as follows: $$RSS(\beta) = \sum_{i=1}^{N}(y_i - x_i^T\beta)^2$$ Matrix arithmetic Within lstsq all the calculations are performed using matrix arithmetic rather than the more familiar element-wise arithmetic numpy arrays generally perform. Numpy does have a matrix type but matrix arithmetic can also be performed on standard arrays using dedicated methods. Source: Wikimedia Commons (User:Bilou) In matrix multiplication the resulting value in any position is the sum of multiplying each value in a row in the first matrix by the corresponding value in a column in the second matrix. The residual sum of squares can be calculated with the following formula: $$RSS(\beta) = (y - X\beta)^T(y-X\beta)$$ The value of our co-efficients can be calculated with: $$\hat\beta = (X^TX)^{-1}X^Ty$$ Unfortunately, the result is not as visually appealing as in languages that use matrix arithmetic by default. End of explanation """ plt.plot(x, y - our_predictions, 'ko') plt.show() plt.plot(x, y, 'ko', label='True values') all_x = np.linspace(0, 1, 1000).reshape((1000,1)) intercept_all_x = np.hstack((np.ones((1000,1)), all_x)) print(intercept_all_x.shape, our_coeff.shape) #all_x_predictions = np.dot(intercept_all_x, our_coeff) all_x_predictions = np.sum(intercept_all_x * our_coeff.T, axis=1) plt.plot(all_x, all_x_predictions, 'r-', label='Predictions') plt.legend(numpoints=1, loc=4) plt.show() """ Explanation: Exercise Plot the residuals. The x axis will be the independent variable (x) and the y axis the residual between our prediction and the true value. Plot the predictions generated for our model over the entire range of 0-1. One approach is to use the np.linspace method to create equally spaced values over a specified range. 
End of explanation """ x_expanded = np.hstack((x**i for i in range(1,20))) b, residuals, rank, s = np.linalg.lstsq(x_expanded, y) print(b) plt.plot(x, y, 'ko', label='True values') plt.plot(x, np.dot(x_expanded, b), 'ro', label='Predictions') plt.legend(numpoints=1, loc=4) plt.show() """ Explanation: Types of independent variable The independent variables can be many different types. Quantitative inputs Categorical inputs coded using dummy values Interactions between multiple inputs Tranformations of other inputs, e.g. logs, raised to different powers, etc. It is important to note that a linear model is only linear with respect to its inputs. Those input variables can take any form. One approach we can take to improve the predictions from our model would be to add in the square, cube, etc of our existing variable. End of explanation """ n = 20 p = 12 training = [] val = [] for i in range(1, p): np.random.seed(0) x = np.random.random((n,1)) y = 5 + 6 * x ** 2 + np.random.normal(0,0.5, size=(n,1)) x = np.hstack((x**j for j in np.arange(i))) our_coeff = np.dot( np.dot( np.linalg.inv( np.dot( x.T, x ) ), x.T ), y ) our_predictions = np.dot(x, our_coeff) our_training_rss = np.sum((y - our_predictions) ** 2) training.append(our_training_rss) val_x = np.random.random((n,1)) val_y = 5 + 6 * val_x ** 2 + np.random.normal(0,0.5, size=(n,1)) val_x = np.hstack((val_x**j for j in np.arange(i))) our_val_pred = np.dot(val_x, our_coeff) our_val_rss = np.sum((val_y - our_val_pred) ** 2) val.append(our_val_rss) #print(i, our_training_rss, our_val_rss) plt.plot(range(1, p), training, 'ko-', label='training') plt.plot(range(1, p), val, 'ro-', label='validation') plt.legend(loc=2) plt.show() """ Explanation: There is a tradeoff with model complexity. As we add more complexity to our model we can fit our training data increasingly well but eventually will lose our ability to generalize to new data. Very simple models underfit the data and have high bias. 
Very complex models overfit the data and have high variance. The goal is to detect true sources of variation in the data and ignore variation that is just noise. How do we know if we have a good model? A common approach is to break up our data into a training set, a validation set, and a test set. We train models with different parameters on the training set. We evaluate each model on the validation set, and choose the best We then measure the performance of our best model on the test set. What would our best model look like? Because we are using dummy data here we can easily make more. End of explanation """ np.random.seed(0) n = 200 x = np.random.random((n,1)) y = 5 + 6 * x ** 2 + np.random.normal(0,0.5, size=(n,1)) intercept_x = np.hstack((np.ones((n,1)), x)) coeff, residuals, rank, sing_vals = np.linalg.lstsq(intercept_x,y) print('lstsq', coeff) def gradient_descent(x, y, rounds = 1000, alpha=0.01): theta = np.zeros((x.shape[1], 1)) costs = [] for i in range(rounds): prediction = np.dot(x, theta) error = prediction - y gradient = np.dot(x.T, error / y.shape[0]) theta -= gradient * alpha costs.append(np.sum(error ** 2)) return (theta, costs) theta, costs = gradient_descent(intercept_x, y, rounds=10000) print(theta, costs[::500]) np.random.seed(0) n = 200 x = np.random.random((n,1)) y = 5 + 6 * x ** 2 + np.random.normal(0,0.5, size=(n,1)) x = np.hstack((x**j for j in np.arange(20))) coeff, residuals, rank, sing_vals = np.linalg.lstsq(x,y) print('lstsq', coeff) theta, costs = gradient_descent(x, y, rounds=10000) print(theta, costs[::500]) plt.plot(x[:,1], y, 'ko') plt.plot(x[:,1], np.dot(x, coeff), 'co') plt.plot(x[:,1], np.dot(x, theta), 'ro') plt.show() """ Explanation: Gradient descent One limitation of our current implementation is that it is resource intensive. For very large datasets an alternative is needed. Gradient descent is often preferred, and particularly stochastic gradient descent for very large datasets. 
def gradient_descent(x, y, rounds=1000, alpha=0.01):
    """Batch gradient descent for the least-squares cost.

    x is the (n, p) design matrix and y the (n, 1) target column. Performs
    `rounds` fixed-step updates with learning rate `alpha` on the gradient of
    RSS/n and returns a tuple (theta, costs): the final (p, 1) coefficient
    vector and the residual sum of squares recorded at every iteration.
    """
    n_samples = y.shape[0]
    theta = np.zeros((x.shape[1], 1))
    costs = []
    for _ in range(rounds):
        residual = x @ theta - y
        # cost is recorded at the point *before* this iteration's update
        costs.append(np.sum(residual ** 2))
        # the 1/n factor keeps the step size independent of the sample count
        theta = theta - alpha * (x.T @ (residual / n_samples))
    return (theta, costs)
GoogleCloudPlatform/training-data-analyst
courses/machine_learning/deepdive/06_structured/4_preproc_tft.ipynb
apache-2.0
%%bash conda update -y -n base -c defaults conda source activate py2env pip uninstall -y google-cloud-dataflow conda install -y pytz pip install apache-beam[gcp]==2.9.0 pip install apache-beam[gcp] tensorflow_transform==0.8.0 %%bash pip freeze | grep -e 'flow\|beam' """ Explanation: <h1> Preprocessing using tf.transform and Dataflow </h1> This notebook illustrates: <ol> <li> Creating datasets for Machine Learning using tf.transform and Dataflow </ol> <p> While Pandas is fine for experimenting, for operationalization of your workflow, it is better to do preprocessing in Apache Beam. This will also help if you need to preprocess data in flight, since Apache Beam also allows for streaming. Apache Beam only works in Python 2 at the moment, so we're going to switch to the Python 2 kernel. In the above menu, click the dropdown arrow and select `python2`. ![image.png](attachment:image.png) Then activate a Python 2 environment and install Apache Beam. Only specific combinations of TensorFlow/Beam are supported by tf.transform. So make sure to get a combo that is. * TFT 0.8.0 * TF 1.8 or higher * Apache Beam [GCP] 2.5.0 or higher End of explanation """ import tensorflow as tf import apache_beam as beam print(tf.__version__) # change these to try this notebook out BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR PROJECT ID PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR BUCKET NAME REGION = 'us-central1' import os os.environ['BUCKET'] = BUCKET os.environ['PROJECT'] = PROJECT os.environ['REGION'] = REGION !gcloud config set project $PROJECT %%bash if ! 
def cleanup(rowdict):
    """Normalize one raw BigQuery natality row into a training-ready record.

    Generator (so it can be used with beam.FlatMap and silently drop unusable
    rows) that yields at most one dict per input row:
      * string columns coerced to str ('None' when absent),
      * float columns coerced to float (-99 sentinel when absent/unparseable),
      * the opaque numeric mother_race code mapped to a human-readable label,
      * a 'key' column: SHA-224 hex digest of the CSV-joined feature values,
        used as a stable per-row identifier.
    Rows with non-positive weight, mother_age, gestation_weeks or plurality
    are filtered out (nothing is yielded).
    """
    import hashlib  # `copy` was imported in the original but never used

    CSV_COLUMNS = 'weight_pounds,is_male,mother_age,mother_race,plurality,gestation_weeks,mother_married,cigarette_use,alcohol_use'.split(',')
    STR_COLUMNS = 'key,is_male,mother_race,mother_married,cigarette_use,alcohol_use'.split(',')
    FLT_COLUMNS = 'weight_pounds,mother_age,plurality,gestation_weeks'.split(',')

    # add any missing columns, and correct the types
    def tofloat(value, ifnot):
        # best-effort float conversion; `ifnot` is the sentinel for bad input
        try:
            return float(value)
        except (ValueError, TypeError):
            return ifnot

    result = {k: str(rowdict[k]) if k in rowdict else 'None' for k in STR_COLUMNS}
    result.update({k: tofloat(rowdict[k], -99) if k in rowdict else -99 for k in FLT_COLUMNS})

    # modify opaque numeric race code into human-readable data
    # NOTE(review): 'Samaon' looks like a typo for 'Samoan'; the literal is
    # kept unchanged so the emitted vocabulary stays compatible downstream.
    races = dict(zip([1, 2, 3, 4, 5, 6, 7, 18, 28, 39, 48],
                     ['White', 'Black', 'American Indian', 'Chinese',
                      'Japanese', 'Hawaiian', 'Filipino',
                      'Asian Indian', 'Korean', 'Samaon', 'Vietnamese']))
    if 'mother_race' in rowdict and rowdict['mother_race'] in races:
        result['mother_race'] = races[rowdict['mother_race']]
    else:
        result['mother_race'] = 'Unknown'

    # cleanup: write out only the data that we want to train on
    if result['weight_pounds'] > 0 and result['mother_age'] > 0 and result['gestation_weeks'] > 0 and result['plurality'] > 0:
        data = ','.join([str(result[k]) for k in CSV_COLUMNS])
        # BUG FIX: hashlib requires bytes on Python 3 (sha224(str) raises
        # TypeError); UTF-8 encoding produces the same digest Python 2
        # produced for this ASCII data, so keys stay stable.
        result['key'] = hashlib.sha224(data.encode('utf-8')).hexdigest()
        yield result
hang on'.format(job_name)) OUTPUT_DIR = 'gs://{0}/babyweight/preproc_tft/'.format(BUCKET) import subprocess subprocess.call('gsutil rm -r {}'.format(OUTPUT_DIR).split()) options = { 'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'), 'temp_location': os.path.join(OUTPUT_DIR, 'tmp'), 'job_name': job_name, 'project': PROJECT, 'region': REGION, 'num_workers': 4, 'max_num_workers': 5, 'teardown_policy': 'TEARDOWN_ALWAYS', 'no_save_main_session': True, 'requirements_file': 'requirements.txt' } opts = beam.pipeline.PipelineOptions(flags=[], **options) if in_test_mode: RUNNER = 'DirectRunner' else: RUNNER = 'DataflowRunner' # set up metadata raw_data_schema = { colname : dataset_schema.ColumnSchema(tf.string, [], dataset_schema.FixedColumnRepresentation()) for colname in 'key,is_male,mother_race,mother_married,cigarette_use,alcohol_use'.split(',') } raw_data_schema.update({ colname : dataset_schema.ColumnSchema(tf.float32, [], dataset_schema.FixedColumnRepresentation()) for colname in 'weight_pounds,mother_age,plurality,gestation_weeks'.split(',') }) raw_data_metadata = dataset_metadata.DatasetMetadata(dataset_schema.Schema(raw_data_schema)) def read_rawdata(p, step, test_mode): if step == 'train': selquery = 'SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 4)) < 3'.format(query) else: selquery = 'SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 4)) = 3'.format(query) if in_test_mode: selquery = selquery + ' LIMIT 100' #print('Processing {} data from {}'.format(step, selquery)) return (p | '{}_read'.format(step) >> beam.io.Read(beam.io.BigQuerySource(query=selquery, use_standard_sql=True)) | '{}_cleanup'.format(step) >> beam.FlatMap(cleanup) ) # run Beam with beam.Pipeline(RUNNER, options=opts) as p: with beam_impl.Context(temp_dir=os.path.join(OUTPUT_DIR, 'tmp')): # analyze and transform training raw_data = read_rawdata(p, 'train', in_test_mode) raw_dataset = (raw_data, raw_data_metadata) transformed_dataset, transform_fn = ( raw_dataset | 
beam_impl.AnalyzeAndTransformDataset(preprocess_tft)) transformed_data, transformed_metadata = transformed_dataset _ = transformed_data | 'WriteTrainData' >> tfrecordio.WriteToTFRecord( os.path.join(OUTPUT_DIR, 'train'), coder=example_proto_coder.ExampleProtoCoder( transformed_metadata.schema)) # transform eval data raw_test_data = read_rawdata(p, 'eval', in_test_mode) raw_test_dataset = (raw_test_data, raw_data_metadata) transformed_test_dataset = ( (raw_test_dataset, transform_fn) | beam_impl.TransformDataset()) transformed_test_data, _ = transformed_test_dataset _ = transformed_test_data | 'WriteTestData' >> tfrecordio.WriteToTFRecord( os.path.join(OUTPUT_DIR, 'eval'), coder=example_proto_coder.ExampleProtoCoder( transformed_metadata.schema)) _ = (transform_fn | 'WriteTransformFn' >> transform_fn_io.WriteTransformFn(os.path.join(OUTPUT_DIR, 'metadata'))) job = p.run() if in_test_mode: job.wait_until_finish() print("Done!") preprocess(query, in_test_mode=False) %bash gsutil ls gs://${BUCKET}/babyweight/preproc_tft/*-00000* """ Explanation: <h2> Create ML dataset using tf.transform and Dataflow </h2> <p> Let's use Cloud Dataflow to read in the BigQuery data and write it out as CSV files. Along the way, let's use tf.transform to do scaling and transforming. Using tf.transform allows us to save the metadata to ensure that the appropriate transformations get carried out during prediction as well. <p> Note that after you launch this, the notebook won't show you progress. Go to the GCP webconsole to the Dataflow section and monitor the running job. It took about <b>30 minutes</b> for me. If you wish to continue without doing this step, you can copy my preprocessed output: <pre> gsutil -m cp -r gs://cloud-training-demos/babyweight/preproc_tft gs://your-bucket/ </pre> End of explanation """