code
stringlengths
2.5k
150k
kind
stringclasses
1 value
``` import numpy as np import os, sys, datetime, string sys.path.append('/Volumes/SANDISK128/Documents/Thesis/Python/') sys.path.append('/Volumes/SANDISK128/Documents/Thesis/Python/MEPS/') import matplotlib.pyplot as plt from scipy import stats from mpl_toolkits.basemap import Basemap import netCDF4 import matplotlib as mpl import save_fig as sF import createFolder as cF from calc_station_properties import find_station_yx import matplotlib.colors as colors import gdal from mpl_toolkits.mplot3d import Axes3D from matplotlib.cbook import get_sample_data import pandas as pd from matplotlib.colors import LightSource from pyproj import Proj, transform savefig = 1 # 1 = yes, 0 = no fig_dir = '../../Figures/Norway_map' cF.createFolder(fig_dir) form = 'png' def Lambert_map(lllon, lllat, urlon, urlat, lat0, lon0, res='l', fill=False, zoom=False): """lllon - lon lower left corner ... lat0 - latitude standard parallel, should be somewhere in the center of the domain lon0 - the parallel longitude lllon - lower left longitude ... http://matplotlib.org/basemap/api/basemap_api.html""" rsphere=(6378137.00,6356752.3142) map = Basemap(llcrnrlon=lllon, llcrnrlat=lllat, urcrnrlon=urlon, urcrnrlat=urlat, rsphere=rsphere, resolution=res,area_thresh=1000.,projection='lcc', lat_0=lat0,lon_0=lon0) # map = Basemap(llcrnrlon=lllon, llcrnrlat=lllat, urcrnrlon=urlon, urcrnrlat=urlat, rsphere=rsphere, # resolution=res,area_thresh=1000.,projection='lcc', lat_1=lllon,lon_0=lon0) # Draw the latitudes and the longitudes # parallels = np.arange(0.,90,5.) # map.drawparallels(parallels,labels=[True,False,False,False],fontsize=16) # meridians = np.arange(10.,361.,10.) 
# map.drawmeridians(meridians,labels=[False,False,False,True],fontsize=16) if zoom == False: map.drawmeridians(np.arange(0,90,10),labels=[0,0,0,1],fontsize=16) map.drawparallels(np.arange(10,361,4),labels=[1,0,0,0],fontsize=16) else: # map.drawmeridians(np.arange(0,90,3),labels=[0,0,0,1],fontsize=16) # map.drawparallels(np.arange(0,361,2),labels=[1,0,0,0],fontsize=16) map.drawmeridians(np.arange(0,90,0.1),labels=[0,0,0,1],fontsize=16) map.drawparallels(np.arange(0,361,0.05),labels=[1,0,0,0],fontsize=16) # Draw the coastline map.drawcoastlines()#color='0.5') if fill: map.drawlsmask(#land_color='0.8', ocean_color='gainsboro') # if zoom == False: ### plot MEPS area # for i in range(0,lato.shape[0],12): # xs, ys = map(lono[i], lato[i]) # map.plot(xs,ys, color = 'orange', marker = 'o', markersize = 10, linestyle = '-', linewidth = 10) # for i in range(0,lato2.shape[0],12): # xs2, ys2 = map(lono2[i], lato2[i]) # map.plot(xs2,ys2, color = 'orange', marker = 'o', markersize = 10, linestyle = '-', linewidth = 10) # xs, ys = map(lono[739], lato[739]) #map.plot(xs,ys, color = 'orange', marker ='o', markersize = 10, linestyle = '-', linewidth = 10, label = 'MEPS domain') #lgd = plt.legend(loc='lower left',fontsize=18) #map.drawmapboundary(fill_color='gainsboro') return map def PlotContours(Lon, Lat, psurf, map, nrlevels=10, leveldist=None,levels=None, numbers=True, color= 'k'): """ contours for example the pressure nrlevels - gives the number of displayed levels leveldist - gives distance between levels, if specified the nlevels is ignored levels - can be an array that specifies the levels to display, if specified nrlevels and leveldist are ignored numbers - True if the contours are labeled color - color of the contours (None is s color map)""" if levels is not None: cs= map.contour(Lon, Lat, psurf, levels, linewidths= 1. 
, colors= color) elif leveldist is not None: levels= np.arange(np.round(np.min(psurf)- np.min(psurf)%leveldist), np.round(np.max(psurf)+ leveldist), leveldist) cs= map.contour(Lon, Lat, psurf, levels, linewidths= 1. , colors= color) else: cs= map.contour(Lon, Lat, psurf, nrlevels, linewidths= 1. , colors= color)#, colors= 6*['b']+ 6*['r'],) if numbers == True: plt.clabel(cs, fontsize=10, inline=1, fmt='%1.0f', color= 'black') #plt.tight_layout() def PlotColorMap4(Lon, Lat, data, map, maxlevel= None, symetric=True, bounds=None, label='', color= 'RdBu', boxnr= 21): """ plot a color map, e.g. vertical velocity if symetric == True it is symetric around 0 and the maxlevel is calculated automatically best version of PlotColorMap""" if color== 'RdBu': colors= [(plt.cm.RdBu_r(h)) for h in range(256)] elif color== 'seismic': colors= [(plt.cm.seismic(h)) for h in range(256)] elif color== 'blue': colors= [(plt.cm.Blues(h)) for h in range(256)] elif color== 'inverse_blue': colors= [(plt.cm.Blues(h)) for h in range(255, 0, -1)] elif color == 'red': colors= ['azure']+[(plt.cm.Reds(h)) for h in range(256)] else: print('wrong color') # if bounds != None: boxnr = len(bounds) new_map = plt.matplotlib.colors.LinearSegmentedColormap.from_list('new_map', colors) #, N=boxnr) if bounds is None: if maxlevel is not None: minlevel= maxlevel if maxlevel is None and bounds is None: if symetric is True: maxlevel, minlevel= np.max(np.abs(data)), -np.max(np.abs(data)) else: maxlevel, minlevel= np.max(data), np.min(data) bounds= np.round(np.linspace(minlevel, maxlevel, boxnr+1), int(np.log10(85/maxlevel))) # bounds= np.round(list(np.linspace(-maxlevel, 0, boxnr//2+1))+list(np.linspace(0, maxlevel, boxnr//2+1)), int(np.log10(85/maxlevel))) # print(maxlevel) Lon= 0.5* (Lon[1:, 1:]+ Lon[:-1, :-1]) Lat= 0.5* (Lat[1:, 1:]+ Lat[:-1, :-1]) norm= mpl.colors.BoundaryNorm(bounds, new_map.N) cs= map.pcolormesh(Lon, Lat, data[1:, 1:], norm= norm, cmap=new_map, alpha= 1.) 
cb = map.colorbar(cs, boundaries= bounds, norm= norm, location='right',pad='3%',extend='max') cb.set_label(label, size=18) cb.ax.tick_params(labelsize=16) #### Plot kartverket elevation ### champ = 255. no0 = np.array([0,155,88])/champ #700 no1 = np.array([0,160,79])/champ #750 no2 = np.array([0,164,72])/champ #800 no3 = np.array([55,168,76])/champ #850 no4 = np.array([81,171,79])/champ #900 no5 = np.array([104,174,82])/champ #950 no6 = np.array([119,177,84])/champ #1000 no7 = np.array([136,180,85])/champ #1050 no8 = np.array([151,183,87])/champ #1100 no9 = np.array([165,185,88])/champ #1150 no10 = np.array([179,187,89])/champ #1200 no11 = np.array([185,180,92])/champ #1250 no12 = np.array([190,173,94])/champ #1300 no13 = np.array([197,164,98])/champ #1350 no14 = np.array([205,168,117])/champ #1400 no15 = np.array([214,173,134])/champ #1450 no16 = np.array([223,180,154])/champ #1500 no17 = np.array([231,190,174])/champ #1550 no18 = np.array([239,204,195])/champ #1600 no19 = np.array([246,221,220])/champ #1650 no20 = np.array([252,241,242])/champ #1700 no21 = np.array([255,255,255])/champ #1750 no22 = np.array([80,80,81])/champ #url = ('http://thredds.met.no/thredds/dodsC/meps25epsarchive/2016/12/24/meps_mbr0_pp_2_5km_20161224T12Z.nc') url = ('http://thredds.met.no/thredds/dodsC/meps25epsarchive/2016/12/23/meps_mbr0_pp_2_5km_20161223T00Z.nc') #url = ('http://thredds.met.no/thredds/dodsC/meps25epsarchive/2016/12/21/meps_mbr0_pp_2_5km_20161221T00Z.nc') dataset = netCDF4.Dataset(url) land= dataset.variables['land_area_fraction'][:] lonpp= dataset.variables['longitude'][:] latpp= dataset.variables['latitude'][:] x_wind = dataset.variables['x_wind_10m'][14,:,:] y_wind = dataset.variables['y_wind_10m'][14,:,:] #T_2m= dataset.variables['air_temperature_2m'][:] alti= dataset.variables['altitude'][:] dataset.close() grid_x, grid_y = find_station_yx(latpp, lonpp, 59+48.73/60, 7+12.87/60) alti[grid_y[0],grid_x[0]] lonpp[grid_y[0],grid_x[0]] latpp[grid_y[0],grid_x[0]] #### 
Station map #plt.figure(1) fig = plt.figure(figsize=(9,8)) plt.clf() map= Lambert_map(lllon=7.05, lllat=59.73, urlon=7.35, urlat=59.91, lat0= 63.5, lon0= 15, res='i', fill=False,zoom=True) Lonpp,Latpp = map(lonpp,latpp) #levels = [850, 900,950,1000,1050,1100,1150, 1200, 1250, 1300,1350,1400] levels = np.arange(700,1800,50) PlotContours(Lonpp, Latpp, alti, map, leveldist=None,levels=levels, numbers=True, color= 'gray') cmap = colors.ListedColormap([no0, no1, no2, no3, no4, no5, no6, no7, no8, no9, no10, \ no11, no12, no13, no14, no15, no16, no17, no18, no19, no20, \ no21]) norm = colors.BoundaryNorm(boundaries = levels, ncolors=cmap.N) #PlotColorMap4(Lonpp, Latpp, alti, map, bounds= levels,color=cmap, label='Altitude') cs = map.pcolormesh(Lonpp, Latpp, alti, norm= norm, cmap=cmap, alpha= 1.) cb = plt.colorbar(cs, boundaries= levels, #location='right', extend='max') ### plot wind barbs map.barbs(Lonpp,Latpp,x_wind,y_wind,barbcolor = [no22], pivot = 'middle') cb.set_label('Altitude [m]', size=18) cb.ax.tick_params(labelsize=16) plt.gca().set_aspect('equal', adjustable='box') cb.ax.set_xticklabels([700, '', '', '', 900,'','','',1100,'','','',1300,'','','',1500, '','','',1700,'']) # horizontal colorbar namestat = ['Haukeliseter']#,'Model']#, 'lower left', 'upper right']#,'grid point'] lonstat = [7+12.87/60]#,lonpp[grid_y[0],grid_x[0]]]#,7.05, 7.4]#,7.2] latstat= [59+48.73/60]#,latpp[grid_y[0],grid_x[0]]]#,59.65, 59.9]#,59.8] xpt, ypt= map(lonstat, latstat) map.plot(xpt,ypt,color='k', marker='X',markersize=12) namestat2 = ['Model (1041 m a.s.l.)']#, 'lower left', 'upper right']#,'grid point'] lonstat2 = [lonpp[grid_y[0],grid_x[0]]]#,7.05, 7.4]#,7.2] latstat2= [latpp[grid_y[0],grid_x[0]]]#,59.65, 59.9]#,59.8] xpt2, ypt2= map(lonstat2, latstat2) map.plot(xpt2,ypt2,'ko') for i in range(len(namestat)): plt.text(xpt2[i], ypt2[i], namestat2[i], fontsize=18,fontweight='bold', ha='center',va='bottom',color='black') fig_name = 'MEPS_elevation_Haukeli_wind.png' if savefig == 1: 
sF.save_figure_portrait(fig_dir,fig_name,form) print('saved: %s/%s' %(fig_dir,fig_name)) else: plt.show() plt.close() #### South Norway #### #plt.figure(1) fig = plt.figure(figsize=(9,8)) #plt.clf() map = Lambert_map(lllon=4., lllat=57.6, urlon=10.9, urlat=62.1, lat0= 63.5, lon0= 15, res='i', fill=True,zoom=True) Lonpp,Latpp = map(lonpp,latpp) #alti[alti<3] = np.nan #PlotColorMap4(Lonpp, Latpp, alti, map, bounds= [3, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1250, 1500,],color='red', label='Altitude') levels = np.arange(0,np.nanmax(alti),50) norm = colors.BoundaryNorm(boundaries = levels, ncolors=plt.cm.gist_earth.N) cs = map.contourf(Lonpp, Latpp, alti, levels, norm=norm, cmap = plt.cm.gist_earth, extend='max') cb = plt.colorbar(cs, boundaries= levels, #location='right', extend='max') cb.set_label('Altitude [m]', size=18) cb.ax.tick_params(labelsize=16) plt.gca().set_aspect('equal', adjustable='box') namestat = ['Haukeliseter'] lonstat = [7+12.87/60]#,7.2] latstat= [59+48.73/60]#,59.8] xpt, ypt= map(lonstat, latstat) map.plot(xpt,ypt,'ko') for i in range(len(namestat)): plt.text(xpt[i]+10000, ypt[i]+10000, namestat[i], fontsize=18,fontweight='bold', ha='center',va='bottom',color='black') fig_name = 'South_Norway_MEPS.png' if savefig == 1: sF.save_figure_portrait(fig_dir,fig_name,form) print('saved: %s/%s' %(fig_dir,fig_name)) else: plt.show() plt.close() lato = np.concatenate((latpp[0,:],latpp[-1,:]), axis = 0) lono = np.concatenate((lonpp[0,:], lonpp[-1,:]), axis = 0) lato2 = np.concatenate((latpp[:,0],latpp[:,-1]), axis = 0) lono2 = np.concatenate((lonpp[:,0], lonpp[:,-1]), axis = 0) ### Norway ### #plt.figure(1) fig = plt.figure(figsize=(9,8)) #map = Lambert_map(lllon=0., lllat=49., urlon=50., urlat=72, lat0= 63.5, lon0= 15, res='l', fill=True,zoom=False) map = Lambert_map(lllon=lonpp[0,:].min(), lllat=latpp[0,:].min(), urlon=lonpp[-1,:].max(), urlat=latpp[-1,:].max(), lat0= 63.5, lon0= 15, res='l', fill=True,zoom=False) Lonpp,Latpp = map(lonpp,latpp) 
#alti[alti<3] = np.nan #PlotContours(Lonpp, Latpp, alti, map, leveldist=None,levels=[0, 25, 50, 100, 200, 300, 400, 500, 600,1000,1500,2000], numbers=True, color= 'k') #PlotColorMap4(Lonpp, Latpp, alti, map, bounds= [3, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1250, 1500,],color='red', label='Altitude') levels = np.arange(0,np.nanmax(alti),50) norm = colors.BoundaryNorm(boundaries = levels, ncolors=plt.cm.gist_earth.N) cs = map.contourf(Lonpp, Latpp, alti, levels, norm=norm, cmap = plt.cm.gist_earth, extend='max') cb = plt.colorbar(cs, boundaries= levels, #location='right', extend='max') cb.set_label('Altitude [m]', size=18) cb.ax.tick_params(labelsize=16) plt.gca().set_aspect('equal', adjustable='box') namestat = ['Haukeliseter'] lonstat = [7+12.87/60]#,7.2] latstat= [59+48.73/60]#,59.8] xpt, ypt= map(lonstat, latstat) map.plot(xpt,ypt,'ko') for i in range(len(namestat)): plt.text(xpt[i]+100000, ypt[i]+100000, namestat[i], fontsize=18,fontweight='bold', ha='center',va='bottom',color='black') #plt.tight_layout(pad=2.5) fig_name = 'Norway_elevation_MEPS.png' if savefig == 1: sF.save_figure_portrait(fig_dir,fig_name,form) print('saved: %s/%s' %(fig_dir,fig_name)) else: plt.show() plt.close() gdal_data = gdal.Open('6600_1_10m_z33.tif') gt = gdal_data.GetGeoTransform() gdal_band = gdal_data.GetRasterBand(1) nodataval = gdal_band.GetNoDataValue() # convert to a numpy array data_array = gdal_data.ReadAsArray().astype(np.float) data_array # replace missing values if necessary if np.any(data_array == nodataval): data_array[data_array == nodataval] = np.nan gdal_data2 = gdal.Open('6600_2_10m_z33.tif') gt2 = gdal_data2.GetGeoTransform() gdal_band2 = gdal_data2.GetRasterBand(1) nodataval2 = gdal_band2.GetNoDataValue() # convert to a numpy array data_array2 = gdal_data2.ReadAsArray().astype(np.float) data_array2 # replace missing values if necessary if np.any(data_array2 == nodataval): data_array2[data_array2 == nodataval] = np.nan xres = gt[1] yres = gt[5] x = 
np.arange(gt[0], gt[0] + data_array.shape[1]*xres, xres) y = np.arange(gt[3], gt[3] + data_array.shape[0]*yres, yres) X, Y = np.meshgrid(x,y) xres2 = gt2[1] yres2 = gt2[5] x2 = np.arange(gt2[0], gt2[0] + data_array2.shape[1]*xres2, xres2) y2 = np.arange(gt2[3], gt2[3] + data_array2.shape[0]*yres2, yres2) X2, Y2 = np.meshgrid(x2,y2) inProj = Proj(init='EPSG:32633') # UTM coords, zone 33N, WGS84 datum outProj = Proj(init='EPSG:4326') # LatLon with WGS84 datum used by GPS units and Google Earth lon1,lat1 = X,Y LONO1,LATO1 = transform(inProj,outProj,lon1,lat1) # lon2,lat2 = X2,Y2 LONO2,LATO2 = transform(inProj,outProj,lon2,lat2) levels = np.arange(700,1800,50) #### Plot around Haukeliseter #### fig = plt.figure(figsize=(9,8)) plt.clf() #map = Lambert_map(lllon=lon2.min(), lllat=lat2.min(), # urlon=lon2.max(), urlat=lat2.max(), # lat0= 63.5, lon0= 15, res='i', fill=False,zoom=True) map= Lambert_map(lllon=7.05, lllat=59.73, urlon=7.35, urlat=59.91, lat0= 63.5, lon0= 15, res='i', fill=False,zoom=True) LON1,LAT1 = map(LONO1,LATO1) LON2,LAT2 = map(LONO2,LATO2) cmap = colors.ListedColormap([no0, no1, no2, no3, no4, no5, no6, no7, no8, no9, no10, \ no11, no12, no13, no14, no15, no16, no17, no18, no19, no20, \ no21]) norm = colors.BoundaryNorm(boundaries = levels, ncolors=cmap.N) PlotContours(LON1, LAT1, data_array, map, leveldist=None,levels=levels[::2], numbers=True, color= 'gray') PlotContours(LON2, LAT2, data_array2, map, leveldist=None,levels=levels[::2], numbers=True, color= 'gray') cs = map.contourf(LON1, LAT1, data_array, levels, norm=norm, cmap = cmap, extend='max') map.contourf(LON2, LAT2, data_array2, levels, norm=norm, cmap=cmap, extend='max') cb = plt.colorbar(cs, boundaries= levels, #location='right', extend='max') cb.set_label('Altitude [m]', size=18) cb.ax.tick_params(labelsize=16) plt.gca().set_aspect('equal', adjustable='box') cb.ax.set_xticklabels([700, '', '', '', 900,'','','',1100,'','','',1300,'','','',1500, '','','',1700,'']) # horizontal colorbar 
namestat = ['Haukeliseter (991 m a.s.l.)']#, 'lower left', 'upper right']#,'grid point'] lonstat = [7+12.87/60]#,7.05, 7.4]#,7.2] latstat= [59+48.73/60]#,59.65, 59.9]#,59.8] xpt, ypt= map(lonstat, latstat) map.plot(xpt,ypt,'ko') for i in range(len(namestat)): plt.text(xpt[i], ypt[i], namestat[i], fontsize=18,fontweight='bold', ha='center',va='bottom',color='black') fig_name = 'elevation_Haukeli.png' if savefig == 1: sF.save_figure_portrait(fig_dir,fig_name,form) print('saved: %s/%s' %(fig_dir,fig_name)) else: plt.show() plt.close() url = ('http://thredds.met.no/thredds/dodsC/meps25epsarchive/2016/12/24/meps_mbr0_pp_2_5km_20161224T12Z.nc') dataset = netCDF4.Dataset(url) land= dataset.variables['land_area_fraction'][:] lonpp= dataset.variables['longitude'][:] latpp= dataset.variables['latitude'][:] #T_2m= dataset.variables['air_temperature_2m'][:] alti= dataset.variables['altitude'][:] dataset.close() """ altitude map""" #plt.figure(1) fig = plt.figure(figsize=(9,8)) plt.clf() map= Lambert_map(lllon=7.05, lllat=59.73, urlon=7.35, urlat=59.91, lat0= 63.5, lon0= 15, res='i', fill=False,zoom=True) Lonpp,Latpp = map(lonpp,latpp) #levels = [850, 900,950,1000,1050,1100,1150, 1200, 1250, 1300,1350,1400] levels = np.arange(700,1800,50) PlotContours(Lonpp, Latpp, alti, map, leveldist=None,levels=levels, numbers=True, color= 'gray') cmap = colors.ListedColormap([no0, no1, no2, no3, no4, no5, no6, no7, no8, no9, no10, \ no11, no12, no13, no14, no15, no16, no17, no18, no19, no20, \ no21]) norm = colors.BoundaryNorm(boundaries = levels, ncolors=cmap.N) #PlotColorMap4(Lonpp, Latpp, alti, map, bounds= levels,color=cmap, label='Altitude') cs = map.contourf(Lonpp, Latpp, alti, levels, norm=norm, cmap = cmap, extend='max') cb = plt.colorbar(cs, boundaries= levels, #location='right', extend='max') cb.set_label('Altitude [m]', size=18) cb.ax.tick_params(labelsize=16) plt.gca().set_aspect('equal', adjustable='box') cb.ax.set_xticklabels([700, '', '', '', 
900,'','','',1100,'','','',1300,'','','',1500, '','','',1700,'']) # horizontal colorbar namestat = ['Haukeliseter']#, 'lower left', 'upper right']#,'grid point'] lonstat = [7+12.87/60]#,7.05, 7.4]#,7.2] latstat= [59+48.73/60]#,59.65, 59.9]#,59.8] xpt, ypt= map(lonstat, latstat) map.plot(xpt,ypt,'ko') for i in range(len(namestat)): plt.text(xpt[i], ypt[i], namestat[i], fontsize=18,fontweight='bold', ha='center',va='bottom',color='black') fig_name = 'MEPS_elevation_Haukeli_2.png' if savefig == 1: sF.save_figure_portrait(fig_dir,fig_name,form) print('saved: %s/%s' %(fig_dir,fig_name)) else: plt.show() plt.close() ```
github_jupyter
``` # Visualization of the KO+ChIP Gold Standard from: # Miraldi et al. (2018) "Leveraging chromatin accessibility for transcriptional regulatory network inference in Th17 Cells" # TO START: In the menu above, choose "Cell" --> "Run All", and network + heatmap will load # NOTE: Default limits networks to TF-TF edges in top 1 TF / gene model (.93 quantile), to see the full # network hit "restore" (in the drop-down menu in cell below) and set threshold to 0 and hit "threshold" # You can search for gene names in the search box below the network (hit "Match"), and find regulators ("targeted by") # Change "canvas" to "SVG" (drop-down menu in cell below) to enable drag interactions with nodes & labels # Change "SVG" to "canvas" to speed up layout operations # More info about jp_gene_viz and user interface instructions are available on Github: # https://github.com/simonsfoundation/jp_gene_viz/blob/master/doc/dNetwork%20widget%20overview.ipynb # directory containing gene expression data and network folder directory = "." # folder containing networks netPath = 'Networks' # network file name networkFile = 'ChIP_A17_KOall_ATh_bias25_TFmRNA_sp.tsv' # title for network figure netTitle = 'ChIP/ATAC(Th17)+KO+ATAC(Th), bias = 25_TFmRNA, TFA = TF mRNA' # name of gene expression file expressionFile = 'Th0_Th17_48hTh.txt' # column of gene expression file to color network nodes rnaSampleOfInt = 'Th17(48h)' # edge cutoff -- for Inferelator TRNs, corresponds to signed quantile (rank of edges in 15 TFs / gene models), # increase from 0 --> 1 to get more significant edges (e.g., .33 would correspond to edges only in 10 TFs / gene # models) edgeCutoff = .93 import sys if ".." 
not in sys.path: sys.path.append("..") from jp_gene_viz import dNetwork dNetwork.load_javascript_support() # from jp_gene_viz import multiple_network from jp_gene_viz import LExpression LExpression.load_javascript_support() # Load network linked to gene expression data L = LExpression.LinkedExpressionNetwork() L.show() # Load Network and Heatmap L.load_network(directory + '/' + netPath + '/' + networkFile) L.load_heatmap(directory + '/' + expressionFile) N = L.network N.set_title(netTitle) N.threshhold_slider.value = edgeCutoff N.apply_click(None) N.draw() # Add labels to nodes N.labels_button.value=True # Limit to TFs only, remove unconnected TFs, choose and set network layout N.restore_click() N.tf_only_click() N.connected_only_click() N.layout_dropdown.value = 'fruchterman_reingold' N.layout_click() # Interact with Heatmap # Limit genes in heatmap to network genes L.gene_click(None) # Z-score heatmap values L.expression.transform_dropdown.value = 'Z score' L.expression.apply_transform() # Choose a column in the heatmap (e.g., 48h Th17) to color nodes L.expression.col = rnaSampleOfInt L.condition_click(None) # Switch SVG layout to get line colors, then switch back to faster canvas mode N.force_svg(None) ```
github_jupyter
``` from round import lc import matplotlib.pyplot as pl import glob %matplotlib inline pl.rc('xtick', labelsize=20) pl.rc('ytick', labelsize=20) pl.rc('axes', labelsize=25) pl.rc('axes', titlesize=30) pl.rc('legend', handlelength=3) pl.rc('legend', fontsize=20) files = glob.glob("../light_curves/*.fits") i = 0 # 38, 62 i += 1 light_curve = lc.LightCurve.everest(files[i]) print(i) fig = pl.figure(figsize=(12, 5)) light_curve.plot_raw(fig.gca(), 'k.') light_curve.compute(mcmc=True, mcmc_draws=500, tune=500, target_accept=0.9, prior_sig=3.0, with_SHOTerm=False, cores=2) fig, axs = pl.subplots(2, 1, figsize=(15, 15)) #light_curve.plot_trend(axs[0], linewidth=3, color="#f55649", label="third order polynomial fit") light_curve.plot_raw(axs[0], 'k.') #fig.gca().plot(light_curve.raw_t[light_curve.masked], light_curve.raw_flux[light_curve.masked], # 'r.', label="masked outliers") pl.rc('xtick', labelsize=20) pl.rc('ytick', labelsize=20) pl.rc('axes', labelsize=35) pl.rc('axes', titlesize=35) pl.rc('legend', handlelength=3) pl.rc('legend', fontsize=20) #axs[0].set_title("Everest Light Curve for EPIC 220279363") axs[0].annotate("EPIC {0}".format(light_curve.ident), xy=(0.1, 0.85), xycoords="axes fraction", fontsize=30) axs[0].set_xlabel("Time (BJD - 2454833)") axs[0].set_ylabel("Flux") light_curve.plot_autocor(axs[1], "k", linewidth=3) axs[1].set_ylabel("ACF") axs[1].set_xlabel("Lag (days)") pl.savefig("/Users/tgordon/Desktop/everest_{0}.pdf".format(i)) pl.rc('xtick', labelsize=20) pl.rc('ytick', labelsize=20) pl.rc('axes', labelsize=25) pl.rc('axes', titlesize=30) pl.rc('legend', handlelength=3) pl.rc('legend', fontsize=20) fig = pl.figure(figsize=(20, 8)) ax = fig.gca() light_curve.plot_autocor(ax, "k", linewidth=3) #ax.set_title("Autocorrelation Function for EPIC 220279363") ax.set_ylabel("ACF") ax.set_xlabel("Lag (days)") pl.savefig("/Users/tgordon/Desktop/acf_63.png") fig = pl.figure(figsize=(20, 8)) ax = fig.gca() light_curve.plot(ax, 'k.', label="normalized everest 
flux") light_curve.plot_map_soln(ax, t=np.linspace(light_curve.t[0], light_curve.t[-1], 1000), linewidth=3, color="#f55649", label="GP prediction") ax.set_title("Maximum-likelihood GP Prediction", fontsize=20) ax.set_ylabel("Normalized Flux", fontsize=15) light_curve.plot_corner(smooth=True, truths=light_curve.mcmc_summary["mean"].values, truth_color="#f55649"); import pymc3 as pm import corner pl.rc('xtick', labelsize=12) pl.rc('ytick', labelsize=12) pl.rc('axes', labelsize=25) pl.rc('axes', titlesize=30) pl.rc('legend', handlelength=3) pl.rc('legend', fontsize=20) samples = pm.trace_to_dataframe(light_curve.trace, varnames=["logperiod", "logamp", "logs2"]) corn = corner.corner(samples, smooth=1, labels=[r"$\log(P_\mathrm{rot})$", r"$\log(S_0)$", r"$\log(\sigma)$"]) #pl.annotate("EPIC {0}".format(light_curve.ident), xy=(0.4, 0.95), xycoords="figure fraction", fontsize=30) pl.savefig("/Users/tgordon/Desktop/corner_{0}.pdf".format(i)) ```
github_jupyter
[this doc on github](https://github.com/dotnet/interactive/tree/master/samples/notebooks/polyglot) # Visualizing the Johns Hopkins COVID-19 time series data **This is a work in progress.** It doesn't work yet in [Binder](https://mybinder.org/v2/gh/dotnet/interactive/master?urlpath=lab) because it relies on HTTP communication between the kernel and the Jupyter frontend. Also, due to travel restrictions, you should run this at home on isolated compute. *And don't forget to wash your hands.* Since Johns Hopkins has put COVID-19 time series data on [GitHub](https://github.com/CSSEGISandData/COVID-19), let's take a look at it. We can download it using PowerShell: ``` #!pwsh Invoke-WebRequest -Uri "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv" -OutFile "./Confirmed.csv" Invoke-WebRequest -Uri "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv" -OutFile "./Deaths.csv" Invoke-WebRequest -Uri "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv" -OutFile "./Recovered.csv" ``` It needs a little cleaning up: ``` using System.IO; using System.Text.RegularExpressions; Clean("Confirmed.csv"); Clean("Deaths.csv"); Clean("Recovered.csv"); void Clean(string filePath) { var raw = File.ReadAllText(filePath); var regex = new Regex("\\\"(.*?)\\\""); var cleaned = regex.Replace(raw, m => m.Value.Replace(",", " in ")); File.WriteAllText(filePath, cleaned); } "All cleaned up!" ``` Next, let's load it into a data frame. 
``` #r "nuget:Microsoft.Data.Analysis,0.2.0" using Microsoft.Data.Analysis; var deaths = DataFrame.LoadCsv("./Deaths.csv"); var confirmed = DataFrame.LoadCsv("./Confirmed.csv"); var recovered = DataFrame.LoadCsv("./Recovered.csv"); var displayedValue = display("Processing data"); var offset = 4; var series = new List<object>(); for(var i = offset; i < deaths.Columns.Count; i++){ await Task.Delay(100); var date = deaths.Columns[i].Name; var deathFiltered = deaths[deaths.Columns[i].ElementwiseNotEquals(0)]; var confirmedFiltered = confirmed[confirmed.Columns[i].ElementwiseNotEquals(0)]; var recoveredFiltered = recovered[recovered.Columns[i].ElementwiseNotEquals(0)]; displayedValue.Update($"processing {date}"); series.Add(new { date = date, deathsSeries = new { latitude = deathFiltered["Lat"], longitude = deathFiltered["Long"], data = deathFiltered.Columns[i] }, confirmedSeries = new { latitude = confirmedFiltered["Lat"], longitude = confirmedFiltered["Long"], data = confirmedFiltered.Columns[i] }, recoveredSeries = new { latitude = recoveredFiltered["Lat"], longitude = recoveredFiltered["Long"], data = recoveredFiltered.Columns[i] } }); } displayedValue.Update("Ready."); ``` Because we've stored our data in top-level variables (`deathsSeries`, `confirmedSeries`, `recoveredSeries`, etc.) in the C# kernel, they're accessible from JavaScript by calling `interactive.csharp.getVariable`. The data will be returned as JSON and we can plot it using the library of our choice, pulled in using [RequireJS](https://requirejs.org/). We'll use [Plotly](https://plot.ly/). 
``` #!js notebookScope.plot = function (plotTarget) { let loadPlotly = getJsLoader({ context: "COVID", paths: { plotly: "https://cdn.plot.ly/plotly-latest.min" } }); loadPlotly(["plotly"], (Plotly) => { if (typeof (notebookScope.updateInterval) !== 'undefined') { clearInterval(notebookScope.updateInterval); } let index = 0; if (typeof (document.getElementById(plotTarget)) !== 'undefined') { interactive.csharp.getVariable("series") .then(series => { var { deathsSeries, confirmedSeries, recoveredSeries, date } = series[index]; var recovered = { name: "Recovered", type: "scattergeo", mode: "markers", geo: "geo", lat: recoveredSeries.latitude, lon: recoveredSeries.longitude, text: recoveredSeries.data, marker: { symbol: "square", color: "Green" } }; var deaths = { name: "Fatal", type: "scattergeo", geo: "geo2", mode: "markers", lat: deathsSeries.latitude, lon: deathsSeries.longitude, text: deathsSeries.data, marker: { symbol: "circle", color: "Black" } }; var confirmed = { name: "Total confirmed", type: "scattergeo", geo: "geo3", mode: "markers", lat: confirmedSeries.latitude, lon: confirmedSeries.longitude, text: confirmedSeries.data, marker: { symbol: "diamond", color: "#DC7633" } }; var traces = [recovered, deaths, confirmed]; var layout = { title: "COVID-19 cases (" + date + ")", grid: { columns: 3, rows: 1 }, geo: { scope: "world", showland: true, showcountries: true, bgcolor: "rgb(90,90,90)", landcolor: "rgb(250,250,250)", domain: { row: 0, column: 0 } }, geo2: { scope: "world", showland: true, showcountries: true, bgcolor: "rgb(90,90,90)", landcolor: "rgb(250,250,250)", domain: { row: 0, column: 1 } }, geo3: { scope: "world", showland: true, showcountries: true, bgcolor: "rgb(90,90,90)", landcolor: "rgb(250,250,250)", domain: { row: 0, column: 2 } } }; if (typeof (document.getElementById(plotTarget)) !== 'undefined') { Plotly.newPlot(plotTarget, traces, layout); } let updateCovidPlot = () => { if (typeof (document.getElementById(plotTarget)) !== 'undefined') { 
index++; if (index === series.length) { clearInterval(notebookScope.updateInterval); return; } var { deathsSeries, confirmedSeries, recoveredSeries, currentSeries, date } = series[index]; Plotly.animate("plotlyChartCovid", { data: [ { lat: recoveredSeries.latitude, lon: recoveredSeries.longitude, text: recoveredSeries.data }, { lat: deathsSeries.latitude, lon: deathsSeries.longitude, text: deathsSeries.data }, { lat: confirmedSeries.latitude, lon: confirmedSeries.longitude, text: confirmedSeries.data }], layout: { title: "COVID-19 " + date } }); } } notebookScope.updateInterval = setInterval(() => updateCovidPlot(), 250); }); } }); }; ``` Notice the `setInterval` call near the end of the previous cell. This rechecks the data in the kernel and updates the plot. Back on the kernel, we can now update the data so that the kernel can see it. Yes, this is a contrived example, and we're planning to support true streaming data, but it's a start. ``` #!html <div id="plotlyChartCovid"></div> #!js notebookScope.plot("plotlyChartCovid"); #!about ```
github_jupyter
# Examples Below you will find various examples for you to experiment with HOG. For each image, you can modify the `cell_size`, `num_cells_per_block`, and `num_bins` (the number of angular bins in your histograms), to see how those parameters affect the resulting HOG descriptor. These examples, will help you get some intuition for what each parameter does and how they can be *tuned* to pick out the amount of detail required. Below is a list of the available images that you can load: * cat.jpeg * jeep1.jpeg * jeep2.jpeg * jeep3.jpeg * man.jpeg * pedestrian_bike.jpeg * roundabout.jpeg * scrabble.jpeg * shuttle.jpeg * triangle_tile.jpeg * watch.jpeg * woman.jpeg **NOTE**: If you are running this notebook in the Udacity workspace, there is around a 2 second lag in the interactive plot. This means that if you click in the image to zoom in, it will take about 2 seconds for the plot to refresh. ``` %matplotlib notebook import cv2 import copy import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as patches # Set the default figure size plt.rcParams['figure.figsize'] = [9.8, 9] # -------------------------- Select the Image and Specify the parameters for our HOG descriptor -------------------------- # Load the image image = cv2.imread('./images/jeep2.jpeg') # Cell Size in pixels (width, height). Must be smaller than the size of the detection window # and must be chosen so that the resulting Block Size is smaller than the detection window. cell_size = (8, 8) # Number of cells per block in each direction (x, y). 
Must be chosen so that the resulting # Block Size is smaller than the detection window num_cells_per_block = (2, 2) # Number of gradient orientation bins num_bins = 9 # ------------------------------------------------------------------------------------------------------------------------- # Convert the original image to RGB original_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Convert the original image to gray scale gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # Block Size in pixels (width, height). Must be an integer multiple of Cell Size. # The Block Size must be smaller than the detection window block_size = (num_cells_per_block[0] * cell_size[0], num_cells_per_block[1] * cell_size[1]) # Calculate the number of cells that fit in our image in the x and y directions x_cells = gray_image.shape[1] // cell_size[0] y_cells = gray_image.shape[0] // cell_size[1] # Horizontal distance between blocks in units of Cell Size. Must be an integer and it must # be set such that (x_cells - num_cells_per_block[0]) / h_stride = integer. h_stride = 1 # Vertical distance between blocks in units of Cell Size. Must be an integer and it must # be set such that (y_cells - num_cells_per_block[1]) / v_stride = integer. v_stride = 1 # Block Stride in pixels (horizantal, vertical). Must be an integer multiple of Cell Size block_stride = (cell_size[0] * h_stride, cell_size[1] * v_stride) # Specify the size of the detection window (Region of Interest) in pixels (width, height). # It must be an integer multiple of Cell Size and it must cover the entire image. Because # the detection window must be an integer multiple of cell size, depending on the size of # your cells, the resulting detection window might be slightly smaller than the image. # This is perfectly ok. 
win_size = (x_cells * cell_size[0] , y_cells * cell_size[1]) # Print the shape of the gray scale image for reference print('\nThe gray scale image has shape: ', gray_image.shape) print() # Print the parameters of our HOG descriptor print('HOG Descriptor Parameters:\n') print('Window Size:', win_size) print('Cell Size:', cell_size) print('Block Size:', block_size) print('Block Stride:', block_stride) print('Number of Bins:', num_bins) print() # Set the parameters of the HOG descriptor using the variables defined above hog = cv2.HOGDescriptor(win_size, block_size, block_stride, cell_size, num_bins) # Compute the HOG Descriptor for the gray scale image hog_descriptor = hog.compute(gray_image) # Calculate the total number of blocks along the width of the detection window tot_bx = np.uint32(((x_cells - num_cells_per_block[0]) / h_stride) + 1) # Calculate the total number of blocks along the height of the detection window tot_by = np.uint32(((y_cells - num_cells_per_block[1]) / v_stride) + 1) # Calculate the total number of elements in the feature vector tot_els = (tot_bx) * (tot_by) * num_cells_per_block[0] * num_cells_per_block[1] * num_bins # Reshape the feature vector to [blocks_y, blocks_x, num_cells_per_block_x, num_cells_per_block_y, num_bins]. # The blocks_x and blocks_y will be transposed so that the first index (blocks_y) referes to the row number # and the second index to the column number. This will be useful later when we plot the feature vector, so # that the feature vector indexing matches the image indexing. 
hog_descriptor_reshaped = hog_descriptor.reshape(tot_bx, tot_by, num_cells_per_block[0], num_cells_per_block[1], num_bins).transpose((1, 0, 2, 3, 4)) # Create an array that will hold the average gradients for each cell ave_grad = np.zeros((y_cells, x_cells, num_bins)) # Create an array that will count the number of histograms per cell hist_counter = np.zeros((y_cells, x_cells, 1)) # Add up all the histograms for each cell and count the number of histograms per cell for i in range (num_cells_per_block[0]): for j in range(num_cells_per_block[1]): ave_grad[i:tot_by + i, j:tot_bx + j] += hog_descriptor_reshaped[:, :, i, j, :] hist_counter[i:tot_by + i, j:tot_bx + j] += 1 # Calculate the average gradient for each cell ave_grad /= hist_counter # Calculate the total number of vectors we have in all the cells. len_vecs = ave_grad.shape[0] * ave_grad.shape[1] * ave_grad.shape[2] # Create an array that has num_bins equally spaced between 0 and 180 degress in radians. deg = np.linspace(0, np.pi, num_bins, endpoint = False) # Each cell will have a histogram with num_bins. For each cell, plot each bin as a vector (with its magnitude # equal to the height of the bin in the histogram, and its angle corresponding to the bin in the histogram). # To do this, create rank 1 arrays that will hold the (x,y)-coordinate of all the vectors in all the cells in the # image. Also, create the rank 1 arrays that will hold all the (U,V)-components of all the vectors in all the # cells in the image. Create the arrays that will hold all the vector positons and components. U = np.zeros((len_vecs)) V = np.zeros((len_vecs)) X = np.zeros((len_vecs)) Y = np.zeros((len_vecs)) # Set the counter to zero counter = 0 # Use the cosine and sine functions to calculate the vector components (U,V) from their maginitudes. Remember the # cosine and sine functions take angles in radians. 
Calculate the vector positions and magnitudes from the # average gradient array for i in range(ave_grad.shape[0]): for j in range(ave_grad.shape[1]): for k in range(ave_grad.shape[2]): U[counter] = ave_grad[i,j,k] * np.cos(deg[k]) V[counter] = ave_grad[i,j,k] * np.sin(deg[k]) X[counter] = (cell_size[0] / 2) + (cell_size[0] * i) Y[counter] = (cell_size[1] / 2) + (cell_size[1] * j) counter = counter + 1 # Create the bins in degress to plot our histogram. angle_axis = np.linspace(0, 180, num_bins, endpoint = False) angle_axis += ((angle_axis[1] - angle_axis[0]) / 2) # Create a figure with 4 subplots arranged in 2 x 2 fig, ((a,b),(c,d)) = plt.subplots(2,2) # Set the title of each subplot a.set(title = 'Gray Scale Image\n(Click to Zoom)') b.set(title = 'HOG Descriptor\n(Click to Zoom)') c.set(title = 'Zoom Window', xlim = (0, 18), ylim = (0, 18), autoscale_on = False) d.set(title = 'Histogram of Gradients') # Plot the gray scale image a.imshow(gray_image, cmap = 'gray') a.set_aspect(aspect = 1) # Plot the feature vector (HOG Descriptor) b.quiver(Y, X, U, V, color = 'white', headwidth = 0, headlength = 0, scale_units = 'inches', scale = 5) b.invert_yaxis() b.set_aspect(aspect = 1) b.set_facecolor('black') # Define function for interactive zoom def onpress(event): #Unless the left mouse button is pressed do nothing if event.button != 1: return # Only accept clicks for subplots a and b if event.inaxes in [a, b]: # Get mouse click coordinates x, y = event.xdata, event.ydata # Select the cell closest to the mouse click coordinates cell_num_x = np.uint32(x / cell_size[0]) cell_num_y = np.uint32(y / cell_size[1]) # Set the edge coordinates of the rectangle patch edgex = x - (x % cell_size[0]) edgey = y - (y % cell_size[1]) # Create a rectangle patch that matches the the cell selected above rect = patches.Rectangle((edgex, edgey), cell_size[0], cell_size[1], linewidth = 1, edgecolor = 'magenta', facecolor='none') # A single patch can only be used in a single plot. 
Create copies # of the patch to use in the other subplots rect2 = copy.copy(rect) rect3 = copy.copy(rect) # Update all subplots a.clear() a.set(title = 'Gray Scale Image\n(Click to Zoom)') a.imshow(gray_image, cmap = 'gray') a.set_aspect(aspect = 1) a.add_patch(rect) b.clear() b.set(title = 'HOG Descriptor\n(Click to Zoom)') b.quiver(Y, X, U, V, color = 'white', headwidth = 0, headlength = 0, scale_units = 'inches', scale = 5) b.invert_yaxis() b.set_aspect(aspect = 1) b.set_facecolor('black') b.add_patch(rect2) c.clear() c.set(title = 'Zoom Window') c.quiver(Y, X, U, V, color = 'white', headwidth = 0, headlength = 0, scale_units = 'inches', scale = 1) c.set_xlim(edgex - cell_size[0], edgex + (2 * cell_size[0])) c.set_ylim(edgey - cell_size[1], edgey + (2 * cell_size[1])) c.invert_yaxis() c.set_aspect(aspect = 1) c.set_facecolor('black') c.add_patch(rect3) d.clear() d.set(title = 'Histogram of Gradients') d.grid() d.set_xlim(0, 180) d.set_xticks(angle_axis) d.set_xlabel('Angle') d.bar(angle_axis, ave_grad[cell_num_y, cell_num_x, :], 180 // num_bins, align = 'center', alpha = 0.5, linewidth = 1.2, edgecolor = 'k') fig.canvas.draw() # Create a connection between the figure and the mouse click fig.canvas.mpl_connect('button_press_event', onpress) plt.show() ```
github_jupyter
<a href="https://colab.research.google.com/github/ai-fast-track/icevision-gradio/blob/master/IceApp_pets.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # IceVision Deployment App: PETS Dataset This example uses Faster RCNN trained weights using the [PETS dataset](https://airctic.github.io/icedata/pets/) About IceVision: - an Object-Detection Framework that connects to different libraries/frameworks such as Fastai, Pytorch Lightning, and Pytorch with more to come. - Features a Unified Data API with out-of-the-box support for common annotation formats (COCO, VOC, etc.) - Provides flexible model implementations with pluggable backbones ## Installing packages ``` !pip install icevision[inference] !pip install icedata !pip install gradio ``` ## Imports ``` from icevision.all import * import icedata import PIL, requests import torch from torchvision import transforms import gradio as gr ``` ## Loading trained model ``` class_map = icedata.pets.class_map() model = icedata.pets.trained_models.faster_rcnn_resnet50_fpn() ``` ## Defining the predict() method ``` def predict( model, image, detection_threshold: float = 0.5, mask_threshold: float = 0.5 ): tfms_ = tfms.A.Adapter([tfms.A.Normalize()]) # Whenever you have images in memory (numpy arrays) you can use `Dataset.from_images` infer_ds = Dataset.from_images([image], tfms_) batch, samples = faster_rcnn.build_infer_batch(infer_ds) preds = faster_rcnn.predict( model=model, batch=batch, detection_threshold=detection_threshold ) return samples[0]["img"], preds[0] ``` ## Defining the `show_preds` method: called by `gr.Interface(fn=show_preds, ...)` ``` def show_preds(input_image, display_list, detection_threshold): display_label = ("Label" in display_list) display_bbox = ("BBox" in display_list) if detection_threshold==0: detection_threshold=0.5 img, pred = predict(model=model, image=input_image, detection_threshold=detection_threshold) # print(pred) img = 
draw_pred(img=img, pred=pred, class_map=class_map, denormalize_fn=denormalize_imagenet, display_label=display_label, display_bbox=display_bbox) img = PIL.Image.fromarray(img) # print("Output Image: ", img.size, type(img)) return img ``` ## Gradio User Interface ``` display_chkbox = gr.inputs.CheckboxGroup(["Label", "BBox"], label="Display") detection_threshold_slider = gr.inputs.Slider(minimum=0, maximum=1, step=0.1, default=0.5, label="Detection Threshold") outputs = gr.outputs.Image(type="pil") gr_interface = gr.Interface(fn=show_preds, inputs=["image", display_chkbox, detection_threshold_slider], outputs=outputs, title='IceApp - PETS') gr_interface.launch(inline=False, share=True, debug=True) ``` ## Enjoy! If you have any questions, please feel free to [join us](https://discord.gg/JDBeZYK)
github_jupyter
<div class="alert alert-info" role="alert"> This tutorial contains a lot of bokeh plots, which may take a little while to load and render. </div> ``Element``s are the basic building blocks for any HoloViews visualization. These are the objects that can be composed together using the various [Container](Containers.ipynb) types. Here in this overview, we show an example of how to build each of these ``Element``s directly out of Python or Numpy data structures. An even more powerful way to use them is by collecting similar ``Element``s into a HoloMap, as described in [Exploring Data](Exploring_Data.ipynb), so that you can explore, select, slice, and animate them flexibly, but here we focus on having small, self-contained examples. Complete reference material for each type can be accessed using our [documentation system](Introduction.ipynb#ParamDoc). This tutorial uses the default matplotlib plotting backend; see the [Bokeh Elements](Bokeh_Elements.ipynb) tutorial for the corresponding bokeh plots. ## Element types This class hierarchy shows each of the ``Element`` types. Each type is named for the default or expected way that the underlying data can be visualized. E.g., if your data is wrapped into a ``Surface`` object, it will display as a 3D surface by default, whereas the same data embedded in an ``Image`` object will display as a 2D raster image. But please note that the specification and implementation for each ``Element`` type does not actually include *any* such visualization -- the name merely serves as a semantic indication that you ordinarily think of the data as being laid out visually in that way. The actual plotting is done by a separate plotting subsystem, while the objects themselves focus on storing your data and the metadata needed to describe and use it. 
This separation of data and visualization is described in detail in the [Options tutorial](Options.ipynb), which describes all about how to find out the options available for each ``Element`` type and change them if necessary, from either Python or IPython Notebook. When using this tutorial interactively in an IPython/Jupyter notebook session, we suggest adding ``%output info=True`` after the call to ``notebook_extension`` below, which will pop up a detailed list and explanation of the available options for visualizing each ``Element`` type, after that notebook cell is executed. Then, to find out all the options for any of these ``Element`` types, just press ``<Shift-Enter>`` on the corresponding cell in the live notebook. The types available: <dl class="dl-horizontal"> <dt><a href="#Element"><code>Element</code></a></dt><dd>The base class of all <code>Elements</code>.</dd> </dl> ### <a id='ChartIndex'></a> <a href="#Chart Elements"><code>Charts:</code></a> <dl class="dl-horizontal"> <dt><a href="#Curve"><code>Curve</code></a></dt><dd>A continuous relation between a dependent and an independent variable. <font color='green'>&#x2713;</font></dd> <dt><a href="#ErrorBars"><code>ErrorBars</code></a></dt><dd>A collection of x-/y-coordinates with associated error magnitudes. <font color='green'>&#x2713;</font></dd> <dt><a href="#Spread"><code>Spread</code></a></dt><dd>Continuous version of ErrorBars. <font color='green'>&#x2713;</font></dd> <dt><a href="#Area"><code>Area</code></a></dt><dd>Area under the curve or between curves. <font color='green'>&#x2713;</font></dd> <dt><a href="#Bars"><code>Bars</code></a></dt><dd>Data collected and binned into categories. <font color='green'>&#x2713;</font></dd> <dt><a href="#Histogram"><code>Histogram</code></a></dt><dd>Data collected and binned in a continuous space using specified bin edges. 
<font color='green'>&#x2713;</font></dd> <dt><a href="#BoxWhisker"><code>BoxWhisker</code></a></dt><dd>Distributions of data varying by 0-N key dimensions.<font color='green'>&#x2713;</font></dd> <dt><a href="#Scatter"><code>Scatter</code></a></dt><dd>Discontinuous collection of points indexed over a single dimension. <font color='green'>&#x2713;</font></dd> <dt><a href="#Points"><code>Points</code></a></dt><dd>Discontinuous collection of points indexed over two dimensions. <font color='green'>&#x2713;</font></dd> <dt><a href="#VectorField"><code>VectorField</code></a></dt><dd>Cyclic variable (and optional auxiliary data) distributed over two-dimensional space. <font color='green'>&#x2713;</font></dd> <dt><a href="#Spikes"><code>Spikes</code></a></dt><dd>A collection of horizontal or vertical lines at various locations with fixed height (1D) or variable height (2D). <font color='green'>&#x2713;</font></dd> <dt><a href="#SideHistogram"><code>SideHistogram</code></a></dt><dd>Histogram binning data contained by some other <code>Element</code>. <font color='green'>&#x2713;</font></dd> </dl> ### <a id='Chart3DIndex'></a> <a href="#Chart3D Elements"><code>Chart3D Elements:</code></a> <dl class="dl-horizontal"> <dt><a href="#Surface"><code>Surface</code></a></dt><dd>Continuous collection of points in a three-dimensional space. <font color='red'>&#x2717;</font></dd> <dt><a href="#Scatter3D"><code>Scatter3D</code></a></dt><dd>Discontinuous collection of points in a three-dimensional space. <font color='red'>&#x2717;</font></dd> <dt><a href="#TriSurface"><code>TriSurface</code></a></dt><dd>Continuous but irregular collection of points interpolated into a Surface using Delaunay triangulation. <font color='red'>&#x2717;</font></dd> </dl> ### <a id='RasterIndex'></a> <a href="#Raster Elements"><code>Raster Elements:</code></a> <dl class="dl-horizontal"> <dt><a href="#Raster"><code>Raster</code></a></dt><dd>The base class of all rasters containing two-dimensional arrays. 
<font color='green'>&#x2713;</font></dd> <dt><a href="#QuadMesh"><code>QuadMesh</code></a></dt><dd>Raster type specifying 2D bins with two-dimensional array of values. <font color='green'>&#x2713;</font></dd> <dt><a href="#HeatMap"><code>HeatMap</code></a></dt><dd>Raster displaying sparse, discontinuous data collected in a two-dimensional space. <font color='green'>&#x2713;</font></dd> <dt><a href="#Image"><code>Image</code></a></dt><dd>Raster containing a two-dimensional array covering a continuous space (sliceable). <font color='green'>&#x2713;</font></dd> <dt><a href="#RGB"><code>RGB</code></a></dt><dd>Image with 3 (R,G,B) or 4 (R,G,B,Alpha) color channels. <font color='green'>&#x2713;</font></dd> <dt><a href="#HSV"><code>HSV</code></a></dt><dd>Image with 3 (Hue, Saturation, Value) or 4 channels. <font color='green'>&#x2713;</font></dd> </dl> ### <a id='TabularIndex'></a> <a href="#Tabular Elements"><code>Tabular Elements:</code></a> <dl class="dl-horizontal"> <dt><a href="#ItemTable"><code>ItemTable</code></a></dt><dd>Ordered collection of key-value pairs (ordered dictionary). <font color='green'>&#x2713;</font></dd> <dt><a href="#Table"><code>Table</code></a></dt><dd>Collection of arbitrary data with arbitrary key and value dimensions. <font color='green'>&#x2713;</font></dd> </dl> ### <a id='AnnotationIndex'></a> <a href="#Annotation Elements"><code>Annotations:</code></a> <dl class="dl-horizontal"> <dt><a href="#VLine"><code>VLine</code></a></dt><dd>Vertical line annotation. <font color='green'>&#x2713;</font></dd> <dt><a href="#HLine"><code>HLine</code></a></dt><dd>Horizontal line annotation. <font color='green'>&#x2713;</font></dd> <dt><a href="#Spline"><code>Spline</code></a></dt><dd>Bezier spline (arbitrary curves). <font color='green'>&#x2713;</font></dd> <dt><a href="#Text"><code>Text</code></a></dt><dd>Text annotation on an <code>Element</code>. 
<font color='green'>&#x2713;</font></dd> <dt><a href="#Arrow"><code>Arrow</code></a></dt><dd>Arrow on an <code>Element</code> with optional text label. <font color='red'>&#x2717;</font></dd> </dl> ### <a id='PathIndex'></a> <a href="#Path Elements"><code>Paths:</code></a> <dl class="dl-horizontal"> <dt><a href="#Path"><code>Path</code></a></dt><dd>Collection of paths. <font color='green'>&#x2713;</font></dd> <dt><a href="#Contours"><code>Contours</code></a></dt><dd>Collection of paths, each with an associated value. <font color='green'>&#x2713;</font></dd> <dt><a href="#Polygons"><code>Polygons</code></a></dt><dd>Collection of filled, closed paths with an associated value. <font color='green'>&#x2713;</font></dd> <dt><a href="#Bounds"><code>Bounds</code></a></dt><dd>Box specified by corner positions. <font color='green'>&#x2713;</font></dd> <dt><a href="#Box"><code>Box</code></a></dt><dd>Box specified by center position, radius, and aspect ratio. <font color='green'>&#x2713;</font></dd> <dt><a href="#Ellipse"><code>Ellipse</code></a></dt><dd>Ellipse specified by center position, radius, and aspect ratio. <font color='green'>&#x2713;</font></dd> </dl> ## ``Element`` <a id='Element'></a> **The basic or fundamental types of data that can be visualized.** ``Element`` is the base class for all the other HoloViews objects shown in this section. All ``Element`` objects accept ``data`` as the first argument to define the contents of that element. In addition to its implicit type, each element object has a ``group`` string defining its category, and a ``label`` naming this particular item, as described in the [Introduction](Introduction.ipynb#value). 
When rich display is off, or if no visualization has been defined for that type of ``Element``, the ``Element`` is presented with a default textual representation: ``` import holoviews as hv hv.notebook_extension(bokeh=True) hv.Element(None, group='Value', label='Label') ``` In addition, ``Element`` has key dimensions (``kdims``), value dimensions (``vdims``), and constant dimensions (``cdims``) to describe the semantics of indexing within the ``Element``, the semantics of the underlying data contained by the ``Element``, and any constant parameters associated with the object, respectively. Dimensions are described in the [Introduction](Introduction.ipynb). The remaining ``Element`` types each have a rich, graphical display as shown below. ## ``Chart`` Elements <a id='Chart Elements'></a> **Visualization of a dependent variable against an independent variable** The first large class of ``Elements`` is the ``Chart`` elements. These objects have at least one fully indexable, sliceable key dimension (typically the *x* axis in a plot), and usually have one or more value dimension(s) (often the *y* axis) that may or may not be indexable depending on the implementation. The key dimensions are normally the parameter settings for which things are measured, and the value dimensions are the data points recorded at those settings. As described in the [Columnar Data tutorial](Columnar_Data.ipynb), the data can be stored in several different internal formats, such as a NumPy array of shape (N, D), where N is the number of samples and D the number of dimensions. A somewhat larger list of formats can be accepted, including any of the supported internal formats, or 1. As a list of length N containing tuples of length D. 2. As a tuple of length D containing iterables of length N. 
### ``Curve`` <a id='Curve'></a> ``` import numpy as np points = [(0.1*i, np.sin(0.1*i)) for i in range(100)] hv.Curve(points) ``` A ``Curve`` is a set of values provided for some set of keys from a [continuously indexable 1D coordinate system](Continuous_Coordinates.ipynb), where the plotted values will be connected up because they are assumed to be samples from a continuous relation. ### ``ErrorBars`` <a id='ErrorBars'></a> ``` np.random.seed(7) points = [(0.1*i, np.sin(0.1*i)) for i in range(100)] errors = [(0.1*i, np.sin(0.1*i), np.random.rand()/2) for i in np.linspace(0, 100, 11)] hv.Curve(points) * hv.ErrorBars(errors) ``` ``ErrorBars`` is a set of x-/y-coordinates with associated error values. Error values may be either symmetric or asymmetric, and thus can be supplied as an Nx3 or Nx4 array (or any of the alternative constructors Chart Elements allow). ``` %%opts ErrorBars points = [(0.1*i, np.sin(0.1*i)) for i in range(100)] errors = [(0.1*i, np.sin(0.1*i), np.random.rand()/2, np.random.rand()/4) for i in np.linspace(0, 100, 11)] hv.Curve(points) * hv.ErrorBars(errors, vdims=['y', 'yerrneg', 'yerrpos']) ``` ### ``Area`` <a id='Area'></a> ** *Area under the curve* ** By default the Area Element draws just the area under the curve, i.e. the region between the curve and the origin. ``` xs = np.linspace(0, np.pi*4, 40) hv.Area((xs, np.sin(xs))) ``` ** * Area between curves * ** When supplied a second value dimension the area is defined as the area between two curves. ``` X = np.linspace(0,3,200) Y = X**2 + 3 Y2 = np.exp(X) + 2 Y3 = np.cos(X) hv.Area((X, Y, Y2), vdims=['y', 'y2']) * hv.Area((X, Y, Y3), vdims=['y', 'y3']) ``` #### Stacked areas Areas are also useful to visualize multiple variables changing over time, but in order to be able to compare them the areas need to be stacked. Therefore the ``operation`` module provides the ``stack_area`` operation which makes it trivial to stack multiple Area in an (Nd)Overlay. 
In this example we will generate a set of 5 arrays representing percentages and create an Overlay of them. Then we simply call the ``stack_area`` operation on the Overlay to get a stacked area chart. ``` values = np.random.rand(5, 20) percentages = (values/values.sum(axis=0)).T*100 overlay = hv.Overlay([hv.Area(percentages[:, i], vdims=[hv.Dimension('value', unit='%')]) for i in range(5)]) overlay + hv.Area.stack(overlay) ``` ### ``Spread`` <a id='Spread'></a> ``Spread`` elements have the same data format as the ``ErrorBars`` element, namely x- and y-values with associated symmetric or asymmetric errors, but are interpreted as samples from a continuous distribution (just as ``Curve`` is the continuous version of ``Scatter``). These are often paired with an overlaid ``Curve`` to show both the mean (as a curve) and the spread of values; see the [Columnar Data tutorial](Columnar_Data.ipynb) for examples. ##### Symmetric ``` np.random.seed(42) xs = np.linspace(0, np.pi*2, 20) err = 0.2+np.random.rand(len(xs)) hv.Spread((xs, np.sin(xs), err)) ``` ##### Asymmetric ``` %%opts Spread (fill_color='indianred' fill_alpha=1) xs = np.linspace(0, np.pi*2, 20) hv.Spread((xs, np.sin(xs), 0.1+np.random.rand(len(xs)), 0.1+np.random.rand(len(xs))), vdims=['y', 'yerrneg', 'yerrpos']) ``` ### ``Bars`` <a id='Bars'></a> ``` data = [('one',8),('two', 10), ('three', 16), ('four', 8), ('five', 4), ('six', 1)] bars = hv.Bars(data, kdims=[hv.Dimension('Car occupants', values='initial')], vdims=['Count']) bars + bars[['one', 'two', 'three']] ``` ``Bars`` is an ``NdElement`` type, so by default it is sorted. To preserve the initial ordering specify the ``Dimension`` with values set to 'initial', or you can supply an explicit list of valid dimension keys. ``Bars`` support up to two key dimensions which can be laid by ``'group'`` and ``'stack'`` dimensions. 
By default the key dimensions are mapped onto the first, second ``Dimension`` of the ``Bars`` object, but this behavior can be overridden via the ``group_index`` and ``stack_index`` options. ``` %%opts Bars [group_index=0 stack_index=1] from itertools import product np.random.seed(3) groups, stacks = ['A', 'B'], ['a', 'b'] keys = product(groups, stacks) hv.Bars([k+(np.random.rand()*100.,) for k in keys], kdims=['Group', 'Stack'], vdims=['Count']) ``` ### ``BoxWhisker`` <a id='BoxWhisker'></a> The ``BoxWhisker`` Element allows representing distributions of data varying by 0-N key dimensions. To represent the distribution of a single variable, we can create a BoxWhisker Element with no key dimensions and a single value dimension: ``` hv.BoxWhisker(np.random.randn(200), kdims=[], vdims=['Value']) ``` BoxWhisker Elements support any number of dimensions and may also be rotated. To style the boxes and whiskers, supply ``boxprops``, ``whiskerprops``, and ``flierprops``. ``` %%opts BoxWhisker [invert_axes=True width=600] groups = [chr(65+g) for g in np.random.randint(0, 3, 200)] hv.BoxWhisker((groups, np.random.randint(0, 5, 200), np.random.randn(200)), kdims=['Group', 'Category'], vdims=['Value']).sort() ``` ### ``Histogram`` <a id='Histogram'></a> ``` np.random.seed(1) data = [np.random.normal() for i in range(10000)] frequencies, edges = np.histogram(data, 20) hv.Histogram(frequencies, edges) ``` ``Histogram``s partition the `x` axis into discrete (but not necessarily regular) bins, showing counts in each as a bar. Almost all Element types, including ``Histogram``, may be projected onto a polar axis by supplying ``projection='polar'`` as a plot option. 
``` %%opts Histogram [projection='polar' show_grid=True] data = [np.random.rand()*np.pi*2 for i in range(100)] frequencies, edges = np.histogram(data, 20) hv.Histogram(frequencies, edges, kdims=['Angle']) ``` ### ``Scatter`` <a id='Scatter'></a> ``` %%opts Scatter (color='k', marker='s', s=10) np.random.seed(42) points = [(i, np.random.random()) for i in range(20)] hv.Scatter(points) + hv.Scatter(points)[12:20] ``` Scatter is the discrete equivalent of Curve, showing *y* values for discrete *x* values selected. See [``Points``](#Points) for more information. The marker shape specified above can be any supported by [matplotlib](http://matplotlib.org/api/markers_api.html), e.g. ``s``, ``d``, or ``o``; the other options select the color and size of the marker. For convenience with the [bokeh backend](Bokeh_Backend), the matplotlib marker options are supported using a compatibility function in HoloViews. ### ``Points`` <a id='Points'></a> ``` np.random.seed(12) points = np.random.rand(50,2) hv.Points(points) + hv.Points(points)[0.6:0.8,0.2:0.5] ``` As you can see, ``Points`` is very similar to ``Scatter``, and can produce some plots that look identical. However, the two ``Element``s are very different semantically. For ``Scatter``, the dots each show a dependent variable *y* for some *x*, such as in the ``Scatter`` example above where we selected regularly spaced values of *x* and then created a random number as the corresponding *y*. I.e., for ``Scatter``, the *y* values are the data; the *x*s are just where the data values are located. 
For ``Points``, both *x* and *y* are independent variables, known as ``key_dimensions`` in HoloViews: ``` for o in [hv.Points(points,name="Points "), hv.Scatter(points,name="Scatter")]: for d in ['key','value']: print("%s %s_dimensions: %s " % (o.name, d, o.dimensions(d,label=True))) ``` The ``Scatter`` object expresses a dependent relationship between *x* and *y*, making it useful for combining with other similar ``Chart`` types, while the ``Points`` object expresses the relationship of two independent keys *x* and *y* with optional ``vdims`` (zero in this case), which makes ``Points`` objects meaningful to combine with the ``Raster`` types below. Of course, the ``vdims`` need not be empty for ``Points``; here is an example with two additional quantities for each point, as ``value_dimension``s *z* and &alpha; visualized as the color and size of the dots, respectively: ``` %%opts Points [color_index=2 size_index=3 scaling_factor=50] np.random.seed(10) data = np.random.rand(100,4) points = hv.Points(data, vdims=['z', 'alpha']) points + points[0.3:0.7, 0.3:0.7].hist() ``` Such a plot wouldn't be meaningful for ``Scatter``, but is a valid use for ``Points``, where the *x* and *y* locations are independent variables representing coordinates, and the "data" is conveyed by the size and color of the dots. ### ``Spikes`` <a id='Spikes'></a> Spikes represent any number of horizontal or vertical line segments with fixed or variable heights. There are a number of disparate uses for this type. First of all, they may be used as a rugplot to give an overview of a one-dimensional distribution. They may also be useful in more domain-specific cases, such as visualizing spike trains for neurophysiology or spectrograms in physics and chemistry applications. 
In the simplest case, a Spikes object represents coordinates in a 1D distribution: ``` %%opts Spikes (line_alpha=0.4) [spike_length=0.1] xs = np.random.rand(50) ys = np.random.rand(50) hv.Points((xs, ys)) * hv.Spikes(xs) ``` When supplying two dimensions to the Spikes object, the second dimension will be mapped onto the line height. Optionally, you may also supply a cmap and color_index to map color onto one of the dimensions. This way we can, for example, plot a mass spectrogram: ``` %%opts Spikes (cmap='Reds') hv.Spikes(np.random.rand(20, 2), kdims=['Mass'], vdims=['Intensity']) ``` Another possibility is to draw a number of spike trains as you would encounter in neuroscience. Here we generate 10 separate random spike trains and distribute them evenly across the space by setting their ``position``. By also declaring some ``yticks``, each spike train can be labeled individually: ``` %%opts Spikes [spike_length=0.1] NdOverlay [show_legend=False] hv.NdOverlay({i: hv.Spikes(np.random.randint(0, 100, 10), kdims=['Time']).opts(plot=dict(position=0.1*i)) for i in range(10)}).opts(plot=dict(yticks=[((i+1)*0.1-0.05, i) for i in range(10)])) ``` Finally, we may use ``Spikes`` to visualize marginal distributions as adjoined plots using the ``<<`` adjoin operator: ``` %%opts Spikes (line_alpha=0.2) points = hv.Points(np.random.randn(500, 2)) points << hv.Spikes(points['y']) << hv.Spikes(points['x']) ``` ### ``VectorField`` <a id='VectorField'></a> ``` %%opts VectorField [size_index=3] x,y = np.mgrid[-10:10,-10:10] * 0.25 sine_rings = np.sin(x**2+y**2)*np.pi+np.pi exp_falloff = 1/np.exp((x**2+y**2)/8) vector_data = (x,y,sine_rings, exp_falloff) hv.VectorField(vector_data) ``` As you can see above, here the *x* and *y* positions are chosen to make a regular grid. The arrow angles follow a sinusoidal ring pattern, and the arrow lengths fall off exponentially from the center, so this plot has four dimensions of data (direction and length for each *x,y* position). 
Using the IPython ``%%opts`` cell-magic (described in the [Options tutorial](Options), along with the Python equivalent), we can also use color as a redundant indicator to the direction or magnitude: ``` %%opts VectorField [size_index=3] VectorField.A [color_index=2] VectorField.M [color_index=3] hv.VectorField(vector_data, group='A') + hv.VectorField(vector_data, group='M') ``` ### ``SideHistogram`` <a id='SideHistogram'></a> The ``.hist`` method conveniently adjoins a histogram to the side of any ``Chart``, ``Surface``, or ``Raster`` component, as well as many of the container types (though it would be reporting data from one of these underlying ``Element`` types). For a ``Raster`` using color or grayscale to show values (see ``Raster`` section below), the side histogram doubles as a color bar or key. ``` import numpy as np np.random.seed(42) points = [(i, np.random.normal()) for i in range(800)] hv.Scatter(points).hist() ``` ## ``Chart3D`` Elements <a id='Chart3D Elements'></a> ### ``Surface`` <a id='Surface'></a> ``` %%opts Surface (cmap='jet' rstride=20, cstride=2) hv.Surface(np.sin(np.linspace(0,100*np.pi*2,10000)).reshape(100,100)) ``` Surface is used for a set of gridded points whose associated value dimension represents samples from a continuous surface; it is the equivalent of a ``Curve`` but with two key dimensions instead of just one. ### ``Scatter3D`` <a id='Scatter3D'></a> ``` %%opts Scatter3D [azimuth=40 elevation=20] x,y = np.mgrid[-5:5, -5:5] * 0.1 heights = np.sin(x**2+y**2) hv.Scatter3D(zip(x.flat,y.flat,heights.flat)) ``` ``Scatter3D`` is the equivalent of ``Scatter`` but for two key dimensions, rather than just one. ### ``TriSurface`` <a id='TriSurface'></a> The ``TriSurface`` Element renders any collection of 3D points as a Surface by applying Delaunay triangulation. It thus supports arbitrary, non-gridded data, but it does not support indexing to find data values, since finding the closest ones would require a search. 
``` %%opts TriSurface [fig_size=200] (cmap='hot_r') hv.TriSurface((x.flat,y.flat,heights.flat)) ``` ## ``Raster`` Elements <a id='Raster Elements'></a> **A collection of raster image types** The second large class of ``Elements`` is the raster elements. Like ``Points`` and unlike the other ``Chart`` elements, ``Raster Elements`` live in a 2D key-dimensions space. For the ``Image``, ``RGB``, and ``HSV`` elements, the coordinates of this two-dimensional key space are defined in a [continuously indexable coordinate system](Continuous_Coordinates.ipynb). ### ``Raster`` <a id='Raster'></a> A ``Raster`` is the base class for image-like ``Elements``, but may be used directly to visualize 2D arrays using a color map. The coordinate system of a ``Raster`` is the raw indexes of the underlying array, with integer values always starting from (0,0) in the top left, with default extents corresponding to the shape of the array. The ``Image`` subclass visualizes similarly, but using a continuous Cartesian coordinate system suitable for an array that represents some underlying continuous region. ``` x,y = np.mgrid[-50:51, -50:51] * 0.1 hv.Raster(np.sin(x**2+y**2)) ``` ### ``QuadMesh`` <a id='QuadMesh'></a> The basic ``QuadMesh`` is a 2D grid of bins specified as x-/y-values specifying a regular sampling or edges, with arbitrary sampling and an associated 2D array containing the bin values. The coordinate system of a ``QuadMesh`` is defined by the bin edges, therefore any index falling into a binned region will return the appropriate value. Unlike ``Image`` objects, slices must be inclusive of the bin edges. ``` n = 21 xs = np.logspace(1, 3, n) ys = np.linspace(1, 10, n) hv.QuadMesh((xs, ys, np.random.rand(n-1, n-1))) ``` QuadMesh may also be used to represent an arbitrary mesh of quadrilaterals by supplying three separate 2D arrays representing the coordinates of each quadrilateral in a 2D space. 
Note that when using ``QuadMesh`` in this mode, slicing and indexing semantics and most operations will currently not work. ``` coords = np.linspace(-1.5,1.5,n) X,Y = np.meshgrid(coords, coords); Qx = np.cos(Y) - np.cos(X) Qz = np.sin(Y) + np.sin(X) Z = np.sqrt(X**2 + Y**2) hv.QuadMesh((Qx, Qz, Z)) ``` ### ``HeatMap`` <a id='HeatMap'></a> A ``HeatMap`` displays like a typical raster image, but the input is a dictionary indexed with two-dimensional keys, not a Numpy array or Pandas dataframe. As many rows and columns as required will be created to display the values in an appropriate grid format. Values unspecified are left blank, and the keys can be any Python datatype (not necessarily numeric). One typical usage is to show values from a set of experiments, such as a parameter space exploration, and many other such visualizations are shown in the [Containers](Containers.ipynb) and [Exploring Data](Exploring_Data.ipynb) tutorials. Each value in a ``HeatMap`` is labeled explicitly by default, and so this component is not meant for very large numbers of samples. With the default color map, high values (in the upper half of the range present) are colored orange and red, while low values (in the lower half of the range present) are colored shades of blue. ``` data = {(chr(65+i),chr(97+j)): i*j for i in range(5) for j in range(5) if i!=j} hv.HeatMap(data).sort() ``` ### ``Image`` <a id='Image'></a> Like ``Raster``, a HoloViews ``Image`` allows you to view 2D arrays using an arbitrary color map. Unlike ``Raster``, an ``Image`` is associated with a [2D coordinate system in continuous space](Continuous_Coordinates.ipynb), which is appropriate for values sampled from some underlying continuous distribution (as in a photograph or other measurements from locations in real space). Slicing, sampling, etc. on an ``Image`` all use this continuous space, whereas the corresponding operations on a ``Raster`` work on the raw array coordinates. 
``` x,y = np.mgrid[-50:51, -50:51] * 0.1 bounds=(-1,-1,1,1)   # Coordinate system: (left, bottom, right, top) (hv.Image(np.sin(x**2+y**2), bounds=bounds) + hv.Image(np.sin(x**2+y**2), bounds=bounds)[-0.5:0.5, -0.5:0.5]) ``` Notice how, because our declared coordinate system is continuous, we can slice with any floating-point value we choose. The appropriate range of the samples in the input numpy array will always be displayed, whether or not there are samples at those specific floating-point values. It is also worth noting that the name ``Image`` can clash with other common libraries, which is one reason to avoid unqualified imports like ``from holoviews import *``. For instance, the Python Imaging Library provides an ``Image`` module, and IPython itself supplies an ``Image`` class in ``IPython.display``. Python namespaces allow you to avoid such problems, e.g. using ``from PIL import Image as PILImage`` or using ``import holoviews as hv`` and then ``hv.Image()``, as we do in these tutorials. 
### ``RGB`` <a id='RGB'></a> The ``RGB`` element is an ``Image`` that supports red, green, blue channels: ``` x,y = np.mgrid[-50:51, -50:51] * 0.1 r = 0.5*np.sin(np.pi +3*x**2+y**2)+0.5 g = 0.5*np.sin(x**2+2*y**2)+0.5 b = 0.5*np.sin(np.pi/2+x**2+y**2)+0.5 hv.RGB(np.dstack([r,g,b])) ``` You can see how the RGB object is created from the original channels: ``` %%opts Image (cmap='gray') hv.Image(r,label="R") + hv.Image(g,label="G") + hv.Image(b,label="B") ``` ``RGB`` also supports an optional alpha channel, which will be used as a mask revealing or hiding any ``Element``s it is overlaid on top of: ``` %%opts Image (cmap='gray') mask = 0.5*np.sin(0.2*(x**2+y**2))+0.5 rgba = hv.RGB(np.dstack([r,g,b,mask])) bg = hv.Image(0.5*np.cos(x*3)+0.5, label="Background") * hv.VLine(x=0,label="Background") overlay = bg*rgba overlay.label="RGBA Overlay" bg + hv.Image(mask,label="Mask") + overlay ``` ### ``HSV`` <a id='HSV'></a> HoloViews makes it trivial to work in any color space that can be converted to ``RGB`` by making a simple subclass of ``RGB`` as appropriate. For instance, we also provide the HSV (hue, saturation, value) color space, which is useful for plotting cyclic data (as the Hue) along with two additional dimensions (controlling the saturation and value of the color, respectively): ``` x,y = np.mgrid[-50:51, -50:51] * 0.1 h = 0.5 + np.sin(0.2*(x**2+y**2)) / 2.0 s = 0.5*np.cos(y*3)+0.5 v = 0.5*np.cos(x*3)+0.5 hsv = hv.HSV(np.dstack([h, s, v])) hsv ``` You can see how this is created from the original channels: ``` %%opts Image (cmap='gray') hv.Image(h, label="H") + hv.Image(s, label="S") + hv.Image(v, label="V") ``` # ``Tabular`` Elements <a id='Tabular Elements'></a> **General data structures for holding arbitrary information** ## ``ItemTable`` <a id='ItemTable'></a> An ``ItemTable`` is an ordered collection of key, value pairs. 
It can be used to directly visualize items in a tabular format where the items may be supplied as an ``OrderedDict`` or a list of (key,value) pairs. A standard Python dictionary can be easily visualized using a call to the ``.items()`` method, though the entries in such a dictionary are not kept in any particular order, and so you may wish to sort them before display. One typical usage for an ``ItemTable`` is to list parameter values or measurements associated with an adjacent ``Element``. ``` hv.ItemTable([('Age', 10), ('Weight',15), ('Height','0.8 meters')]) ``` ## ``Table`` <a id='Table'></a> A table is more general than an ``ItemTable``, as it allows multi-dimensional keys and multidimensional values. ``` keys = [('M',10), ('M',16), ('F',12)] values = [(15, 0.8), (18, 0.6), (10, 0.8)] table = hv.Table(zip(keys,values), kdims = ['Gender', 'Age'], vdims=['Weight', 'Height']) table ``` Note that you can use select using tables, and once you select using a full, multidimensional key, you get an ``ItemTable`` (shown on the right): ``` table.select(Gender='M') + table.select(Gender='M', Age=10) ``` The ``Table`` is used as a common data structure that may be converted to any other HoloViews data structure using the ``TableConversion`` class. The functionality of the ``TableConversion`` class may be conveniently accessed using the ``.to`` property. For more extended usage of table conversion see the [Columnar Data](Columnnar_Data.ipynb) and [Pandas Conversion](Pandas_Conversion.ipynb) Tutorials. ``` table.select(Gender='M').to.curve(kdims=["Age"], vdims=["Weight"]) ``` # ``Annotation`` Elements <a id='Annotation Elements'></a> **Useful information that can be overlaid onto other components** Annotations are components designed to be overlaid on top of other ``Element`` objects. 
To demonstrate annotation and paths, we will be drawing many of our elements on top of an RGB Image: ``` scene = hv.RGB.load_image('../assets/penguins.png') ``` ### ``VLine`` and ``HLine`` <a id='VLine'></a><a id='HLine'></a> ``` scene * hv.VLine(-0.05) + scene * hv.HLine(-0.05) ``` ### ``Spline`` <a id='Spline'></a> The ``Spline`` annotation is used to draw Bezier splines using the same semantics as [matplotlib splines](http://matplotlib.org/api/path_api.html). In the overlay below, the spline is in dark blue and the control points are in light blue. ``` points = [(-0.3, -0.3), (0,0), (0.25, -0.25), (0.3, 0.3)] codes = [1,4,4,4] scene * hv.Spline((points,codes)) * hv.Curve(points) ``` ### Text and Arrow <a id='Text'></a><a id='Arrow'></a> ``` scene * hv.Text(0, 0.2, 'Adult\npenguins') + scene * hv.Arrow(0,-0.1, 'Baby penguin', 'v') ``` # Paths <a id='Path Elements'></a> **Line-based components that can be overlaid onto other components** Paths are a subclass of annotations that involve drawing line-based components on top of other elements. Internally, Path Element types hold a list of Nx2 arrays, specifying the x/y-coordinates along each path. The data may be supplied in a number of ways, including: 1. A list of Nx2 numpy arrays. 2. A list of lists containing x/y coordinate tuples. 3. A tuple containing an array of length N with the x-values and a second array of shape NxP, where P is the number of paths. 4. A list of tuples each containing separate x and y values. ## ``Path`` <a id='Path'></a> A ``Path`` object is actually a collection of paths which can be arbitrarily specified. Although there may be multiple unconnected paths in a single ``Path`` object, they will all share the same style. Only by overlaying multiple ``Path`` objects do you iterate through the defined color cycle (or any other style options that have been defined). 
``` angle = np.linspace(0, 2*np.pi, 100) baby = list(zip(0.15*np.sin(angle), 0.2*np.cos(angle)-0.2)) adultR = [(0.25, 0.45), (0.35,0.35), (0.25, 0.25), (0.15, 0.35), (0.25, 0.45)] adultL = [(-0.3, 0.4), (-0.3, 0.3), (-0.2, 0.3), (-0.2, 0.4),(-0.3, 0.4)] scene * hv.Path([adultL, adultR, baby]) * hv.Path([baby]) ``` ## ``Contours`` <a id='Contours'></a> A ``Contours`` object is similar to ``Path`` object except each of the path elements is associated with a numeric value, called the ``level``. Sadly, our penguins are too complicated to give a simple example so instead we will simply mark the first couple of rings of our earlier ring pattern: ``` x,y = np.mgrid[-50:51, -50:51] * 0.1 def circle(radius, x=0, y=0): angles = np.linspace(0, 2*np.pi, 100) return np.array( list(zip(x+radius*np.sin(angles), y+radius*np.cos(angles)))) hv.Image(np.sin(x**2+y**2)) * hv.Contours([circle(0.22)], level=0) * hv.Contours([circle(0.33)], level=1) ``` ## ``Polygons`` <a id='Polygons'></a> A ``Polygons`` object is similar to a ``Contours`` object except that each supplied path is closed and filled. Just like ``Contours``, optionally a ``level`` may be supplied; the Polygons will then be colored according to the supplied ``cmap``. Non-finite values such as ``np.NaN`` or ``np.inf`` will default to the supplied ``facecolor``. Polygons with values can be used to build heatmaps with arbitrary shapes. ``` %%opts Polygons (cmap='hot' line_color='black' line_width=2) np.random.seed(35) hv.Polygons([np.random.rand(4,2)], level=0.5) *\ hv.Polygons([np.random.rand(4,2)], level=1.0) *\ hv.Polygons([np.random.rand(4,2)], level=1.5) *\ hv.Polygons([np.random.rand(4,2)], level=2.0) ``` Polygons without a value are useful as annotation, but also allow us to draw arbitrary shapes. 
``` def rectangle(x=0, y=0, width=1, height=1): return np.array([(x,y), (x+width, y), (x+width, y+height), (x, y+height)]) (hv.Polygons([rectangle(width=2), rectangle(x=6, width=2)]).opts(style={'fill_color': '#a50d0d'}) * hv.Polygons([rectangle(x=2, height=2), rectangle(x=5, height=2)]).opts(style={'fill_color': '#ffcc00'}) * hv.Polygons([rectangle(x=3, height=2, width=2)]).opts(style={'fill_color': 'cyan'})) ``` ## ``Bounds`` <a id='Bounds'></a> A bounds is a rectangular area specified as a tuple in ``(left, bottom, right, top)`` format. It is useful for denoting a region of interest defined by some bounds, whereas ``Box`` (below) is useful for drawing a box at a specific location. ``` scene * hv.Bounds(0.2) * hv.Bounds((0.2, 0.2, 0.45, 0.45,)) ``` ## ``Box`` <a id='Box'></a> and ``Ellipse`` <a id='Ellipse'></a> A ``Box`` is similar to a ``Bounds`` except you specify the box position, width, and aspect ratio instead of the coordinates of the box corners. An ``Ellipse`` is specified just as for ``Box``, but has a rounded shape. ``` scene * hv.Box( -0.25, 0.3, 0.3, aspect=0.5) * hv.Box( 0, -0.2, 0.1) + \ scene * hv.Ellipse(-0.25, 0.3, 0.3, aspect=0.5) * hv.Ellipse(0, -0.2, 0.1) ```
github_jupyter
``` import numpy as np import pandas as pd size = 300 X = np.random.rand(size)*5-2.5 w4, w3, w2, w1, w0 = 1, 2, 1, -4, 2 y = w4*(X**4) + w3*(X**3) + w2*(X**2) + w1*X + w0 + np.random.randn(size)*8-4 df = pd.DataFrame({'x': X, 'y': y}) df.to_csv('dane_do_regresji.csv',index=None) df.plot.scatter(x='x',y='y') X = X.reshape(-1, 1) y = y.reshape(-1, 1) # Podziel ww. zbiór na zbiory: uczący oraz testujący w proporcji 80:20 from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) ###REGRESJA #LINIOWA from sklearn.linear_model import LinearRegression import matplotlib.pyplot as plt lin_reg = LinearRegression() lin_reg.fit(X_train, y_train) y_train_pred_lin_reg = lin_reg.predict(X_train) y_test_pred_lin_reg = lin_reg.predict(X_test) wynik = pd.DataFrame({'x': X_train[:, 0], 'y': y_train_pred_lin_reg[:, 0]}) wynik.plot(x='x',y='y') #KNN, dla 𝑘 = 3 import sklearn.neighbors knn_3_reg = sklearn.neighbors.KNeighborsRegressor(n_neighbors=3) knn_3_reg.fit(X_train, y_train) y_train_pred_knn_3 = knn_3_reg.predict(X_train) y_test_pred_knn_3 = knn_3_reg.predict(X_test) wynik = pd.DataFrame({'x': X_test[:, 0], 'y': y_test_pred_knn_3[:, 0]}) wynik.plot.scatter(x='x',y='y') #KNN, dla 𝑘 = 5 import sklearn.neighbors knn_5_reg = sklearn.neighbors.KNeighborsRegressor(n_neighbors=5) knn_5_reg.fit(X_train, y_train) y_train_pred_knn_5 = knn_5_reg.predict(X_train) y_test_pred_knn_5 = knn_5_reg.predict(X_test) wynik = pd.DataFrame({'x': X_test[:, 0], 'y': y_test_pred_knn_5[:, 0]}) wynik.plot.scatter(x='x',y='y') # Wielomianowy 2 rzędu: from sklearn.preprocessing import PolynomialFeatures poly_feature_2 = PolynomialFeatures(degree=2, include_bias=False) #uczace X_2_poly_train = poly_feature_2.fit_transform(X_train) #testowe X_2_poly_test = poly_feature_2.fit_transform(X_test) poly_2_reg = LinearRegression() poly_2_reg.fit(X_2_poly_train, y_train) #predykcja uczacych y_train_pred_poly_2 = 
poly_2_reg.predict(X_2_poly_train) #predykcja testowych y_test_pred_poly_2 = poly_2_reg.predict(X_2_poly_test) wynik = pd.DataFrame({'x': X_test[:, 0], 'y': y_test_pred_poly_2[:, 0]}) wynik.plot.scatter(x='x',y='y') # Wielomianowy 3 rzędu: from sklearn.preprocessing import PolynomialFeatures poly_feature_3 = PolynomialFeatures(degree=3, include_bias=False) #uczace X_3_poly_train = poly_feature_3.fit_transform(X_train) #testowe X_3_poly_test = poly_feature_3.fit_transform(X_test) poly_3_reg = LinearRegression() poly_3_reg.fit(X_3_poly_train, y_train) #predykcja uczacych y_train_pred_poly_3 = poly_3_reg.predict(X_3_poly_train) #predykcja testowych y_test_pred_poly_3 = poly_3_reg.predict(X_3_poly_test) wynik = pd.DataFrame({'x': X_test[:, 0], 'y': y_test_pred_poly_3[:, 0]}) wynik.plot.scatter(x='x',y='y') # Wielomianowy 4 rzędu: from sklearn.preprocessing import PolynomialFeatures poly_feature_4 = PolynomialFeatures(degree=4, include_bias=False) #uczące X_4_poly_train = poly_feature_4.fit_transform(X_train) #testowe X_4_poly_test = poly_feature_4.fit_transform(X_test) poly_4_reg = LinearRegression() poly_4_reg.fit(X_4_poly_train, y_train) #predykcja uczących y_train_pred_poly_4 = poly_4_reg.predict(X_4_poly_train) #predykcja testowych y_test_pred_poly_4 = poly_4_reg.predict(X_4_poly_test) wynik = pd.DataFrame({'x': X_test[:, 0], 'y': y_test_pred_poly_4[:, 0]}) wynik.plot.scatter(x='x',y='y') # Wielomianowy 5 rzędu: from sklearn.preprocessing import PolynomialFeatures poly_feature_5 = PolynomialFeatures(degree=5, include_bias=False) #uczace X_5_poly_train = poly_feature_5.fit_transform(X_train) #testowe X_5_poly_test = poly_feature_5.fit_transform(X_test) poly_5_reg = LinearRegression() poly_5_reg.fit(X_5_poly_train, y_train) #predykcja uczacych y_train_pred_poly_5 = poly_5_reg.predict(X_5_poly_train) #predykcja testowych y_test_pred_poly_5 = poly_5_reg.predict(X_5_poly_test) wynik = pd.DataFrame({'x': X_test[:, 0], 'y': y_test_pred_poly_5[:, 0]}) 
wynik.plot.scatter(x='x',y='y') #2. Przeanalizuj działanie każdej z otrzymanych funkcji regresyjnych. Porównaj ich przebiegi z #rozkładem zbioru danych. PRZEANALIZOWANE #Zapisz w osobnym DataFrame wartości MSE dla zbiorów uczących i testujących dla ww. #regresorów; kolumny: train_mse, test_mse, wiersze: lin_reg, knn_3_reg, knn_5_reg, #poly_2_reg, poly_3_reg, poly_4_reg, poly_5_reg. Zapisz ww. DataFrame do pliku Pickle #o nazwie: mse.pkl #liniowe from sklearn.metrics import mean_squared_error train_mse_lin_reg = mean_squared_error(y_train, y_train_pred_lin_reg) test_mse_lin_reg = mean_squared_error(y_test, y_test_pred_lin_reg) #KNN dla 𝑘 = 3 oraz 𝑘 = 5 train_mse_knn_3_reg = mean_squared_error(y_train, y_train_pred_knn_3) test_mse_knn_3_reg = mean_squared_error(y_test, y_test_pred_knn_3) train_mse_knn_5_reg = mean_squared_error(y_train, y_train_pred_knn_5) test_mse_knn_5_reg = mean_squared_error(y_test, y_test_pred_knn_5) #Wielomianową 2, 3, 4 i 5 rzędu train_mse_knn_poly_2 = mean_squared_error(y_train, y_train_pred_poly_2) test_mse_knn_poly_2 = mean_squared_error(y_test, y_test_pred_poly_2) train_mse_knn_poly_3 = mean_squared_error(y_train, y_train_pred_poly_3) test_mse_knn_poly_3 = mean_squared_error(y_test, y_test_pred_poly_3) train_mse_knn_poly_4 = mean_squared_error(y_train, y_train_pred_poly_4) test_mse_knn_poly_4 = mean_squared_error(y_test, y_test_pred_poly_4) train_mse_knn_poly_5 = mean_squared_error(y_train, y_train_pred_poly_5) test_mse_knn_poly_5 = mean_squared_error(y_test, y_test_pred_poly_5) mse = [[train_mse_lin_reg, test_mse_lin_reg], [train_mse_knn_3_reg, test_mse_knn_3_reg], [train_mse_knn_5_reg, test_mse_knn_5_reg], [train_mse_knn_poly_2, test_mse_knn_poly_2], [train_mse_knn_poly_3, test_mse_knn_poly_3], [train_mse_knn_poly_4, test_mse_knn_poly_4], [train_mse_knn_poly_5, test_mse_knn_poly_5]] mse_df = pd.DataFrame(mse, index=["lin_reg", "knn_3_reg", "knn_5_reg", "poly_2_reg", "poly_3_reg", "poly_4_reg", "poly_5_reg"], columns=["train_mse", 
"test_mse"]) mse_df import pickle with open('mse.pkl', 'wb') as fp: pickle.dump(mse_df, fp) reg = [(lin_reg, None), (knn_3_reg, None), (knn_5_reg, None), (poly_2_reg, poly_feature_2), (poly_3_reg, poly_feature_3), (poly_4_reg, poly_feature_4), (poly_5_reg, poly_feature_5)] with open('reg.pkl', 'wb') as fp: pickle.dump(reg, fp) ```
github_jupyter
# Defining your own classes ## User Defined Types A **class** is a user-programmed Python type (since Python 2.2!) It can be defined like: ``` class Room(object): pass ``` Or: ``` class Room(): pass ``` Or: ``` class Room: pass ``` What's the difference? Before Python 2.2 a class was distinct from all other Python types, which caused some odd behaviour. To fix this, classes were redefined as user programmed types by extending `object`, e.g., class `Room(object)`. So most Python 2 code will use this syntax as very few people want to use old style Python classes. Python 3 has formalised this by removing old-style classes, so they can be defined without extending `object`, or indeed without parentheses. Just as with other Python types, you use the name of the type as a function to make a variable of that type: ``` zero = int() type(zero) myroom = Room() type(myroom) ``` In the jargon, we say that an **object** is an **instance** of a particular **class**. `__main__` is the name of the scope in which top-level code executes, where we've defined the class `Room`. Once we have an object with a type of our own devising, we can add properties at will: ``` myroom.name = "Living" myroom.name ``` The most common use of a class is to allow us to group data into an object in a way that is easier to read and understand than organising data into lists and dictionaries. ``` myroom.capacity = 3 myroom.occupants = ["James", "Sue"] ``` ## Methods So far, our class doesn't do much! We define functions **inside** the definition of a class, in order to give them capabilities, just like the methods on built-in types. 
``` class Room: def overfull(self): return len(self.occupants) > self.capacity myroom = Room() myroom.capacity = 3 myroom.occupants = ["James", "Sue"] myroom.overfull() myroom.occupants.append("Clare") myroom.occupants.append("Bob") myroom.overfull() ``` When we write methods, we always write the first function argument as `self`, to refer to the object instance itself, the argument that goes "before the dot". This is just a convention for this variable name, not a keyword. You could call it something else if you wanted. ## Constructors Normally, though, we don't want to add data to the class attributes on the fly like that. Instead, we define a **constructor** that converts input data into an object. ``` class Room: def __init__(self, name, exits, capacity, occupants=[]): self.name = name self.occupants = occupants # Note the default argument, occupants start empty self.exits = exits self.capacity = capacity def overfull(self): return len(self.occupants) > self.capacity living = Room("Living Room", {"north": "garden"}, 3) living.capacity ``` Methods which begin and end with **two underscores** in their names fulfil special capabilities in Python, such as constructors. 
## Object-oriented design In building a computer system to model a problem, therefore, we often want to make: * classes for each *kind of thing* in our system * methods for each *capability* of that kind * properties (defined in a constructor) for each *piece of information describing* that kind For example, the below program might describe our "Maze of Rooms" system: We define a "Maze" class which can hold rooms: ``` class Maze: def __init__(self, name): self.name = name self.rooms = {} def add_room(self, room): room.maze = self # The Room needs to know # which Maze it is a part of self.rooms[room.name] = room def occupants(self): return [ occupant for room in self.rooms.values() for occupant in room.occupants.values() ] def wander(self): """Move all the people in a random direction""" for occupant in self.occupants(): occupant.wander() def describe(self): for room in self.rooms.values(): room.describe() def step(self): self.describe() print("") self.wander() print("") def simulate(self, steps): for _ in range(steps): self.step() ``` And a "Room" class with exits, and people: ``` class Room: def __init__(self, name, exits, capacity, maze=None): self.maze = maze self.name = name self.occupants = {} # Note the default argument, occupants start empty self.exits = exits # Should be a dictionary from directions to room names self.capacity = capacity def has_space(self): return len(self.occupants) < self.capacity def available_exits(self): return [ exit for exit, target in self.exits.items() if self.maze.rooms[target].has_space() ] def random_valid_exit(self): import random if not self.available_exits(): return None return random.choice(self.available_exits()) def destination(self, exit): return self.maze.rooms[self.exits[exit]] def add_occupant(self, occupant): occupant.room = self # The person needs to know which room it is in self.occupants[occupant.name] = occupant def delete_occupant(self, occupant): del self.occupants[occupant.name] def describe(self): if 
self.occupants: print(f"{self.name}: " + " ".join(self.occupants.keys())) ``` We define a "Person" class for room occupants: ``` class Person: def __init__(self, name, room=None): self.name = name def use(self, exit): self.room.delete_occupant(self) destination = self.room.destination(exit) destination.add_occupant(self) print( "{some} goes {action} to the {where}".format( some=self.name, action=exit, where=destination.name ) ) def wander(self): exit = self.room.random_valid_exit() if exit: self.use(exit) ``` And we use these classes to define our people, rooms, and their relationships: ``` james = Person("James") sue = Person("Sue") bob = Person("Bob") clare = Person("Clare") living = Room( "livingroom", {"outside": "garden", "upstairs": "bedroom", "north": "kitchen"}, 2 ) kitchen = Room("kitchen", {"south": "livingroom"}, 1) garden = Room("garden", {"inside": "livingroom"}, 3) bedroom = Room("bedroom", {"jump": "garden", "downstairs": "livingroom"}, 1) house = Maze("My House") for room in [living, kitchen, garden, bedroom]: house.add_room(room) living.add_occupant(james) garden.add_occupant(sue) garden.add_occupant(clare) bedroom.add_occupant(bob) ``` And we can run a "simulation" of our model: ``` house.simulate(3) ``` ## Alternative object models There are many choices for how to design programs to do this. Another choice would be to separately define exits as a different class from rooms. This way, we can use arrays instead of dictionaries, but we have to first define all our rooms, then define all our exits. 
``` class Maze: def __init__(self, name): self.name = name self.rooms = [] self.occupants = [] def add_room(self, name, capacity): result = Room(name, capacity) self.rooms.append(result) return result def add_exit(self, name, source, target, reverse=None): source.add_exit(name, target) if reverse: target.add_exit(reverse, source) def add_occupant(self, name, room): self.occupants.append(Person(name, room)) room.occupancy += 1 def wander(self): "Move all the people in a random direction" for occupant in self.occupants: occupant.wander() def describe(self): for occupant in self.occupants: occupant.describe() def step(self): self.describe() print("") self.wander() print("") def simulate(self, steps): for _ in range(steps): self.step() class Room: def __init__(self, name, capacity): self.name = name self.capacity = capacity self.occupancy = 0 self.exits = [] def has_space(self): return self.occupancy < self.capacity def available_exits(self): return [exit for exit in self.exits if exit.valid()] def random_valid_exit(self): import random if not self.available_exits(): return None return random.choice(self.available_exits()) def add_exit(self, name, target): self.exits.append(Exit(name, target)) class Person: def __init__(self, name, room=None): self.name = name self.room = room def use(self, exit): self.room.occupancy -= 1 destination = exit.target destination.occupancy += 1 self.room = destination print( "{some} goes {action} to the {where}".format( some=self.name, action=exit.name, where=destination.name ) ) def wander(self): exit = self.room.random_valid_exit() if exit: self.use(exit) def describe(self): print("{who} is in the {where}".format(who=self.name, where=self.room.name)) class Exit: def __init__(self, name, target): self.name = name self.target = target def valid(self): return self.target.has_space() house = Maze("My New House") living = house.add_room("livingroom", 2) bed = house.add_room("bedroom", 1) garden = house.add_room("garden", 3) kitchen = 
house.add_room("kitchen", 1) house.add_exit("north", living, kitchen, "south") house.add_exit("upstairs", living, bed, "downstairs") house.add_exit("outside", living, garden, "inside") house.add_exit("jump", bed, garden) house.add_occupant("James", living) house.add_occupant("Sue", garden) house.add_occupant("Bob", bed) house.add_occupant("Clare", garden) house.simulate(3) ``` This is a huge topic, about which many books have been written. The differences between these two designs are important, and will have long-term consequences for the project. That is how we start to think about **software engineering**, as opposed to learning to program, and is an important part of this course. ## Exercise: Your own solution Compare the two solutions above. Discuss with a partner which you like better, and why. Then, starting from scratch, design your own. What choices did you make that are different from mine?
github_jupyter
# Testing Click-Through-Rates for Banner Ads (A/B Testing) * Lets say we are a new apparel store; after thorough market research, we decide to open up an <b> Online Apparel Store.</b> We hire Developers, Digital Media Strategists and Data Scientists, who help develop the store, place products and conduct controlled experiments on the website. * Traditionally, companies ran controlled experiments, either A/B Tests or Multivariate tests, based on requirements. <b>Multiple versions of Banner Ads, Text Ads and Video Ads are created, tested and placed on the website. Website layouts, Ad positions, transitions and many other attributes can be tested.</b> * Our version-A (Still in red colored background after the Holiday season), was on our website for 2 months or so, and we think its time for a change. Assuming everything else kept constant, we develop <b>version-B with subtle, earthy colored banner with the same text.</b> ### How do we decide if we should go for the switch (replace version-a with version-b) ? ### Controlled A/B Test * Content, color, text style, text size, banner location and placement and many other things need to be taken into account when trying to conduct a controlled experiment. If we plan to replace version-A with version-B, we need <b>strong evidence that click-through-rate (clicks/ impression) for version-B is significantly higher than version-A.</b> * Every visitor who visits our homepage, is <b>randomly (with equal probability) going to see either version-A (Older version) or version-B (New creative) on our homepage.</b> We observe, that the older version has a CTR (Click-through-rate) of <b>9 % (9 clicks every 100 impressions).</b> Let us say we have an <b>average of 200 visitors every day (new + returning users).</b> * We assume and test for the hypothesis that our new banner Ad (version-B), can provide some boost to the CTR. 25 % boost would mean an average-CTR of 11.25 % (11.25 clicks every 100 impressions). 
``` # importing necessary libraries import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # CTR previous version ctr_version_a = 0.09 # CTR new version with 25 % expected boost ctr_version_b = 0.09 + (0.25)*(0.09) ctr_version_a, ctr_version_b ``` * Our null hypothesis is that there is no difference in CTR for version a and b, with alternate hypothesis that CTR of version-B sees a boost in CTR. We conduct a Two-Sample Proportion Test to validate our hypotheses. $$H_0: \mu_b \le \mu_a$$ $$H_a: \mu_b > \mu_a $$ We know, t-stat is calculated by the following $$t = \frac{(\mu_b - \mu_a) - 0}{SE}$$ $$t = \frac{(\mu_b - \mu_a) - 0}{\sqrt{\frac{CTR_b(1-CTR_b)}{N_b} + \frac{CTR_a(1-CTR_a)}{N_a}}} $$ * Let us choose a type-I error rate of 5 % (alpha = 0.05). Now, we simulate the test by sending customers to either of the pages randomly with equal chance. Let us say we start pushing these two versions randomly on day 1. On average, we expect around 200 customers to open the website, of which approximately 100 of them are exposed to version-A, and 100 are exposed to version-B. ``` # function to flip between version-a and b. def flipVersion(version_a): if version_a: return False else: return True ``` ### End of Day 1 * After end of day 1, we observe that there were 202 customers who visited the website, and 101 customers were shown version-a and another 101 were shown version-b. ``` # total customer incoming per day are normally distributed with mean 200 and deviation 10. 
# --- Day 1: alternate incoming visitors between version-A and version-B ---
np.random.seed(25)
num_cust = int(np.random.normal(200, 10))

# impression and click tallies start the experiment at zero
num_imps_version_a = 0
num_imps_version_b = 0
num_clicks_version_a = 0
num_clicks_version_b = 0

# start by showing version-A
version_a = True

for visitor in range(num_cust):
    if version_a:
        # version-A impression: count it, then draw one Bernoulli click
        num_imps_version_a += 1
        num_clicks_version_a += np.random.binomial(1, ctr_version_a)
    else:
        # version-B impression
        num_imps_version_b += 1
        num_clicks_version_b += np.random.binomial(1, ctr_version_b)
    # strict alternation between consecutive visitors
    version_a = not version_a

num_cust, num_imps_version_a, num_imps_version_b
num_clicks_version_a, num_clicks_version_b

# Day-1 click-through rates per arm
ctr_day_one_version_a = num_clicks_version_a / num_imps_version_a
ctr_day_one_version_b = num_clicks_version_b / num_imps_version_b
ctr_day_one_version_a, ctr_day_one_version_b

# pooled proportion and standard error for the two-sample proportion test
p = (num_clicks_version_a + num_clicks_version_b) / (num_imps_version_a + num_imps_version_b)
SE = np.sqrt(p * (1 - p) * ((1 / num_imps_version_a) + (1 / num_imps_version_b)))
p, SE

# t-statistic after day 1 (~1.48: not yet significant evidence for version-B)
t = (ctr_day_one_version_b - ctr_day_one_version_a) / SE
t
# In some cases sample size is fixed up front to control Type-II error; in
# others the t-stat is tracked as samples accumulate.  Here we watch how the
# t-stat evolves with time/sample size — though estimating power and sample
# size before the experiment remains the best practice.

def conductExperiment(n_days):
    """Simulate n_days of the A/B test and return one row of stats per day.

    Returns a DataFrame with daily customer counts, per-arm impressions and
    CTRs; click counts are reconstructed as IMP * CTR.
    """
    list_num_cust = []
    list_t_stat = []  # NOTE(review): never populated or used; kept for parity
    list_ctr_version_a, list_ctr_version_b = [], []
    list_imp_version_a, list_imp_version_b = [], []
    for i in range(0, n_days):
        # total customers incoming per day are normally distributed with mean 200 and deviation 10.
        num_cust = int(np.random.normal(200, 10))
        list_num_cust.append(num_cust)
        # total number of impressions and clicks at start of each day are zero
        num_imps_version_a = 0
        num_imps_version_b = 0
        num_clicks_version_a = 0
        num_clicks_version_b = 0
        # start by showing version-A
        version_a = True
        # send each customer to a or b
        for customer_number in range(num_cust):
            # if version-a is exposed
            if version_a is True:
                # increase impression count
                num_imps_version_a += 1
                # binomial sample (1 if successfully clicked, else 0)
                num_clicks_version_a += np.random.binomial(1, ctr_version_a)
            # if version-b is exposed
            else:
                # increase impression count
                num_imps_version_b += 1
                # binomial sample (1 if successfully clicked, else 0)
                num_clicks_version_b += np.random.binomial(1, ctr_version_b)
            # flip version after each customer — deterministic alternation,
            # not the random 50/50 assignment described in the prose
            version_a = flipVersion(version_a)
        # daily CTRs (names say "day_one" but hold the current day's values)
        ctr_day_one_version_a = num_clicks_version_a/num_imps_version_a
        ctr_day_one_version_b = num_clicks_version_b/num_imps_version_b
        list_ctr_version_a.append(ctr_day_one_version_a)
        list_ctr_version_b.append(ctr_day_one_version_b)
        list_imp_version_a.append(num_imps_version_a)
        list_imp_version_b.append(num_imps_version_b)
    df_abtest = pd.DataFrame()
    df_abtest['num_cust'] = list_num_cust
    df_abtest['IMP_version_a'] = list_imp_version_a
    df_abtest['IMP_version_b'] = list_imp_version_b
    df_abtest['CTR_version_a'] = list_ctr_version_a
    df_abtest['CTR_version_b'] = list_ctr_version_b
    df_abtest['Clicks_version_b'] = df_abtest['IMP_version_b']*df_abtest['CTR_version_b']
    df_abtest['Clicks_version_a'] = df_abtest['IMP_version_a']*df_abtest['CTR_version_a']
    return df_abtest

# ## Simulating experiment results for 3 Days — cumulative testing is needed:
# on day 2 the t-stat must include day 1's data as well, and growing N shrinks
# the standard error over time.
df_abtest = conductExperiment(3)
df_abtest

# Re-written to produce the CUMULATIVE t-stat and standard error after n days.
def tStatAfterNDays(n_days):
    """Replay the first n_days cumulatively and return (t, SE).

    Uses a fixed 200 customers/day (unlike conductExperiment's normal draw).
    np.random.seed(25) is reset on every call, so each call replays the same
    customer stream — increasing n_days extends the same experiment rather
    than drawing an independent one.
    """
    np.random.seed(25)
    num_cust = 200*n_days
    # total number of impressions and clicks at start of experiment are zero
    num_imps_version_a = 0
    num_imps_version_b = 0
    num_clicks_version_a = 0
    num_clicks_version_b = 0
    # start by showing version-A
    version_a = True
    # send each customer to a or b
    for customer_number in range(num_cust):
        # if version-a is exposed
        if version_a is True:
            # increase impression count
            num_imps_version_a += 1
            # binomial sample (1 if successfully clicked, else 0)
            num_clicks_version_a += np.random.binomial(1, ctr_version_a)
        # if version-b is exposed
        else:
            # increase impression count
            num_imps_version_b += 1
            # binomial sample (1 if successfully clicked, else 0)
            num_clicks_version_b += np.random.binomial(1, ctr_version_b)
        # flip version after each customer
        version_a = flipVersion(version_a)
    # cumulative CTRs over all n_days (names inherited from the day-1 cell)
    ctr_day_one_version_a = num_clicks_version_a/num_imps_version_a
    ctr_day_one_version_b = num_clicks_version_b/num_imps_version_b
    # pooled click rate; num_cust equals total impressions since every
    # customer generates exactly one impression
    p = (num_clicks_version_a + num_clicks_version_b)/num_cust
    SE = np.sqrt(p*(1-p)*( (1/num_imps_version_a) + (1/num_imps_version_b) ))
    t = (ctr_day_one_version_b - ctr_day_one_version_a)/(SE)
    return t, SE

# Simulating 3 consecutive days shows the cumulative t-stat rising past
# ~1.645 after about a day and a half, while the cumulative SE shrinks as
# the sample size grows.
# Plot the cumulative t-statistic and standard error over the first days.
n_consecutive_days = 3
ndays = list(range(1, n_consecutive_days + 1))

# tStatAfterNDays reseeds the generator internally, so a single call per day
# yields exactly the same (t, SE) pairs as two separate list-comprehensions.
day_results = [tStatAfterNDays(day) for day in ndays]
tStatsCumulative = [t_stat for t_stat, _ in day_results]
SEStatsCumulative = [se for _, se in day_results]

fig = plt.figure(figsize=(18, 6))
panels = [(tStatsCumulative, 'Cumulative T-Stat'), (SEStatsCumulative, 'Cumulative SE')]
for position, (series, label) in enumerate(panels, start=1):
    plt.subplot(1, 2, position)
    plt.plot(ndays, series)
    plt.grid()
    plt.title(label)
    plt.xlabel('Number of Days')
    plt.ylabel(label)

# Observations:
# * After roughly a day and a half there is a statistically significant
#   difference between the CTRs, with version-B outperforming version-A.
# * Whether to stop early, weekend effects, and attribution are design
#   choices needing business context; deciding alpha/beta and the sample
#   size BEFORE the experiment also bounds its time and budget.
# Version-A over 3 days: 300 impressions per arm (100/day), mean CTR 0.09.
# Critical cutoff (one-sided, alpha = 0.05): p_crit+ = p0 + 1.645 * SE,
# i.e. 95 % of version-A's sampling distribution lies below ~0.1172.
n_a = 100*3
ctr_a = 0.09
proportion_variance = ctr_a * (1 - ctr_a) / n_a
SE = np.sqrt(proportion_variance)
p_crit_a = ctr_a + 1.645 * SE
p_crit_a

# Version-B is then assumed to sit right at this cutoff (~0.117): half of
# its distribution falls in version-A's acceptance region, i.e. a 50 %
# Type-II error.  A larger sample shrinks both SEs and reduces that error;
# for power 0.9 (z = 1.29), version-B's mean must lie at least 1.29 SEs
# beyond the cutoff.
# Check: with mu_b = 14.3 % and n = 300 per arm, the version-A cutoff 0.117
# sits about -1.29 standard errors from version-B's mean, i.e.
#   z = (0.117 - mu_b) / sqrt(p(1-p)/n)  ~=  -1.29.
p = 0.143
se_b = np.sqrt(p * (1 - p) / 300)
z = (0.117 - p) / se_b
z
# => For 600 samples (300 per arm), alpha 0.05 and beta 0.1 (power 0.9),
#    significance is reached when version-B averages a 14.3 % CTR.
github_jupyter
<a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/1_getting_started_roadmap/5_update_hyperparams/1_model_params/5)%20Switch%20deep%20learning%20model%20from%20default%20mode.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# Goals

### Learn how to switch models post default mode

# Table of Contents

## [Install](#0)
## [Load experiment with resnet defaults](#1)
## [Change model to densenet](#2)
## [Train](#3)

<a id='0'></a>
# Install Monk

## Using pip (Recommended)

- colab (gpu)
    - All backends: `pip install -U monk-colab`
- kaggle (gpu)
    - All backends: `pip install -U monk-kaggle`
- cuda 10.2
    - All backends: `pip install -U monk-cuda102`
    - Gluon backend: `pip install -U monk-gluon-cuda102`
    - Pytorch backend: `pip install -U monk-pytorch-cuda102`
    - Keras backend: `pip install -U monk-keras-cuda102`
- cuda 10.1
    - All backends: `pip install -U monk-cuda101`
    - Gluon backend: `pip install -U monk-gluon-cuda101`
    - Pytorch backend: `pip install -U monk-pytorch-cuda101`
    - Keras backend: `pip install -U monk-keras-cuda101`
- cuda 10.0
    - All backends: `pip install -U monk-cuda100`
    - Gluon backend: `pip install -U monk-gluon-cuda100`
    - Pytorch backend: `pip install -U monk-pytorch-cuda100`
    - Keras backend: `pip install -U monk-keras-cuda100`
- cuda 9.2
    - All backends: `pip install -U monk-cuda92`
    - Gluon backend: `pip install -U monk-gluon-cuda92`
    - Pytorch backend: `pip install -U monk-pytorch-cuda92`
    - Keras backend: `pip install -U monk-keras-cuda92`
- cuda 9.0
    - All backends: `pip install -U monk-cuda90`
    - Gluon backend: `pip install -U monk-gluon-cuda90`
    - Pytorch backend: `pip install -U monk-pytorch-cuda90`
    - Keras backend: `pip install -U monk-keras-cuda90`
- cpu
    - All backends: `pip install -U monk-cpu`
    - Gluon backend: `pip install -U monk-gluon-cpu`
    - Pytorch backend: `pip install -U monk-pytorch-cpu`
    - Keras backend: `pip install -U monk-keras-cpu`
## Install Monk Manually (Not recommended) ### Step 1: Clone the library - git clone https://github.com/Tessellate-Imaging/monk_v1.git ### Step 2: Install requirements - Linux - Cuda 9.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt` - Cuda 9.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt` - Cuda 10.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt` - Cuda 10.1 - `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt` - Cuda 10.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt` - Windows - Cuda 9.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt` - Cuda 9.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt` - Cuda 10.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt` - Cuda 10.1 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt` - Cuda 10.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt` - Mac - CPU (Non gpu system) - `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt` - Misc - Colab (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt` - Kaggle (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt` ### Step 3: Add to system path (Required for every terminal or kernel run) - `import sys` - `sys.path.append("monk_v1/");` ## Dataset - Weather Classification - https://data.mendeley.com/datasets/4drtyfjtfy/1 ``` ! 
wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1pxe_AmHYXwpTMRkMVwGeFgHS8ZpkzwMJ' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1pxe_AmHYXwpTMRkMVwGeFgHS8ZpkzwMJ" -O weather.zip && rm -rf /tmp/cookies.txt
# The command above downloads the weather-classification dataset from Google
# Drive (the nested wget handles the large-file confirmation token), then:
! unzip -qq weather.zip

# --- Imports ---
#Using gluon backend
# When installed using pip
from monk.gluon_prototype import prototype

# When installed manually (Uncomment the following)
#import os
#import sys
#sys.path.append("monk_v1/");
#sys.path.append("monk_v1/monk/");
#from monk.gluon_prototype import prototype

# --- Load experiment with resnet defaults ---
gtf = prototype(verbose=1);
gtf.Prototype("Project", "experiment-switch-models");
gtf.Default(dataset_path="weather/train",
            model_name="resnet18_v1",
            freeze_base_network=True,   # If True, then freeze base
            num_epochs=5);
#Read the summary generated once you run this cell.
# Per that summary: model resnet18_v1 loads with 41 potentially trainable
# layers, of which only 1 is actually trainable (base frozen).

# --- Switch now to densenet ---
gtf.update_model_name("densenet121");
# Very important: reload the network after changing the model name
gtf.Reload();

# --- Train ---
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
github_jupyter
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import gpplot as gpp from poola import core as pool import anchors import core_functions as fns gpp.set_aesthetics(palette='Set2') def run_guide_residuals(lfc_df, paired_lfc_cols=[]): ''' Calls get_guide_residuals function from anchors package to calculate guide-level residual z-scores Inputs: 1. lfc_df: data frame with log-fold changes (relative to pDNA) 2. paired_lfc_cols: grouped list of initial populations and corresponding resistant populations ''' lfc_df = lfc_df.drop_duplicates() if not paired_lfc_cols: paired_lfc_cols = fns.pair_cols(lfc_df)[1] #get lfc pairs modified = [] unperturbed = [] #reference_df: column1 = modifier condition, column2 = unperturbed column ref_df = pd.DataFrame(columns=['modified', 'unperturbed']) row = 0 #row index for reference df for pair in paired_lfc_cols: #number of resistant pops in pair = len(pair)-1 res_idx = 1 #if multiple resistant populations, iterate while res_idx < len(pair): ref_df.loc[row, 'modified'] = pair[res_idx] ref_df.loc[row, 'unperturbed'] = pair[0] res_idx +=1 row +=1 print(ref_df) #input lfc_df, reference_df #guide-level residuals_lfcs, all_model_info, model_fit_plots = anchors.get_guide_residuals(lfc_df, ref_df) return residuals_lfcs, all_model_info, model_fit_plots ``` ## Data summary ``` reads = pd.read_excel('../../Data/Reads/Wilen/supplementary_reads_v1.xlsx', sheet_name= 'VeroE6 SARS-2 genomewide reads') reads # Gene Annotations chip = pd.read_csv('../../Data/Interim/Goujon/VeroE6/CP0070_Chlorocebus_sabeus_remapped.chip', sep ='\t') chip = chip.rename(columns={'Barcode Sequence':'Construct Barcode'}) chip_reads = pd.merge(chip[['Construct Barcode', 'Gene']], reads, on = ['Construct Barcode'], how = 'right') chip_reads = chip_reads.rename(columns={'Gene':'Gene Symbol'}) #Calculate lognorm cols = chip_reads.columns[2:].to_list() #reads columns = start at 3rd column lognorms = 
fns.get_lognorm(chip_reads.dropna(), cols = cols)
lognorms
# lognorms = lognorms.rename(columns={'count_lognorm':'pDNA_lognorm'})

# ## Quality Control — ### Population Distributions
#Calculate log-fold change relative to pDNA
target_cols = list(lognorms.columns[3:])
pDNA_lfc = fns.calculate_lfc(lognorms,target_cols)
pDNA_lfc
# Average across Cas9-v2 columns
Cas9v2_data_cols = [col for col in pDNA_lfc.columns if 'Cas9-v2' in col]
Cas9v2_cols = ['Construct Barcode', 'Gene Symbol']+ Cas9v2_data_cols
Cas9v2_df = pDNA_lfc.copy()[Cas9v2_cols]
Cas9v2_df
# Replace spaces with '_' for following functions
new_col_list=['Construct Barcode', 'Gene Symbol']
for col in Cas9v2_data_cols:
    new_col = col.replace(' ','_')
    new_col_list.append(new_col)
Cas9v2_df.columns = new_col_list
Cas9v2_df

# KDE plots of log-fold-change distributions, one subplot per MOI condition.
fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(5,6))
i,j = 0,0   # NOTE(review): j is never used
cols=[]
mock_col = [col for col in Cas9v2_df.columns if 'Mock' in col]
hi_MOI_cols = mock_col+ [col for col in Cas9v2_df.columns if 'Hi-MOI' in col]
cols.append(hi_MOI_cols)
lo_MOI_cols = mock_col+ [col for col in Cas9v2_df.columns if 'Lo-MOI' in col]
cols.append(lo_MOI_cols)
# NOTE(review): the loop nesting below is reconstructed from a flattened
# source — per-column plotting inside, per-subplot labelling outside.
for k,c in enumerate(cols):
    # k = sub-list index, c = list of columns in sub-list
    for l, c1 in enumerate(c):
        if 'Mock' in c1:
            label1 = c1 + ' (initial)' #'Initial population'
        else:
            label1 = c1 #'Resistant population-'+str(l)
        Cas9v2_df[c1].plot(kind='kde',c=sns.color_palette('Set2')[l],label=label1, ax=ax[i], legend=True)
    ax[i].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    sns.despine()
    t = ax[i].set_xlabel('Log-fold changes')
    title = ','.join(c[0].split('_')[:2])
    t = ax[i].set_title(title)
    i+=1
fig.savefig('../../Figures/Wilen_Vero_population_distributions.png', bbox_inches="tight")

# ### Distributions of control sets
# NO_SITE controls -> default controls
controls = fns.get_controls(Cas9v2_df, control_name=['NO_SITE'])
fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(5,6))
i,j = 0,0
for k,c in enumerate(cols):
    # k = sub-list index, c = list of columns in sub-list
    for l, c1 in enumerate(c):
        # NOTE(review): both branches build the same label; the if/else is
        # redundant but kept byte-identical
        if l==0:
            label1 = c1 + ', NO_SITE' #'Initial population, NO_SITE'
        else:
            label1 = c1 + ', NO_SITE' #'Resistant population-'+str(l) + ', NO_SITE'
        controls[c1].plot(kind='kde',color=sns.color_palette('Set2')[l],label=label1, ax=ax[i], legend=True)
    ax[i].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    sns.despine()
    t = ax[i].set_xlabel('Log-fold changes')
    title = ','.join(c[0].split('_')[:2])
    t = ax[i].set_title(title)
    i+=1
fig.savefig('../../Figures/Wilen_Vero_control_distributions.png', bbox_inches="tight")

# ### ROC_AUC — essential genes as true positives, non-essential as false.
ess_genes, non_ess_genes = fns.get_gene_sets()
tp_genes = ess_genes.loc[:, 'Gene Symbol'].to_list()
fp_genes = non_ess_genes.loc[:, 'Gene Symbol'].to_list()
roc_auc, roc_df = pool.get_roc_aucs(Cas9v2_df, tp_genes, fp_genes, gene_col = 'Gene Symbol', score_col=mock_col)
fig,ax=plt.subplots(figsize=(6,6))
ax=sns.lineplot(data=roc_df, x='fpr',y='tpr', ci=None, label = 'Mock,' + str(round(roc_auc,2)))
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.title('ROC-AUC')
plt.xlabel('False Positive Rate (non-essential)')
plt.ylabel('True Positive Rate (essential)')

# ## Gene level analysis — ### Residual z-scores
lfc_df = Cas9v2_df.drop('Gene Symbol', axis = 1)
lfc_df
# run_guide_residuals(lfc_df.drop_duplicates(), cols)
residuals_lfcs, all_model_info, model_fit_plots = run_guide_residuals(lfc_df, cols)
residuals_lfcs
# Collapse guides onto genes (pseudogenes of 4 guides; NO_SITE as controls).
guide_mapping = pool.group_pseudogenes(chip[['Construct Barcode', 'Gene']], pseudogene_size=4, gene_col='Gene', control_regex=['NO_SITE'])
guide_mapping = guide_mapping.rename(columns={'Gene':'Gene Symbol'})
gene_residuals = anchors.get_gene_residuals(residuals_lfcs.drop_duplicates(), guide_mapping)
gene_residuals
gene_residual_sheet = fns.format_gene_residuals(gene_residuals, guide_min = 3, guide_max = 5)
guide_residual_sheet = pd.merge(guide_mapping, residuals_lfcs.drop_duplicates(), on = 'Construct Barcode', how = 'inner')
guide_residual_sheet
# Write the GEO-submission workbook and the per-screen workbook.
with pd.ExcelWriter('../../Data/Processed/GEO_submission_v2/VeroE6_Wilen_v5.xlsx') as writer:
    gene_residual_sheet.to_excel(writer, sheet_name='VeroE6_avg_zscore', index =False)
    reads.to_excel(writer, sheet_name='VeroE6_genomewide_reads', index =False)
    guide_mapping.to_excel(writer, sheet_name='VeroE6_guide_mapping', index =False)
with pd.ExcelWriter('../../Data/Processed/Individual_screens_v2/VeroE6_Wilen_indiv_v5.xlsx') as writer:
    gene_residuals.to_excel(writer, sheet_name='condition_genomewide_zscore', index =False)
    guide_residual_sheet.to_excel(writer, sheet_name='guide-level_zscore', index =False)
github_jupyter
# Now You Code 2: Paint Pricing
#
# Price per gallon: everyday $19.99, select $24.99, premium $32.99.
# Computerized color matching adds $4.99 per gallon.  Prompt for the paint
# quality and color matching, then print the price per gallon, e.g.:
#   Total price of select paint with color matching is $29.98
#
# Fixes vs the original cell: the prompts/output now match the example runs,
# the catch-all bare `except` (which silently swallowed every error) is
# replaced with explicit validation, and the y/n answer is validated
# (Step 3, questions 1 and 2).

# Per-gallon base prices by quality, plus the color-matching surcharge.
PAINT_PRICES = {"everyday": 19.99, "select": 24.99, "premium": 32.99}
COLOR_MATCH_FEE = 4.99


def paint_price(quality, color_match):
    """Return the per-gallon price for `quality`.

    Adds the color-matching fee when `color_match` is truthy.
    Raises ValueError for an unknown quality.
    """
    if quality not in PAINT_PRICES:
        raise ValueError("that is not a paint quality")
    price = PAINT_PRICES[quality]
    if color_match:
        price += COLOR_MATCH_FEE
    return price


def main():
    """Prompt for quality and color matching; print the total per-gallon price."""
    qualities = list(PAINT_PRICES)
    quality = input("Which paint quality do you require %s ?" % qualities)
    if quality not in PAINT_PRICES:
        # invalid quality: report and exit (Step 3, question 1)
        print("that is not a paint quality")
        return
    answer = input("Do you require color matching [y/n] ?")
    if answer not in ("y", "n"):
        # anything other than y/n is rejected (Step 3, question 2)
        print("you must enter y or n")
        return
    color_match = answer == "y"
    total = paint_price(quality, color_match)
    suffix = "with color matching" if color_match else "without color matching"
    print("Total price of %s paint %s is $%.2f" % (quality, suffix, total))


# In Jupyter the cell runs as __main__, so the interactive flow still starts;
# under import (e.g. tests) the prompts are skipped.
if __name__ == "__main__":
    main()
Answer: 3. Why can't we use Python's `try...except` in this example? Answer: 4. How many times (at minimum) must we execute this program and check the results before we can be reasonably assured it is correct? Answer: ## Step 4: Reflection Reflect upon your experience completing this assignment. This should be a personal narrative, in your own voice, and cite specifics relevant to the activity as to help the grader understand how you arrived at the code you submitted. Things to consider touching upon: Elaborate on the process itself. Did your original problem analysis work as designed? How many iterations did you go through before you arrived at the solution? Where did you struggle along the way and how did you overcome it? What did you learn from completing the assignment? What do you need to work on to get better? What was most valuable and least valuable about this exercise? Do you have any suggestions for improvements? To make a good reflection, you should journal your thoughts, questions and comments while you complete the exercise. Keep your response to between 100 and 250 words. `--== Write Your Reflection Below Here ==--`
github_jupyter
from path import Path
from PIL import Image
import cv2
import random
import pandas as pd
import pickle
import argparse  # FIX: argparse was used below but never imported


def arg_parse():
    """Parse command-line options; -n/--num is the number of evaluations."""
    # FIX: the parser was constructed twice; the first instance was dead.
    parser = argparse.ArgumentParser(
        prog="annotation.py",
        usage="annotation.py -n <<num_of_evaluation>>",
        description="",
        add_help=True,
    )
    parser.add_argument("-n", "--num", help="num of evaluation", type=int, default=None)
    args = parser.parse_args()
    return args


def get_filepath_list(dir_path):
    """Return all png/jpg/jpeg image paths directly under dir_path."""
    imgs = Path(dir_path).files('*.png')
    imgs += Path(dir_path).files('*.jpg')
    imgs += Path(dir_path).files('*.jpeg')
    return imgs


def hconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):
    """Horizontally concatenate images, first resizing each to the smallest
    height in the list (aspect ratio preserved)."""
    h_min = min(im.shape[0] for im in im_list)
    im_list_resize = [cv2.resize(im, (int(im.shape[1] * h_min / im.shape[0]), h_min),
                                 interpolation=interpolation)
                      for im in im_list]
    return cv2.hconcat(im_list_resize)


def evaluate_images(path_list, rand=False, n_shows=None, username=None):
    """Show each image and record a 1-5 keyboard score; 'q' stops early.

    Returns a DataFrame with columns filename/score/user.
    """
    filename_list = []
    score_list = []
    rep_list = [ord(str(i)) for i in range(1, 6)]  # key codes for '1'..'5'
    key_q = ord('q')
    if rand:
        path_list = random.sample(path_list, len(path_list))
    if n_shows is None:
        n_shows = len(path_list)
    for path in path_list[:n_shows]:
        img = cv2.imread(path)
        cv2.namedWindow("image", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
        cv2.resizeWindow('image', 800, 600)
        cv2.imshow('image', img)
        key = 0
        # FIX: key codes are ints — compare with ==/!=, not identity (is)
        while (key not in rep_list) and key != key_q:
            key = cv2.waitKey(0)
        cv2.destroyWindow('image')
        if key == key_q:
            break
        filename_list.append(path.rsplit('/')[-1])
        score_list.append(rep_list.index(key) + 1)  # map key code back to 1-5
    df = pd.DataFrame()
    df['filename'] = filename_list
    df['score'] = score_list
    df['user'] = username
    return df


def evaluate_images_relative(path_list, combination_list, username=None):
    """Pairwise comparison: show image i next to each partner c; 'f' votes
    left, 'j' votes right, 'q' quits.  Returns per-image scores and counts.

    NOTE: mutates combination_list (removes the mirrored pair once shown).
    """
    filename_list = [path.rsplit('/')[-1] for path in path_list]
    score_list = [0 for i in range(len(path_list))]
    num_evals = [0 for i in range(len(path_list))]
    key_f, key_j, key_q = ord('f'), ord('j'), ord('q')
    rep_list = [key_f, key_j, key_q]
    end_flag = False
    for i, c_list in enumerate(combination_list):
        img1 = cv2.imread(path_list[i])
        for c in c_list:
            img2 = cv2.imread(path_list[c])
            merged = hconcat_resize_min([img1, img2])
            cv2.namedWindow("image", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
            cv2.resizeWindow('image', 1200, 450)
            cv2.moveWindow('image', 100, 200)
            cv2.imshow('image', merged)
            key = 0
            while key not in rep_list:
                key = cv2.waitKey(0)
            cv2.destroyWindow('image')
            # FIX: ==, not `is`, for int comparison
            if key == key_f:
                score_list[i] = score_list[i] + 1
                num_evals[i] = num_evals[i] + 1
                num_evals[c] = num_evals[c] + 1
            elif key == key_j:
                score_list[c] = score_list[c] + 1
                num_evals[i] = num_evals[i] + 1
                num_evals[c] = num_evals[c] + 1
            else:
                end_flag = True
                break
            # drop the mirrored pair so (c, i) is not shown again
            combination_list[c].remove(i)
        if end_flag:
            break
    df = pd.DataFrame()
    df['filename'] = filename_list
    df['score'] = score_list
    df['num_of_evaluations'] = num_evals
    df['user'] = username
    return df


def evaluate_images_relative_random(path_list, combination_list, start_pos, num=None, username=None):
    """Like evaluate_images_relative, but deduplicates the pairs, shuffles
    them, randomizes left/right placement, and overlays a progress counter."""

    def get_random_combination_list(combination_list):
        # collect each unordered pair exactly once, then shuffle
        combination_set = set()
        for i, clist in enumerate(combination_list):
            for c in clist:
                tmp_tuple = tuple(sorted([i, c]))
                combination_set.add(tmp_tuple)
        return random.sample(list(combination_set), len(combination_set))

    filename_list = [path.rsplit('/')[-1] for path in path_list]
    score_list = [0 for i in range(len(path_list))]
    num_evals = [0 for i in range(len(path_list))]
    key_f, key_j, key_q = ord('f'), ord('j'), ord('q')
    rep_list = [key_f, key_j, key_q]
    end_flag = False
    font = cv2.FONT_HERSHEY_SIMPLEX
    if num is None:
        num = len(combination_list)
    # NOTE(review): slice [start_pos:num] looks like it should be
    # [start_pos:start_pos + num]; kept as-is pending confirmation.
    random_combination_list = get_random_combination_list(combination_list[start_pos:num])
    for count, (i, j) in enumerate(random_combination_list):
        s1, s2 = random.sample([i, j], 2)  # randomize left/right placement
        img1 = cv2.imread(path_list[s1])
        img2 = cv2.imread(path_list[s2])
        merged = hconcat_resize_min([img1, img2])
        cv2.namedWindow("image", cv2.WINDOW_KEEPRATIO | cv2.WINDOW_NORMAL)
        cv2.resizeWindow('image', 1200, 450)
        cv2.moveWindow('image', 100, 200)
        # progress counter in the bottom-right corner
        text_pos = (merged.shape[1] - 250, merged.shape[0] - 50)
        cv2.putText(merged, "{}/{}".format(count+1, len(random_combination_list)),
                    text_pos, font, 1.5, (0, 0, 0), 2, cv2.LINE_AA)
        cv2.imshow('image', merged)
        key = 0
        while key not in rep_list:
            key = cv2.waitKey(0)
        cv2.destroyWindow('image')
        if key == key_f:
            score_list[s1] = score_list[s1] + 1
            num_evals[s1] = num_evals[s1] + 1
            num_evals[s2] = num_evals[s2] + 1
        elif key == key_j:
            score_list[s2] = score_list[s2] + 1
            num_evals[s1] = num_evals[s1] + 1
            num_evals[s2] = num_evals[s2] + 1
        else:
            end_flag = True
            break
        if end_flag:  # redundant guard kept from the two-loop variant
            break
    df = pd.DataFrame()
    df['filename'] = filename_list
    df['score'] = score_list
    df['num_of_evaluations'] = num_evals
    df['user'] = username
    return df


def save_evaluation_csv(df, username, save_path=None):
    """Write the evaluation DataFrame to ./output/<username>.csv by default."""
    if save_path is None:
        save_path = './output/' + username + '.csv'
    df.to_csv(save_path)


def main():
    """Absolute (1-5) scoring over ./images in random order."""
    print('Please write your name : ', end='')
    username = input()
    filepath_list = get_filepath_list('./images')
    df_result = evaluate_images(filepath_list, rand=True, username=username)
    save_evaluation_csv(df_result, username)
    print('Thank you!')


def main_relative():
    """Pairwise scoring of the first 50 omelette-rice images."""
    print('Please write your name : ', end='')
    username = input()
    filepath_list = get_filepath_list('./images/omelette_rice/')[:50]
    with open('./pickle/combination_list.pickle', 'rb') as f:
        combination_list = pickle.load(f)
    df_result = evaluate_images_relative(filepath_list, combination_list, username=username)
    save_evaluation_csv(df_result, username)
    print('Thank you!')


def main_relative_random():
    """Randomized pairwise scoring, resuming from a persisted start position."""
    print('Please enter your name : ', end='')
    username = input()
    print('Please enter the number of ratings : ', end='')
    num = int(input())
    filepath_list = get_filepath_list('../images/omelette_rice_500/images/')
    try:
        # FIX: path was '..pickle/...' (missing slash) and could never load,
        # so start_pos always fell back to 0
        with open('../pickle/start_position.pickle', 'rb') as f:
            start_pos = pickle.load(f)
    except (OSError, EOFError, pickle.PickleError):
        start_pos = 0
    with open('../pickle/combination500_list.pickle', 'rb') as f:
        combination_list = pickle.load(f)
    df_result = evaluate_images_relative_random(filepath_list, combination_list, start_pos, num, username=username)
    save_evaluation_csv(df_result, username)
    # FIX: the new start position was computed but never written to the file
    with open('../pickle/start_position.pickle', 'wb') as f:
        start_pos = start_pos + num
        pickle.dump(start_pos, f)
    print('Thank you!')


if __name__ == '__main__':
    main_relative_random()


# --- scratch cells from the notebook ---
with open('../pickle/combination500_list.pickle', 'rb') as f:
    combination_list = pickle.load(f)


# register each unordered pair in a set
def get_random_combination_set(combination_list):
    """Deduplicate ordered pairs into unordered tuples; return them shuffled."""
    combination_set = set()
    for i, clist in enumerate(combination_list):
        for c in clist:
            tmp_tuple = tuple(sorted([i, c]))
            combination_set.add(tmp_tuple)
    return random.sample(list(combination_set), len(combination_set))


[i for i in range(10)][5:10]
github_jupyter
# Extra Trees Classifier with MinMax Scaler

### Required Packages
```
import numpy as np
import pandas as pd
import seaborn as se
import warnings
import matplotlib.pyplot as plt
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,plot_confusion_matrix
warnings.filterwarnings('ignore')
```
### Initialization

Filepath of CSV file
```
#filepath
file_path= ""
```
List of features which are required for model training.
```
#x_values
features=[]
```
Target feature for prediction.
```
#y_value
target=''
```
### Data Fetching

Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.

We will use the pandas library to read the CSV file using its storage path. And we use the head function to display the initial rows.
```
df=pd.read_csv(file_path)
df.head()
```
### Feature Selections

It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.

We will assign all the required input features to X and target/outcome to Y.
```
X = df[features]
Y = df[target]
```
### Data Preprocessing

Since the majority of the machine learning models in the Sklearn library don't handle string category data and null values, we have to explicitly remove or replace null values. The snippet below has functions which remove null values, if any exist, and convert the string class data in the datasets by encoding it to integer classes.
```
def NullClearner(df):
    """Fill missing values in a Series: mean for numeric dtypes, mode otherwise."""
    # FIX: the check was `df.dtype in ["float64","int64"]`, which missed
    # other numeric dtypes (int32, float32, ...) and filled them with the
    # mode instead of the mean.
    if(isinstance(df, pd.Series) and pd.api.types.is_numeric_dtype(df)):
        df.fillna(df.mean(),inplace=True)
        return df
    elif(isinstance(df, pd.Series)):
        df.fillna(df.mode()[0],inplace=True)
        return df
    else:
        return df

def EncodeX(df):
    """One-hot encode the categorical feature columns."""
    return pd.get_dummies(df)

def EncodeY(df):
    """Label-encode the target when it has more than two classes."""
    if len(df.unique())<=2:
        return df
    else:
        un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')
        df=LabelEncoder().fit_transform(df)
        EncodedT=[xi for xi in range(len(un_EncodedT))]
        print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT))
        return df

x=X.columns.to_list()
for i in x:
    X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
```
#### Correlation Map

In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
```
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
```
#### Distribution Of Target Variable
```
plt.figure(figsize = (10,6))
se.countplot(Y)
```
### Data Splitting

The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
```
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 123)
```
### Data Rescaling

This estimator scales and translates each feature individually such that it is in the given range on the training set, e.g. between zero and one.

The transformation is given by:

    X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
    X_scaled = X_std * (max - min) + min

where min, max = feature_range.
```
minmax_scaler = MinMaxScaler()
X_train = minmax_scaler.fit_transform(X_train)
X_test = minmax_scaler.transform(X_test)
```
### Model

ExtraTreesClassifier is an ensemble learning method fundamentally based on decision trees. ExtraTreesClassifier, like RandomForest, randomizes certain decisions and subsets of data to minimize over-learning from the data and overfitting.

#### Model Tuning Parameters

1. n_estimators: int, default=100
> The number of trees in the forest.

2. criterion: {“gini”, “entropy”}, default="gini"
> The function to measure the quality of a split. Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain.

3. max_depth: int, default=None
> The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples.

4. max_features: {“auto”, “sqrt”, “log2”}, int or float, default=”auto”
> The number of features to consider when looking for the best split:
```
model=ExtraTreesClassifier(n_jobs = -1,random_state = 123)
model.fit(X_train,y_train)
```
#### Model Accuracy

The score() method returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since you require for each sample that each label set be correctly predicted.
```
print("Accuracy score {:.2f} %\n".format(model.score(X_test,y_test)*100))
```
#### Confusion Matrix

A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
```
plot_confusion_matrix(model,X_test,y_test,cmap=plt.cm.Blues)
```
#### Classification Report

A classification report is used to measure the quality of predictions from a classification algorithm: how many predictions are true and how many are false.

* **where**:
- Precision:- Accuracy of positive predictions.
- Recall:- Fraction of positives that were correctly identified.
- f1-score:- Percent of positive predictions that were correct.
- support:- Support is the number of actual occurrences of the class in the specified dataset.
```
print(classification_report(y_test,model.predict(X_test)))
```
#### Feature Importances

Feature importance refers to techniques that assign a score to features based on how useful they are for making the prediction.
```
plt.figure(figsize=(8,6))
n_features = len(X.columns)
plt.barh(range(n_features), model.feature_importances_, align='center')
plt.yticks(np.arange(n_features), X.columns)
plt.xlabel("Feature importance")
plt.ylabel("Feature")
plt.ylim(-1, n_features)
```
#### Creator: Ayush Gupta , Github: [Profile](https://github.com/guptayush179)
github_jupyter
# Creating Word Vectors with word2vec Let's start with NLTK #### Load Dependencies ``` import nltk from nltk.tokenize import word_tokenize, sent_tokenize import gensim from gensim.models.word2vec import Word2Vec from sklearn.manifold import TSNE import pandas as pd from bokeh.io import output_notebook from bokeh.plotting import show, figure %matplotlib inline nltk.download('punkt') ``` #### Load Data ``` nltk.download('gutenberg') from nltk.corpus import gutenberg gutenberg.fileids() ``` #### Tokenize Text ``` # Due to lack of resources, I'm not working with the full Gutenberg dataset (18 books). gberg_sent_tokens = sent_tokenize(gutenberg.raw(fileids=['bible-kjv.txt', 'austen-emma.txt', 'austen-persuasion.txt', 'austen-sense.txt', 'carroll-alice.txt'])) gberg_sent_tokens[0:5] gberg_sent_tokens[1] word_tokenize(gberg_sent_tokens[1]) word_tokenize(gberg_sent_tokens[1])[14] # Due to lack of resources, I'm not working with the full Gutenberg dataset (18 books). gberg_sents = gutenberg.sents(fileids=['bible-kjv.txt', 'austen-emma.txt', 'austen-persuasion.txt', 'austen-sense.txt', 'carroll-alice.txt']) gberg_sents[0:5] gberg_sents[4][14] gutenberg.words() # Due to lack of resources, I'm not working with the full Gutenberg dataset (18 books). len(gutenberg.words(fileids=['bible-kjv.txt', 'austen-emma.txt', 'austen-persuasion.txt', 'austen-sense.txt', 'carroll-alice.txt'])) ``` #### Run Word2Vec ``` # size == dimensions # window 10: 20 context words, 10 to the left and 10 to the right model = Word2Vec(sentences=gberg_sents, size=64, sg=1, window=10, min_count=5, seed=42, workers=2) # We don't have to save the model if we don't want to. It's being done here as demonstration. 
model.save('raw_gutenberg_model.w2v')
```
#### Explore the Model
```
model = Word2Vec.load('raw_gutenberg_model.w2v')
model['house']
len(model['house'])
model.most_similar('house')
model.most_similar('think')
model.most_similar('day')
model.most_similar('father')
model.doesnt_match('mother father daughter house'.split())
model.similarity('father', 'mother')
model.most_similar(positive=['father', 'woman'], negative=['man'])
model.most_similar(positive=['son', 'woman'], negative=['man'])
model.most_similar(positive=['husband', 'woman'], negative=['man'])
model.most_similar(positive=['king', 'woman'], negative=['man'], topn=30)
```
#### Reduce word vector dimensionality with t-SNE

t-Distributed Stochastic Neighbor Embedding
```
len(model.wv.vocab)
X = model[model.wv.vocab]
tsne = TSNE(n_components=2, n_iter=200)
X_2d = tsne.fit_transform(X)
coords_df = pd.DataFrame(X_2d, columns=['x', 'y'])
coords_df['token'] = model.wv.vocab.keys()
coords_df.head()
coords_df.to_csv('raw_gutenberg_tsne.csv', index=False)
```
#### Visualise 2D representation of word vectors
```
# FIX: this was `coorrds_df = pd.read_csv(...)`, so the reloaded CSV went
# into a misspelled variable that was never used — the cells below kept
# reading the in-memory `coords_df` instead.
coords_df = pd.read_csv('raw_gutenberg_tsne.csv')
coords_df.head()
_ = coords_df.plot.scatter('x', 'y', figsize=(8,8), marker='o', s=10, alpha=0.2)
output_notebook()
subset_df = coords_df.sample(n=1000)
p = figure(plot_width=600, plot_height=600)
p.text(x=subset_df.x, y=subset_df.y, text=subset_df.token)
show(p)
```
github_jupyter
# Co je API? ## Klient a server API (Application Programming Interface) je dohoda mezi dvěma stranami o tom, jak si mezi sebou budou povídat. Těmto stranám se říká klient a server. **Server** je ta strana, která má zajímavé informace nebo něco zajímavého umí a umožňuje ostatním na internetu, aby toho využili. Server je program, který donekonečna běží na nějakém počítači a je připraven všem ostatním na internetu odpovídat na požadavky. **Klient** je program, který posílá požadavky na server a z odpovědí se snaží poskládat něco užitečného. Klient je tedy mobilní aplikace s mráčky a sluníčky nebo náš prohlížeč, v němž si můžeme otevřít kurzovní lístek ČNB. Je to ale i Heureka robot, který za Heureku načítá informace o zboží v e-shopech. ![title](static/giphy.gif) # Základní pojmy Než se pustíme do tvorby klienta, projdeme si některé základní pojmy kolem API. ## Protokol Celé dorozumívání mezi klientem a serverem se odehrává přes tzv. protokol. To není nic jiného, než smluvený způsob, co bude kdo komu posílat a jakou strukturu to bude mít. Protokolů je v počítačovém světě spousta, ale nás bude zajímat jen HTTP, protože ten využívají webová API a ostatně i web samotný. Není to náhoda, že adresa internetových stránek v prohlížeči zpravidla začíná http:// (nebo https://). ### HTTP Dorozumívání mezi klientem a serverem probíhá formou požadavku (HTTP request), jenž posílá klient na server, a odpovědi (HTTP response), kterou server posílá zpět. Každá z těchto zpráv má své náležitosti. ### Požadavek + **metoda** (HTTP method): Například metoda GET má tu vlastnost, že pouze čte a nemůžeme s ní tedy přes API něco změnit - je tzv. bezpečná. Kromě metody GET existují ještě metody POST (vytvořit), PUT (aktualizovat) a DELETE (odstranit), které nepotřebujeme, protože data z API budeme pouze získávat. + **adresa s parametry** (URL s query parameters): Na konci běžné URL adresy otazník a za ním parametry. Pokud je parametrů víc, oddělují se znakem &. 
Adresa samotná nejčastěji určuje o jaká data půjde (v našem příkladě jsou to filmy) a URL parametry umožňují provést filtraci už na straně serveru a získat tím jen ta data, která nás opravdu zajímají (v našem případě dramata v délce 150 min) http://api.example.com/movies/ http://api.example.com/movies?genre=drama&duration=150 + **hlavičky** (headers): Hlavičky jsou vlastně jen další parametry. Liší se v tom, že je neposíláme jako součást adresy a na rozdíl od URL parametrů podléhají nějaké standardizaci a konvencím. + **tělo** (body): Tělo zprávy je krabice, kterou s požadavkem posíláme, a do které můžeme vložit, co chceme. Tedy nejlépe něco, čemu bude API na druhé straně rozumět. Tělo může být prázdné. V těle můžeme poslat obyčejný text, data v nějakém formátu, ale klidně i obrázek. Aby API na druhé straně vědělo, co v krabici je a jak ji má rozbalovat, je potřeba s tělem zpravidla posílat hlavičku Content-Type. Musíme vyčíst z dokumentace konkrétního API, jak požadavek správně poskládat. ### Odpověď + **status kód** (status code): Číselný kód, kterým API dává najevo, jak požadavek zpracovalo. Podle první číslice kódu se kódy dělí na různé kategorie: 1xx - informativní odpověď (požadavek byl přijat, ale jeho zpracování pokračuje) 2xx - požadavek byl v pořádku přijat a zpracován 3xx - přesměrování, klient potřebuje poslat další požadavek jinam, aby se dobral odpovědi 4xx - chyba na straně klienta (špatně jsme poskládali dotaz) 5xx - chyba na straně serveru (API nezvládlo odpovědět) + **hlavičky** (headers): Informace o odpovědi jako např. datum zpracování, formát odpovědi... + **tělo** (body): Tělo odpovědi - to, co nás zajímá většinou nejvíc ### Formáty Tělo může být v libovolném formátu. Může to být text, HTML, obrázek, PDF soubor, nebo cokoliv jiného. Hodnotě hlavičky Content-Type se dávají různé názvy: content type, media type, MIME type. Nejčastěji se skládá jen z typu a podtypu, které se oddělí lomítkem. 
Několik příkladů: + text/plain - obyčejný text + text/html - HTML + text/csv - CSV + image/gif - GIF obrázek + image/jpeg - JPEG obrázek + image/png - PNG obrázek + application/json - JSON + application/xml nebo text/xml - XML ### Formát JSON JSON vznikl kolem roku 2000 a brzy se uchytil jako stručnější náhrada za XML, především na webu a ve webových API. Dnes je to **nejspíš nejoblíbenější formát pro obecná strukturovaná data vůbec**. Jeho autorem je Douglas Crockford, jeden z lidí podílejících se na vývoji jazyka JavaScript. #### JSON je datový formát NE datový typ! Vstupem je libovolná datová struktura: + číslo + řetězec + pravdivostní hodnota + pole + objekt + None Výsutpem je vždy řetězec (string) ![title](static/null.jpg) Jazyk Python (a mnoho dalších) má podporu pro práci s JSON v základní instalaci (vestavěný). V případě jazyka Python si lze JSON splést především se slovníkem (dictionary). Je ale potřeba si uvědomit, že JSON je text, který může být uložený do souboru nebo odeslaný přes HTTP, ale nelze jej přímo použít při programování. Musíme jej vždy nejdříve zpracovat na slovníky a seznamy. ``` import json ``` V následujícím JSONu je pod klíčem "people" seznam slovníků s další strukturou: ``` people_info = ''' { "people": [ { "name": "John Smith", "phone": "555-246-999", "email": ["johns@gmail.com", "jsmith@gmail.com"], "is_employee": false }, { "name": "Jane Doe", "phone": "665-296-659", "email": ["janed@gmail.com", "djane@gmail.com"], "is_employee": true } ] } ''' ``` json.loads převede řetězec na objekt ``` data = json.loads(people_info) data type(data) type(data['people']) type(data['people'][0]) data['people'] data['people'][0] data['people'][0]['name'] ``` # Práce s API klienty ## Obecný klient Mobilní aplikace na počasí je klient, který někdo vytvořil pro jeden konkrétní úkol a pracovat umí jen s jedním konkrétním API. 
Takový klient je užitečný, pokud chceme akorát vědět, jaké je počasí, ale už méně, pokud si chceme zkoušet práci s více API zároveň. Proto existují obecní klienti. ### Prohlížeč jako obecný klient Pokud z API chceme pouze číst a API nevyžaduje žádné přihlašování, můžeme jej vyzkoušet i v prohlížeči, jako by to byla webová stránka. Pokud na stránkách ČNB navštívíme [kurzovní lístek](https://www.cnb.cz/cs/financni-trhy/devizovy-trh/kurzy-devizoveho-trhu/kurzy-devizoveho-trhu/) a úplně dole klikneme na [Textový formát](https://www.cnb.cz/cs/financni-trhy/devizovy-trh/kurzy-devizoveho-trhu/kurzy-devizoveho-trhu/denni_kurz.txt?date=19.02.2020), uvidíme odpověď z API serveru https://www.cnb.cz/cs/financni_trhy/devizovy_trh/kurzy_devizoveho_trhu/denni_kurz.txt ### Obecný klient v příkazové řádce: curl Pokud se k API budeme potřebovat přihlásit nebo s ním zkoušet dělat složitější věci než jen čtení, nebude nám prohlížeč stačit. Proto je dobré se naučit používat program curl. Spouští se v příkazové řádce a je to švýcarský nůž všech, kteří se pohybují kolem webových API. #### Příklady s curl ![title](static/curl.jpg) Když příkaz zadáme a spustíme, říkáme tím programu curl, že má poslat požadavek na uvedenou adresu a vypsat to, co mu ČNB pošle zpět. ![title](static/curl-return.jpg) ## Vlastní klient Obecného klienta musí ovládat člověk (ruční nastavování parametrů, pravidelné spuštění na základě podmínek či času atd.). To je přesně to, co potřebujeme, když si chceme nějaké API vyzkoušet, ale celý smysl API je v tom, aby je programy mohly využívat automaticky. Pokud chceme naprogramovat klienta pro konkrétní úkol, můžeme ve většině jazyků použít buď vestavěnou, nebo doinstalovanou knihovnu. V případě jazyka Python použijeme knihovnu Requests. 
## Práce s veřejným API Vyzkoušíme si dotazy na API s daty zločinnosti v UK, která jsou dostupná na měsiční bázi dle přibližné lokace (viz https://data.police.uk/docs/method/stops-at-location/) ``` import requests api_url = "https://data.police.uk/api/stops-street" ``` Nastavení parametrů volání API dle dokumentace https://data.police.uk/docs/method/stops-at-location/ Jako lokaci jsem vybral nechvalně proslulý obvod Hackney v Londýně :) ``` params = { "lat" : "51.5487158", "lng" : "-0.0613842", "date" : "2018-06" } ``` Pomocí funkce `get` pošleme požadavek na URL adresu API. URL adresa doplněná o parametry vypadá takto: https://data.police.uk/api/stops-street?lat=51.5487158&lng=-0.0613842&date=2018-06 a je možné ji vyzkoušet i v prohlížeči. V proměnné response máme uložený objekt, který obsahuje odpověď od API. ``` response = requests.get(api_url, params=params) ``` Pokud je status kód jiný, než 200 (success), vyhodí skript chybu a chybový status code ``` if response.status_code != 200: print('Failed to get data:', response.status_code) else: print('First 100 characters of data are') print(response.text[:100]) ``` Hlavička s doplňujícími informacemi o opdovědi ``` response.headers response.headers['content-type'] ``` Obsah odpovědi je řetězec bytů ``` response.content[:200] ``` Vypadá jako seznam (list) nebo slovník (dictionary), ale nechová se tak: ``` response[0]["age_range"] ``` Převedeme řetězec bytů metodou .json() z knihovny requests ``` data = response.json() ``` Ověříme datový typ ``` type(data) ``` Nyní můžeme přistupovat k "data" jako ke klasickému seznamu (list) ``` data[0]["age_range"] ``` Převední seznamu(list) na řetězec s parametry pro zobrazení struktury v čitelné podobě ``` datas = json.dumps(data, sort_keys=True, indent=4) print(datas[:1600]) ``` Cyklus, kterým přistupujeme k věkovému rozpětí lidí lustrovaných policií ``` age_range = [i["age_range"] for i in data] print(age_range) ``` Cyklus, kterým přistupujeme k id ulice, kde došlo lustraci 
podezřelé(ho)
```
street_id = [i["location"]["street"]["id"] for i in data]
print(street_id)
import pandas as pd
```
Spojíme seznamy do dataframe
```
df_from_lists = pd.DataFrame(list(zip(age_range, street_id)), columns = ['age_range', 'street_id'])
df_from_lists.head()
```
Jakou věkovou skupinu lustrovala policie nejčastěji?
```
%matplotlib inline
df_from_lists["age_range"].value_counts().plot.bar();
```
### Json_normalize aneb jak jednoduše převést JSON na DataFrame
```
norm_data = pd.json_normalize(data)
norm_data.head()
norm_data["gender"].value_counts()
norm_data["gender"].value_counts().plot.bar();
norm_data["age_range"].value_counts().plot.bar();
```
## Tvoříme klienta pro práci s veřejným API

V následujícím bloku si vytvoříme klienta, který nám stáhne data za dva měsíce (místo jednoho) a uloží je do seznamu seznamů (list of lists). Případné chyby spojení s API ošetříme výjimkami (exceptions) - více viz [dokumentace requests](https://requests.readthedocs.io/en/master/_modules/requests/exceptions/)
```
def get_uk_crime_data(latitude, longitude, dates_list):
    """
    Download UK stop-and-search data for each month in dates_list.

    Three arguments: latitude and longitude (strings) and a list of
    'YYYY-MM' date strings.
    Returns a single DataFrame with the records of all requested months.
    Raises requests.exceptions.RequestException (via raise_for_status)
    on connection or HTTP errors, as promised in the text above.
    """
    # FIX: the docstring previously claimed per-day data; the API is monthly.
    # The URL is loop-invariant, so it is defined once here.
    api_url = "https://data.police.uk/api/stops-street"
    appended_data = []
    for month in dates_list:
        params = {
            "lat" : latitude,
            "lng" : longitude,
            "date" : month
        }
        response = requests.get(api_url, params=params)
        # FIX: fail loudly on HTTP errors instead of trying to JSON-decode
        # an error page — the prose above promises exception handling.
        response.raise_for_status()
        records = pd.json_normalize(response.json())
        # store DataFrame in list
        appended_data.append(records)
    return pd.concat(appended_data)
```
Zavolání funkce get_uk_crime_data s parametry zeměpisné šíře a délky přiřazené proměnné df_uk_crime_data
```
dates_list = ["2018-06","2018-07"]
lat = "51.5487158"
lng = "-0.0613842"
df_uk_crime_data = get_uk_crime_data(lat, lng, dates_list)
df_uk_crime_data.head()
```
## Přistupování k tweetům přes Twitter API pomocí knihovny Tweepy

Příkaz na instalaci knihovny tweepy uvnitř notebooku.
Stačí odkomentovat a spustit.
```
#%pip install tweepy
```
Import knihovny Tweepy
```
import tweepy
```
Pro získání dat z Twitteru musí náš klient projít OAuth autorizací.

**Jak funguje OAuth autorizace na Twitteru?**

1. vývojář aplikace se zaregistruje u poskytovatele API
2. zaregistruje aplikaci, získá consumer_key, consumer_secret, access_token a access_secret na https://developer.twitter.com/en/apps
3. aplikace volá API a prokazuje se consumer_key, consumer_secret, access_token a access_secret
```
# SECURITY(review): real OAuth credentials are hard-coded and committed in
# this notebook.  They should be revoked and loaded from environment
# variables or an untracked config file instead.
consumer_key = "tTIzOaOSJkyiFTGJwXDSarGLI"
consumer_secret = "3yhCpz7dpLgxkkZsMOWwzKmlefngngskPpO1k3HKI5jIojijzA"
access_token = "1646190612-U8wKL2PwiAabeg9e9GZUhlLjiWrRgd1sqbd0oQq"
access_secret = "DA5yY1PWS00OKt7OB7wRD4AnSkRQky9Wl4e8RRJQFo82q"
```
Další krok je vytvoření instance OAuthHandleru, do kterého vložíme náš consumer token a consumer secret
```
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
```
Ověření funkčnosti autentifikace
```
# Prints a console message instead of raising when the credentials are rejected.
api = tweepy.API(auth)
try:
    api.verify_credentials()
    print("Authentication OK")
except Exception:
    print("Error during authentication")
```
V API dokumentaci k Tweepy http://docs.tweepy.org/en/v3.5.0/api.html najdeme metodu která např. vypíše ID přátel, resp.
sledujících účtu
```
api.friends_ids('@kdnuggets')
```
Nebo vypíše ID, které účet sleduje
```
api.followers_ids('@kdnuggets')
```
Metoda, která vrátí posledních 20 tweetů podle ID uživatele
```
twitter_user = api.user_timeline('@kdnuggets')
kdnuggets_tweets = [i.text for i in twitter_user]
print(kdnuggets_tweets)

def get_tweets(consumer_key, consumer_secret, access_token, access_secret, twitter_account):
    """
    Fetch the last 20 tweets of a Twitter account.

    Five arguments: consumer_key, consumer_secret, access_token,
    access_secret, and the twitter_account name.
    Returns a one-column DataFrame (named after the account) containing the
    tweet texts; the DataFrame is empty when authentication fails.
    """
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    api = tweepy.API(auth)
    # FIX: tweets_list was only assigned inside the try block, so a failed
    # authentication raised NameError at the return statement below.
    tweets_list = []
    try:
        api.verify_credentials()
        print("Authentication OK")
        twitter_user = api.user_timeline(twitter_account)
        tweets_list = [i.text for i in twitter_user]
    except Exception:
        print("Error during authentication")
    return pd.DataFrame(tweets_list, columns = [twitter_account])

get_tweets(consumer_key, consumer_secret, access_token, access_secret, '@kdnuggets')
```
github_jupyter
``` from sportsreference.nfl.teams import Teams import pandas as pd import numpy as np from scipy import stats from datetime import datetime # Pull 2020 NFL Data from sportsreference api teams = Teams(year= '2020') teams_df = teams.dataframes teams_df.sort_values(by=['name'], inplace=True, ascending=True) teams_df.set_index('name', inplace=True) # Drop statistics that are not Relevant teams_df.drop(['first_downs', 'first_downs_from_penalties', 'games_played','losses', 'abbreviation','pass_attempts', 'pass_completions', 'pass_first_downs','plays', 'points_contributed_by_offense','post_season_result', 'rush_attempts', 'rush_first_downs', 'wins'], axis=1, inplace= True) # Normalize data for (columnName, columnData) in teams_df.iteritems(): if columnName != 'name': teams_df[columnName] = stats.zscore(columnData) # Invert stats that negatively affect a team teams_df[['fumbles', 'interceptions', 'penalties', 'percent_drives_with_turnovers', 'points_against', 'turnovers', 'yards_from_penalties']] *= -1 teams_df.head() rank = pd.Series() rank['defensive_simple_rating_system'] = 5 rank['fumbles'] =0 rank['interceptions'] =0 rank['margin_of_victory'] = 3 rank['offensive_simple_rating_system'] = 5 rank['pass_net_yards_per_attempt'] = .5 rank['pass_touchdowns'] = 1 rank['pass_yards'] =1 rank['penalties'] =1 rank['percent_drives_with_points'] =2 rank['percent_drives_with_turnovers'] = 2 rank['points_against'] =1 rank['rank'] = 0 rank['rush_touchdowns'] = 1 rank['rush_yards'] = 3 rank['rush_yards_per_attempt'] =1 rank['simple_rating_system'] = 7 rank['strength_of_schedule'] = 4 rank['turnovers'] = 3 rank['win_percentage'] = 6 rank['yards'] = 2 rank['yards_from_penalties'] = .5 rank['yards_per_play'] = 2 sum = rank.sum() rank/=sum for (columnName, columnData) in rank.iteritems(): teams_df[columnName]*= columnData print(rank[columnName]) teams_df['sum'] = 0.0 for i, row in teams_df.iterrows(): teams_df.at[i, 'sum'] = row['defensive_simple_rating_system':].sum() 
teams_df.sort_values(by=['sum'], inplace=True, ascending=False)
print(teams_df['sum'])

import matplotlib.pyplot as plt
import seaborn as sns

sns.kdeplot(teams_df['sum'])

teams_df['zscores'] = stats.zscore(teams_df['sum'])
# stats.norm.cdf(z) is identical to 1 - stats.norm.sf(z) and states the
# intent (percentile of the z-score) directly.
teams_df['percentile'] = stats.norm.cdf(teams_df['zscores'])
print(teams_df['percentile'])


def predict(team1, team2):
    """Return the projected American money line for team1 against team2.

    Maps the teams' percentile gap to a win probability with a logistic
    curve, then converts it via probToMoneyLine.  (A dead commented-out
    return based on the raw percentile difference was removed.)
    """
    t1 = teams_df['percentile'].loc[team1]
    t2 = teams_df['percentile'].loc[team2]
    p = 1 / (10 ** (-(t1 - t2)) + 1)
    return probToMoneyLine(p)


def probToMoneyLine(prob):
    """Convert a win probability in (0, 1) to an American money line."""
    ml = 0
    prob *= 100
    if prob > 50:
        # Favorite: negative line.
        ml = -(prob / (100 - prob)) * 100
    elif prob < 50:
        # Underdog: positive line.
        ml = ((100 - prob) / prob) * 100
    else:
        ml = 100
    return ml


predict('Tennessee Titans', 'Las Vegas Raiders')

import json
import requests
import argparse

parser = argparse.ArgumentParser(description='Sample')
parser.add_argument('--api-key', type=str, default='')
args, unknown = parser.parse_known_args()

# SECURITY(review): a real API key is hard-coded and committed here; revoke it
# and supply it via the --api-key argument or an environment variable instead.
API_KEY = '58f860df380e5b01f108f9418584b714'
SPORT = 'americanfootball_nfl' # use the sport_key from the /sports endpoint below, or use 'upcoming' to see the next 8 games across all sports
REGION = 'us' # uk | us | eu | au
MARKET = 'h2h' # h2h | spreads | totals
ODDSFORMAT = 'american'

# Now get a list of live & upcoming games for the sport you want,
# along with odds for different bookmakers
odds_response = requests.get('https://api.the-odds-api.com/v3/odds', params={
    'api_key': API_KEY,
    'sport': SPORT,
    'region': REGION,
    'mkt': MARKET,
    'oddsFormat': ODDSFORMAT,
})

odds_json = json.loads(odds_response.text)
games = []
if not odds_json['success']:
    print(odds_json['msg'])
else:
    print('Number of events:', len(odds_json['data']))
    print(odds_json['data'][0]['commence_time'])
    first = odds_json['data']
    for i, game in enumerate(odds_json['data'], start=0):
        games.append({})
games[i]['teams'] = game['teams'] games[i]['home'] = game['home_team'] for site in game['sites']: if site['site_nice'] == 'Caesars': games[i]['odds'] = site['odds']['h2h'] # Check your usage print('Remaining requests', odds_response.headers['x-requests-remaining']) print('Used requests', odds_response.headers['x-requests-used']) print(games) for game in games: team1 = game['teams'][0] team2 = game['teams'][1] print(team1, ' vs', team2) print('Predicted Line for', team1,'is', predict(team1, team2)) print('Actual Line for', team1,'is', game['odds'][0] ) ```
github_jupyter
# SSD300 Training Tutorial This tutorial explains how to train an SSD300 on the Pascal VOC datasets. The preset parameters reproduce the training of the original SSD300 "07+12" model. Training SSD512 works simiarly, so there's no extra tutorial for that. The same goes for training on other datasets. You can find a summary of a full training here to get an impression of what it should look like: [SSD300 "07+12" training summary](https://github.com/pierluigiferrari/ssd_keras/blob/master/training_summaries/ssd300_pascal_07%2B12_training_summary.md) ``` from tensorflow.keras.optimizers import Adam, SGD from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, TerminateOnNaN, CSVLogger from tensorflow.keras import backend as K from tensorflow.keras.models import load_model from math import ceil import numpy as np from matplotlib import pyplot as plt from models.keras_ssd300 import ssd_300 from keras_loss_function.keras_ssd_loss import SSDLoss from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes from keras_layers.keras_layer_DecodeDetections import DecodeDetections from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast from keras_layers.keras_layer_L2Normalization import L2Normalization from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast from data_generator.object_detection_2d_data_generator import DataGenerator from data_generator.object_detection_2d_geometric_ops import Resize from data_generator.object_detection_2d_photometric_ops import ConvertTo3Channels from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms import tensorflow as tf gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: try: for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) except RuntimeError as e: 
print(e) %matplotlib inline ``` ## 0. Preliminary note All places in the code where you need to make any changes are marked `TODO` and explained accordingly. All code cells that don't contain `TODO` markers just need to be executed. ## 1. Set the model configuration parameters This section sets the configuration parameters for the model definition. The parameters set here are being used both by the `ssd_300()` function that builds the SSD300 model as well as further down by the constructor for the `SSDInputEncoder` object that is needed to run the training. Most of these parameters are needed to define the anchor boxes. The parameters as set below produce the original SSD300 architecture that was trained on the Pascal VOC datsets, i.e. they are all chosen to correspond exactly to their respective counterparts in the `.prototxt` file that defines the original Caffe implementation. Note that the anchor box scaling factors of the original SSD implementation vary depending on the datasets on which the models were trained. The scaling factors used for the MS COCO datasets are smaller than the scaling factors used for the Pascal VOC datasets. The reason why the list of scaling factors has 7 elements while there are only 6 predictor layers is that the last scaling factor is used for the second aspect-ratio-1 box of the last predictor layer. Refer to the documentation for details. As mentioned above, the parameters set below are not only needed to build the model, but are also passed to the `SSDInputEncoder` constructor further down, which is responsible for matching and encoding ground truth boxes and anchor boxes during the training. In order to do that, it needs to know the anchor box parameters. ``` img_height = 480 # Height of the model input images img_width = 640 # Width of the model input images img_channels = 3 # Number of color channels of the model input images mean_color = [123, 117, 104] # The per-channel mean of the images in the dataset. 
Do not change this value if you're using any of the pre-trained weights. swap_channels = [2, 1, 0] # The color channel order in the original SSD is BGR, so we'll have the model reverse the color channel order of the input images. n_classes = 1 # Number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO scales_pascal = [0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05] # The anchor box scaling factors used in the original SSD300 for the Pascal VOC datasets scales_coco = [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05] # The anchor box scaling factors used in the original SSD300 for the MS COCO datasets scales = scales_pascal aspect_ratios = [[1.0, 2.0, 0.5], [1.0, 2.0, 0.5, 3.0, 1.0/3.0], [1.0, 2.0, 0.5, 3.0, 1.0/3.0], [1.0, 2.0, 0.5, 3.0, 1.0/3.0], [1.0, 2.0, 0.5], [1.0, 2.0, 0.5]] # The anchor box aspect ratios used in the original SSD300; the order matters two_boxes_for_ar1 = True steps = [8, 16, 32, 64, 100, 300] # The space between two adjacent anchor box center points for each predictor layer. offsets = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5] # The offsets of the first anchor box center points from the top and left borders of the image as a fraction of the step size for each predictor layer. clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries variances = [0.1, 0.1, 0.2, 0.2] # The variances by which the encoded target coordinates are divided as in the original implementation normalize_coords = True ``` ## 2. Build or load the model You will want to execute either of the two code cells in the subsequent two sub-sections, not both. ### 2.1 Create a new model and load trained VGG-16 weights into it (or trained SSD weights) If you want to create a new SSD300 model, this is the relevant section for you. If you want to load a previously saved SSD300 model, skip ahead to section 2.2. The code cell below does the following things: 1. It calls the function `ssd_300()` to build the model. 2. 
It then loads the weights file that is found at `weights_path` into the model. You could load the trained VGG-16 weights or you could load the weights of a trained model. If you want to reproduce the original SSD training, load the pre-trained VGG-16 weights. In any case, you need to set the path to the weights file you want to load on your local machine. Download links to all the trained weights are provided in the [README](https://github.com/pierluigiferrari/ssd_keras/blob/master/README.md) of this repository. 3. Finally, it compiles the model for the training. In order to do so, we're defining an optimizer (Adam) and a loss function (SSDLoss) to be passed to the `compile()` method. Normally, the optimizer of choice would be Adam (commented out below), but since the original implementation uses plain SGD with momentum, we'll do the same in order to reproduce the original training. Adam is generally the superior optimizer, so if your goal is not to have everything exactly as in the original training, feel free to switch to Adam. You might need to adjust the learning rate scheduler below slightly in case you use Adam. Note that the learning rate that is being set here doesn't matter, because further below we'll pass a learning rate scheduler to the training function, which will overwrite any learning rate set here, i.e. what matters are the learning rates that are defined by the learning rate scheduler. `SSDLoss` is a custom Keras loss function that implements the multi-task that consists of a log loss for classification and a smooth L1 loss for localization. `neg_pos_ratio` and `alpha` are set as in the paper. ``` # 1: Build the Keras model. K.clear_session() # Clear previous models from memory. 
model = ssd_300(image_size=(img_height, img_width, img_channels), n_classes=n_classes, mode='training', l2_regularization=0.0005, scales=scales, aspect_ratios_per_layer=aspect_ratios, two_boxes_for_ar1=two_boxes_for_ar1, steps=steps, offsets=offsets, clip_boxes=clip_boxes, variances=variances, normalize_coords=normalize_coords, subtract_mean=mean_color, swap_channels=swap_channels) # 2: Load some weights into the model. # TODO: Set the path to the weights you want to load. weights_path = './VGG_ILSVRC_16_layers_fc_reduced.h5' model.load_weights(weights_path, by_name=True) # 3: Instantiate an optimizer and the SSD loss function and compile the model. # If you want to follow the original Caffe implementation, use the preset SGD # optimizer, otherwise I'd recommend the commented-out Adam optimizer. #adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) sgd = SGD(lr=0.001, momentum=0.9, decay=0.0, nesterov=False) ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0) model.compile(optimizer=sgd, loss=ssd_loss.compute_loss) ``` ### 2.2 Load a previously created model If you have previously created and saved a model and would now like to load it, execute the next code cell. The only thing you need to do here is to set the path to the saved model HDF5 file that you would like to load. The SSD model contains custom objects: Neither the loss function nor the anchor box or L2-normalization layer types are contained in the Keras core library, so we need to provide them to the model loader. This next code cell assumes that you want to load a model that was created in 'training' mode. If you want to load a model that was created in 'inference' or 'inference_fast' mode, you'll have to add the `DecodeDetections` or `DecodeDetectionsFast` layer type to the `custom_objects` dictionary below. ``` # TODO: Set the path to the `.h5` file of the model to be loaded. 
model_path = 'path/to/trained/model.h5' # We need to create an SSDLoss object in order to pass that to the model loader. ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0) K.clear_session() # Clear previous models from memory. model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes, 'L2Normalization': L2Normalization, 'compute_loss': ssd_loss.compute_loss}) ``` ## 3. Set up the data generators for the training The code cells below set up the data generators for the training and validation datasets to train the model. The settings below reproduce the original SSD training on Pascal VOC 2007 `trainval` plus 2012 `trainval` and validation on Pascal VOC 2007 `test`. The only thing you need to change here are the filepaths to the datasets on your local machine. Note that parsing the labels from the XML annotations files can take a while. Note that the generator provides two options to speed up the training. By default, it loads the individual images for a batch from disk. This has two disadvantages. First, for compressed image formats like JPG, this is a huge computational waste, because every image needs to be decompressed again and again every time it is being loaded. Second, the images on disk are likely not stored in a contiguous block of memory, which may also slow down the loading process. The first option that `DataGenerator` provides to deal with this is to load the entire dataset into memory, which reduces the access time for any image to a negligible amount, but of course this is only an option if you have enough free memory to hold the whole dataset. As a second option, `DataGenerator` provides the possibility to convert the dataset into a single HDF5 file. This HDF5 file stores the images as uncompressed arrays in a contiguous block of memory, which dramatically speeds up the loading time. 
It's not as good as having the images in memory, but it's a lot better than the default option of loading them from their compressed JPG state every time they are needed. Of course such an HDF5 dataset may require significantly more disk space than the compressed images (around 9 GB total for Pascal VOC 2007 `trainval` plus 2012 `trainval` and another 2.6 GB for 2007 `test`). You can later load these HDF5 datasets directly in the constructor. The original SSD implementation uses a batch size of 32 for the training. In case you run into GPU memory issues, reduce the batch size accordingly. You need at least 7 GB of free GPU memory to train an SSD300 with 20 object classes with a batch size of 32. The `DataGenerator` itself is fairly generic. I doesn't contain any data augmentation or bounding box encoding logic. Instead, you pass a list of image transformations and an encoder for the bounding boxes in the `transformations` and `label_encoder` arguments of the data generator's `generate()` method, and the data generator will then apply those given transformations and the encoding to the data. Everything here is preset already, but if you'd like to learn more about the data generator and its data augmentation capabilities, take a look at the detailed tutorial in [this](https://github.com/pierluigiferrari/data_generator_object_detection_2d) repository. The data augmentation settings defined further down reproduce the data augmentation pipeline of the original SSD training. The training generator receives an object `ssd_data_augmentation`, which is a transformation object that is itself composed of a whole chain of transformations that replicate the data augmentation procedure used to train the original Caffe implementation. The validation generator receives an object `resize`, which simply resizes the input images. An `SSDInputEncoder` object, `ssd_input_encoder`, is passed to both the training and validation generators. 
As explained above, it matches the ground truth labels to the model's anchor boxes and encodes the box coordinates into the format that the model needs. In order to train the model on a dataset other than Pascal VOC, either choose `DataGenerator`'s appropriate parser method that corresponds to your data format, or, if `DataGenerator` does not provide a suitable parser for your data format, you can write an additional parser and add it. Out of the box, `DataGenerator` can handle datasets that use the Pascal VOC format (use `parse_xml()`), the MS COCO format (use `parse_json()`) and a wide range of CSV formats (use `parse_csv()`). ``` # 1: Instantiate two `DataGenerator` objects: One for training, one for validation. # Optional: If you have enough memory, consider loading the images into memory for the reasons explained above. train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None) val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None) # 2: Parse the image and label lists for the training and validation datasets. This can take a while. # TODO: Set the paths to the datasets here. # The directories that contain the images. images_dir = 'D:\\Deeplearning\\images\\ocr_export_2\\' # Ground truth train_labels_filename = 'D:\\Deeplearning\\images\\ocr_export_2\\train.csv' val_labels_filename = 'D:\\Deeplearning\\images\\ocr_export_2\\train.csv' train_dataset.parse_csv(images_dir=images_dir, labels_filename=train_labels_filename, input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'], # This is the order of the first six columns in the CSV file that contains the labels for your dataset. If your labels are in XML format, maybe the XML parser will be helpful, check the documentation. 
include_classes='all') val_dataset.parse_csv(images_dir=images_dir, labels_filename=val_labels_filename, input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'], include_classes='all') # Optional: Convert the dataset into an HDF5 dataset. This will require more disk space, but will # speed up the training. Doing this is not relevant in case you activated the `load_images_into_memory` # option in the constructor, because in that cas the images are in memory already anyway. If you don't # want to create HDF5 datasets, comment out the subsequent two function calls. train_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_train.h5', resize=False, variable_image_size=True, verbose=True) val_dataset.create_hdf5_dataset(file_path='dataset_udacity_traffic_val.h5', resize=False, variable_image_size=True, verbose=True) # Get the number of samples in the training and validations datasets. train_dataset_size = train_dataset.get_dataset_size() val_dataset_size = val_dataset.get_dataset_size() print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size)) print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size)) # 3: Set the batch size. batch_size = 2 # Change the batch size if you like, or if you run into GPU memory issues. # 4: Set the image transformations for pre-processing and data augmentation options. # For the training generator: ssd_data_augmentation = SSDDataAugmentation(img_height=img_height, img_width=img_width, background=mean_color) # For the validation generator: convert_to_3_channels = ConvertTo3Channels() resize = Resize(height=img_height, width=img_width) # 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function. # The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes. 
predictor_sizes = [model.get_layer('conv4_3_norm_mbox_conf').output_shape[1:3], model.get_layer('fc7_mbox_conf').output_shape[1:3], model.get_layer('conv6_2_mbox_conf').output_shape[1:3], model.get_layer('conv7_2_mbox_conf').output_shape[1:3], model.get_layer('conv8_2_mbox_conf').output_shape[1:3], model.get_layer('conv9_2_mbox_conf').output_shape[1:3]] ssd_input_encoder = SSDInputEncoder(img_height=img_height, img_width=img_width, n_classes=n_classes, predictor_sizes=predictor_sizes, scales=scales, aspect_ratios_per_layer=aspect_ratios, two_boxes_for_ar1=two_boxes_for_ar1, steps=steps, offsets=offsets, clip_boxes=clip_boxes, variances=variances, matching_type='multi', pos_iou_threshold=0.5, neg_iou_limit=0.5, normalize_coords=normalize_coords) # 6: Create the generator handles that will be passed to Keras' `fit_generator()` function. train_generator = train_dataset.generate(batch_size=batch_size, shuffle=True, transformations=[ssd_data_augmentation], label_encoder=ssd_input_encoder, returns={'processed_images', 'encoded_labels'}, keep_images_without_gt=False) val_generator = val_dataset.generate(batch_size=batch_size, shuffle=False, transformations=[convert_to_3_channels, resize], label_encoder=ssd_input_encoder, returns={'processed_images', 'encoded_labels'}, keep_images_without_gt=False) # Get the number of samples in the training and validations datasets. train_dataset_size = train_dataset.get_dataset_size() val_dataset_size = val_dataset.get_dataset_size() print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size)) print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size)) ``` ## 4. Set the remaining training parameters We've already chosen an optimizer and set the batch size above, now let's set the remaining training parameters. I'll set one epoch to consist of 1,000 training steps. 
def lr_schedule(epoch):
    """Piecewise-constant learning rate by (zero-based) epoch number.

    Passed to Keras' `LearningRateScheduler`, which overrides whatever
    learning rate the optimizer was compiled with.
    """
    # (upper epoch bound, learning rate), in ascending epoch order.
    boundaries = ((80, 0.0001), (100, 0.00001))
    for bound, rate in boundaries:
        if epoch < bound:
            return rate
    return 0.000001
model_checkpoint = ModelCheckpoint(filepath='ssd300_pascal_07+12_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}.h5', monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1) #model_checkpoint.best = csv_logger = CSVLogger(filename='ssd300_pascal_07+12_training_log.csv', separator=',', append=True) learning_rate_scheduler = LearningRateScheduler(schedule=lr_schedule, verbose=1) terminate_on_nan = TerminateOnNaN() callbacks = [model_checkpoint, csv_logger, learning_rate_scheduler, terminate_on_nan] ``` ## 5. Train In order to reproduce the training of the "07+12" model mentioned above, at 1,000 training steps per epoch you'd have to train for 120 epochs. That is going to take really long though, so you might not want to do all 120 epochs in one go and instead train only for a few epochs at a time. You can find a summary of a full training [here](https://github.com/pierluigiferrari/ssd_keras/blob/master/training_summaries/ssd300_pascal_07%2B12_training_summary.md). In order to only run a partial training and resume smoothly later on, there are a few things you should note: 1. Always load the full model if you can, rather than building a new model and loading previously saved weights into it. Optimizers like SGD or Adam keep running averages of past gradient moments internally. If you always save and load full models when resuming a training, then the state of the optimizer is maintained and the training picks up exactly where it left off. If you build a new model and load weights into it, the optimizer is being initialized from scratch, which, especially in the case of Adam, leads to small but unnecessary setbacks every time you resume the training with previously saved weights. 2. In order for the learning rate scheduler callback above to work properly, `fit_generator()` needs to know which epoch we're in, otherwise it will start with epoch 0 every time you resume the training. 
Set `initial_epoch` to be the next epoch of your training. Note that this parameter is zero-based, i.e. the first epoch is epoch 0. If you had trained for 10 epochs previously and now you'd want to resume the training from there, you'd set `initial_epoch = 10` (since epoch 10 is the eleventh epoch). Furthermore, set `final_epoch` to the last epoch you want to run. To stick with the previous example, if you had trained for 10 epochs previously and now you'd want to train for another 10 epochs, you'd set `initial_epoch = 10` and `final_epoch = 20`. 3. In order for the model checkpoint callback above to work correctly after a kernel restart, set `model_checkpoint.best` to the best validation loss from the previous training. If you don't do this and a new `ModelCheckpoint` object is created after a kernel restart, that object obviously won't know what the last best validation loss was, so it will always save the weights of the first epoch of your new training and record that loss as its new best loss. This isn't super-important, I just wanted to mention it. ``` # If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly. initial_epoch = 0 final_epoch = 10 steps_per_epoch = 1000 history = model.fit_generator(generator=train_generator, steps_per_epoch=steps_per_epoch, epochs=final_epoch, callbacks=callbacks, validation_data=val_generator, validation_steps=ceil(val_dataset_size/batch_size), initial_epoch=initial_epoch) ``` ## 6. Make predictions Now let's make some predictions on the validation dataset with the trained model. For convenience we'll use the validation generator that we've already set up above. Feel free to change the batch size. You can set the `shuffle` option to `False` if you would like to check the model's progress on the same image(s) over the course of the training. ``` # 1: Set the generator for the predictions. 
predict_generator = val_dataset.generate(batch_size=1, shuffle=True, transformations=[convert_to_3_channels, resize], label_encoder=None, returns={'processed_images', 'filenames', 'inverse_transform', 'original_images', 'original_labels'}, keep_images_without_gt=False) # 2: Generate samples. batch_images, batch_filenames, batch_inverse_transforms, batch_original_images, batch_original_labels = next(predict_generator) i = 0 # Which batch item to look at print("Image:", batch_filenames[i]) print() print("Ground truth boxes:\n") print(np.array(batch_original_labels[i])) # 3: Make predictions. import cv2 import numpy as np im_test= cv2.imread('D:\Deeplearning\images\ocr\Image - 145504897.bmp') y_pred = model.predict(np.expand_dims(im_test,0)) ``` Now let's decode the raw predictions in `y_pred`. Had we created the model in 'inference' or 'inference_fast' mode, then the model's final layer would be a `DecodeDetections` layer and `y_pred` would already contain the decoded predictions, but since we created the model in 'training' mode, the model outputs raw predictions that still need to be decoded and filtered. This is what the `decode_detections()` function is for. It does exactly what the `DecodeDetections` layer would do, but using Numpy instead of TensorFlow (i.e. on the CPU instead of the GPU). `decode_detections()` with default argument values follows the procedure of the original SSD implementation: First, a very low confidence threshold of 0.01 is applied to filter out the majority of the predicted boxes, then greedy non-maximum suppression is performed per class with an intersection-over-union threshold of 0.45, and out of what is left after that, the top 200 highest confidence boxes are returned. Those settings are for precision-recall scoring purposes though. In order to get some usable final predictions, we'll set the confidence threshold much higher, e.g. to 0.5, since we're only interested in the very confident predictions. 
``` # 4: Decode the raw predictions in `y_pred`. y_pred_decoded = decode_detections(y_pred, confidence_thresh=0.9, iou_threshold=0.1, top_k=200, normalize_coords=normalize_coords, img_height=img_height, img_width=img_width) ``` We made the predictions on the resized images, but we'd like to visualize the outcome on the original input images, so we'll convert the coordinates accordingly. Don't worry about that opaque `apply_inverse_transforms()` function below, in this simple case it just aplies `(* original_image_size / resized_image_size)` to the box coordinates. ``` # 5: Convert the predictions for the original image. y_pred_decoded_inv = apply_inverse_transforms(y_pred_decoded, batch_inverse_transforms) np.set_printoptions(precision=2, suppress=True, linewidth=90) print("Predicted boxes:\n") print(' class conf xmin ymin xmax ymax') print(y_pred_decoded_inv[i]) ``` Finally, let's draw the predicted boxes onto the image. Each predicted box says its confidence next to the category name. The ground truth boxes are also drawn onto the image in green for comparison. 
# 5: Draw the predicted boxes onto the image

# Set the colors for the bounding boxes
colors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist()
classes = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat',
           'chair', 'cow', 'diningtable', 'dog',
           'horse', 'motorbike', 'person', 'pottedplant',
           'sheep', 'sofa', 'train', 'tvmonitor']
# NOTE(review): this is the 20-class Pascal VOC name list, but n_classes=1
# further up — confirm the names match the dataset this model was trained on.

plt.figure(figsize=(20,12))
plt.imshow(im_test)

current_axis = plt.gca()

# Draw the ground truth boxes in green.
# Bug fix: a stray `continue` at the top of this loop skipped every
# iteration, so the ground truth boxes announced in the text above were
# never drawn. Label format per row: [class_id, xmin, ymin, xmax, ymax].
for box in batch_original_labels[i]:
    xmin = box[1]
    ymin = box[2]
    xmax = box[3]
    ymax = box[4]
    label = '{}'.format(classes[int(box[0])])
    current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color='green', fill=False, linewidth=2))
    #current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':'green', 'alpha':1.0})

# Draw the predicted boxes, color-coded by class.
# Decoded prediction format per row: [class_id, conf, xmin, ymin, xmax, ymax].
for box in y_pred_decoded_inv[i]:
    xmin = box[2]
    ymin = box[3]
    xmax = box[4]
    ymax = box[5]
    color = colors[int(box[0])]
    label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
    current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))
    #current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})
github_jupyter
``` % matplotlib inline import matplotlib.pyplot as plt from matplotlib import colors, cm import numpy as np from numpy import matmul from scipy.spatial.distance import pdist, squareform from sklearn.datasets import load_diabetes import pandas as pd from scipy.linalg import cholesky from scipy.linalg import solve from scipy.optimize import minimize import time # Developer notes # 1) Cholesky decomposition produces NaNs (probably because K+I*s**2 is not pos semidef) causing solve to complain # 2) Including gradient for likelihood made optimization much faster class mintGP(): """ The implementation is based on Algorithm 2.1 of Gaussian Processes for Machine Learning (GPML) by Rasmussen and Williams. Takes 2D np-arrays """ def __init__(self): pass def fit(self, X, Y): self.yscale = np.std(Y) self.Y = Y/self.yscale self.X = X self.n = np.shape(X)[0] # initialize with heuristics self.lengthscale = np.mean(pdist(X, metric='euclidean')) self.likelihood_variance = 1 ############################################################### # Gradient descent on marginal likelihood with scipy L-BFGS-B # ############################################################### theta0 = np.array([self.lengthscale, self.likelihood_variance]) bnds = ((1e-20, None), (1e-10, None)) sol = minimize(self.neg_log_marg_like, theta0, args=(), method='L-BFGS-B', bounds=bnds, jac=True) self.lengthscale, self.likelihood_variance = sol.x self.marginal_likelihood = np.exp(-sol.fun) # for prediction: K,_ = self.K(X,X,self.lengthscale) self.L = cholesky( K + self.likelihood_variance*np.eye(self.n), lower=True) print(sol.x, theta0) ########################## # Likelihood computation # ########################## def neg_log_marg_like(self, theta): """ Compute negative log marginal likelihood for hyperparameter optimization """ jitter=0 K, D = self.K(self.X ,self.X, theta[0]) L = cholesky( K + (theta[1]+jitter)*np.eye(self.n), lower=True) self.L = L alpha = solve(L.T, solve(L,self.Y, lower=True) ) logmarglike = \ - 
0.5*matmul(self.Y.T, alpha)[0,0] \ - np.sum( np.log( np.diag( L ) ) ) \ - 0.5*self.n*np.log(2*np.pi) # compute gradients prefactor = matmul(alpha, alpha.T) - solve(L.T, solve(L, np.eye(self.n) ) ) Kd_lengthscale = np.multiply( D/theta[0]**3, K) Kd_likelihood_variance = np.eye(self.n) logmarglike_grad = 0.5*np.array( [ np.trace( matmul(prefactor, Kd_lengthscale) ), np.trace( matmul(prefactor, Kd_likelihood_variance) )] ) return -logmarglike, -logmarglike_grad def nlml_grad(self): """ Return gradient of negative log marginal likelihood """ return self.logmarglike_grad ###################### # Kernel computation # ###################### def K(self, X, Z, lengthscale): n1 = np.shape(X)[0] n2 = np.shape(Z)[0] n1sq = np.sum(np.square(X), 1) n2sq = np.sum(np.square(Z), 1) D = (np.ones([n2, 1])*n1sq).T + np.ones([n1, 1])*n2sq -2*matmul(X,Z.T) return np.exp(-D/(2*lengthscale**2)), D def scalarK(self, x, z, lengthscale): return( np.exp( np.linalg.norm(x - z)**2/(2*lengthscale**2) ) ) ########################### # Predictive distribution # ########################### def predict(self, Xnew, predvar=False): alpha = solve(self.L.T, solve(self.L,self.Y*self.yscale ) ) if predvar: m = np.shape(Xnew)[0] Knew_N,_ = self.K(Xnew, self.X, self.lengthscale) Knew_new = np.array( [self.scalarK(Xnew[i], Xnew[i], self.lengthscale) for i in range(m)] ).reshape([m,1]) v = solve(self.L, Knew_N.T) return matmul(Knew_N, alpha), np.diag( Knew_new + self.likelihood_variance - matmul(v.T, v) ).reshape(m,1) else: Knew_N,_ = self.K(Xnew, self.X, self.lengthscale) return matmul(Knew_N, alpha) ############################### # Gradient of predictive mean # ############################### def predictive_grad(self, Xnew): alpha = solve(self.L.T, solve(self.L, self.Y*self.yscale ) ) Knew_N,_ = self.K(Xnew, self.X, self.lengthscale) return (-1/self.lengthscale**2)*matmul( np.tile(Xnew.T, self.n) - self.X.T, np.multiply(Knew_N.T, alpha) ) ``` ## 1D Toy example with missing data, gradient computation, 
likelihood surface plot ``` N = 30 Nt = 400 X = np.linspace(-4,5,N).reshape(N,1); # We can pick out some values to illustrate how the uncertainty estimate behaves # it's interesting to see what happens to likelihood below ind = np.bool8([1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1]); X = X[ ind ]; N=N-sum(~ind) Xt = np.linspace(-4,5,Nt).reshape(Nt,1) Y = np.sin(X)*np.exp(0.2*X) + np.random.randn(N,1)*0.3 t0 = time.time() m = mintGP() m.fit(X,Y) print( m.lengthscale, m.likelihood_variance ) pred, var = m.predict(Xt,predvar=True) t1 = time.time() print('time to compute ',t1-t0) fig, ax = plt.subplots() ax.plot(Xt, pred, label="GP mean") twostd = 2*np.sqrt(var) ax.fill_between(Xt.ravel(), (pred-twostd).ravel(), (pred+twostd).ravel(), alpha=0.5) ax.scatter(X,Y,label='data') ax.legend(loc='best') ``` ### Gradient ``` grad = [ m.predictive_grad(x.reshape(1,1)) for x in Xt ] grad = np.array(grad) fig, ax = plt.subplots() ax.plot(Xt, grad.ravel(), label="GP deriv") ax.plot([-4,5], [0,0], label="GP deriv") ``` ### Likelihood Surface ``` #### Plot LML landscape plt.figure(1) M = 30 theta0 = np.logspace(-0.3, 0.4,M)#np.logspace(-1, 1, M) theta1 = np.logspace(-1.5, 0, M)#np.logspace(-2.5, 0, M) Theta0, Theta1 = np.meshgrid(theta0, theta1) LML = [[m.neg_log_marg_like([Theta0[i, j], Theta1[i, j]])[0] for i in range(M)] for j in range(M)] LML = np.array(LML).T vmin, vmax = (LML).min(), (LML).max() vmax = 50 level = np.around(np.logspace(np.log10(vmin), np.log10(vmax), 50), decimals=1) plt.contour(Theta0, Theta1, LML, levels=level, norm=colors.LogNorm(vmin=vmin, vmax=vmax)) plt.colorbar() plt.xscale("log") plt.yscale("log") plt.xlabel("Length-scale") plt.ylabel("Noise-level") plt.title("neg-log-marginal-likelihood") plt.tight_layout() ``` ### likehood surface gradient ``` import sympy # Plot LML landscape plt.figure(1) LML_grad = [[ m.neg_log_marg_like([Theta0[i, j], Theta1[i, j]])[1] for i in range(M)] for j in range(M)] LML_grad = -np.array(LML_grad).T 
plt.figure() plt.quiver(Theta0,Theta1,LML_grad[0],LML_grad[1]) plt.xscale("log") plt.yscale("log") plt.show() ``` ## 2D toy example ``` N = 100 # training data Nt = 400 X1 = np.random.uniform(-5,5,size = (N,1)) #np.linspace(-4,5,N).reshape(N,1) X2 = np.random.uniform(-5,5,size = (N,1)) X = np.concatenate([X1,X2],1) # test data Xt = np.concatenate([np.linspace(-4,5,Nt).reshape(Nt,1), np.linspace(-4,5,Nt).reshape(Nt,1)], 1) Y = X1**2 + X2**2 + np.random.randn(N,1)*0.3 t0 = time.time() m = mintGP() m.fit(X,Y) print( m.lengthscale, m.likelihood_variance ) #pred, var = m.predict(Xt,predvar=True) t1 = time.time() print('time to compute ',t1-t0) M = 50 grid = np.linspace(-5,5,M).reshape(M,1) XX1,XX2 = np.meshgrid(grid,grid) Z = [[m.predict( np.array([XX1[i,j], XX2[i,j] ]).reshape(1,2) )[0,0] for i in range(M)] for j in range(M)] Z = np.array(Z) # plot points and fitted surface fig = plt.figure() ax = fig.gca(projection='3d') ax.plot_surface(XX1, XX2, Z, rstride=1, cstride=1, alpha=0.2) ax.scatter(X1, X2, Y, c='r', s=30) plt.xlabel('X') plt.ylabel('Y') ax.set_zlabel('Z') ax.axis('equal') #ax.axis('tight') ``` ### Gradient as colorcode like in Emans sw33t pløts ``` # Color function by norm of gradient fig = plt.figure(figsize=[8,5]) ax = fig.gca(projection='3d') my_col = cm.jet( gradnorm / np.amax(gradnorm) ) cbar = cm.ScalarMappable(cmap=cm.jet) cbar.set_array(my_col) surf = ax.plot_surface(XX1, XX2, Z, rstride=2, cstride=2, alpha=0.5, facecolors = my_col, linewidth = 0 ) fig.colorbar(cbar, shrink=0.8, aspect=8) ``` # Tensordot experiments for gradient function to be able to take NxD arrays ``` a = np.arange(60.).reshape(3,4,5) b = np.arange(24.).reshape(4,3,2) c = np.tensordot(a,b, axes=([1,0],[0,1])) c.shape c # A slower but equivalent way of computing the same d = np.zeros((5,2)) for i in range(5): for j in range(2): for k in range(3): for n in range(4): d[i,j] += a[k,n,i] * b[n,k,j] c == d xnew = np.array([[4,9],[1,8]], ndmin=2) ```
github_jupyter
#Object Detection Framework ``` # If you forked the repository, you can replace the link. repo_url = 'https://github.com/PramukaWeerasinghe/object_detection_demo' # Number of training steps. num_steps = 1000 # 200000 # Number of evaluation steps. num_eval_steps = 50 MODELS_CONFIG = { 'ssd_mobilenet_v2': { 'model_name': 'ssd_mobilenet_v2_coco_2018_03_29', 'pipeline_file': 'ssd_mobilenet_v2_coco.config', 'batch_size': 12 }, 'faster_rcnn_inception_v2': { 'model_name': 'faster_rcnn_inception_v2_coco_2018_01_28', 'pipeline_file': 'faster_rcnn_inception_v2_pets.config', 'batch_size': 12 }, 'rfcn_resnet101': { 'model_name': 'rfcn_resnet101_coco_2018_01_28', 'pipeline_file': 'rfcn_resnet101_pets.config', 'batch_size': 8 } } # Pick the model you want to use # Select a model in `MODELS_CONFIG`. selected_model = 'faster_rcnn_inception_v2' # Name of the object detection model to use. MODEL = MODELS_CONFIG[selected_model]['model_name'] # Name of the pipline file in tensorflow object detection API. pipeline_file = MODELS_CONFIG[selected_model]['pipeline_file'] # Training batch size fits in Colabe's Tesla K80 GPU memory for selected model. batch_size = MODELS_CONFIG[selected_model]['batch_size'] ``` ## Clone the `object_detection_demo` repository or your fork. ``` import os %cd /content repo_dir_path = os.path.abspath(os.path.join('.', os.path.basename(repo_url))) !git clone {repo_url} %cd {repo_dir_path} !git pull %tensorflow_version 1.x import tensorflow as tf ``` ## Install required packages ``` %cd /content !git clone https://github.com/PramukaWeerasinghe/models !apt-get install -qq protobuf-compiler python-pil python-lxml python-tk !pip install -q Cython contextlib2 pillow lxml matplotlib !pip install -q pycocotools %cd /content/models/research !protoc object_detection/protos/*.proto --python_out=. 
import os os.environ['PYTHONPATH'] = '/content/models/research:/content/models/research/slim:' + os.environ['PYTHONPATH'] !python object_detection/builders/model_builder_test.py ``` ## Prepare `tfrecord` files Use the following scripts to generate the `tfrecord` files. ```bash # Convert train folder annotation xml files to a single csv file, # generate the `label_map.pbtxt` file to `data/` directory as well. python xml_to_csv.py -i data/images/train -o data/annotations/train_labels.csv -l data/annotations # Convert test folder annotation xml files to a single csv. python xml_to_csv.py -i data/images/test -o data/annotations/test_labels.csv # Generate `train.record` python generate_tfrecord.py --csv_input=data/annotations/train_labels.csv --output_path=data/annotations/train.record --img_path=data/images/train --label_map data/annotations/label_map.pbtxt # Generate `test.record` python generate_tfrecord.py --csv_input=data/annotations/test_labels.csv --output_path=data/annotations/test.record --img_path=data/images/test --label_map data/annotations/label_map.pbtxt ``` ``` %cd {repo_dir_path} # Convert train folder annotation xml files to a single csv file, # generate the `label_map.pbtxt` file to `data/` directory as well. !python xml_to_csv.py -i data/images/train -o data/annotations/train_labels.csv -l data/annotations # Convert test folder annotation xml files to a single csv. 
!python xml_to_csv.py -i data/images/test -o data/annotations/test_labels.csv # Generate `train.record` !python generate_tfrecord.py --csv_input=data/annotations/train_labels.csv --output_path=data/annotations/train.record --img_path=data/images/train --label_map data/annotations/label_map.pbtxt # Generate `test.record` !python generate_tfrecord.py --csv_input=data/annotations/test_labels.csv --output_path=data/annotations/test.record --img_path=data/images/test --label_map data/annotations/label_map.pbtxt test_record_fname = '/content/object_detection_demo/data/annotations/test.record' train_record_fname = '/content/object_detection_demo/data/annotations/train.record' label_map_pbtxt_fname = '/content/object_detection_demo/data/annotations/label_map.pbtxt' ``` ## Download base model ``` %cd /content/models/research import os import shutil import glob import urllib.request import tarfile MODEL_FILE = MODEL + '.tar.gz' DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/' DEST_DIR = '/content/models/research/pretrained_model' if not (os.path.exists(MODEL_FILE)): urllib.request.urlretrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE) tar = tarfile.open(MODEL_FILE) tar.extractall() tar.close() os.remove(MODEL_FILE) if (os.path.exists(DEST_DIR)): shutil.rmtree(DEST_DIR) os.rename(MODEL, DEST_DIR) !echo {DEST_DIR} !ls -alh {DEST_DIR} fine_tune_checkpoint = os.path.join(DEST_DIR, "model.ckpt") fine_tune_checkpoint ``` ## Configuring a Training Pipeline ``` import os pipeline_fname = os.path.join('/content/models/research/object_detection/samples/configs/', pipeline_file) assert os.path.isfile(pipeline_fname), '`{}` not exist'.format(pipeline_fname) def get_num_classes(pbtxt_fname): from object_detection.utils import label_map_util label_map = label_map_util.load_labelmap(pbtxt_fname) categories = label_map_util.convert_label_map_to_categories( label_map, max_num_classes=90, use_display_name=True) category_index = 
label_map_util.create_category_index(categories) return len(category_index.keys()) import re num_classes = get_num_classes(label_map_pbtxt_fname) with open(pipeline_fname) as f: s = f.read() with open(pipeline_fname, 'w') as f: # fine_tune_checkpoint s = re.sub('fine_tune_checkpoint: ".*?"', 'fine_tune_checkpoint: "{}"'.format(fine_tune_checkpoint), s) # tfrecord files train and test. s = re.sub( '(input_path: ".*?)(train.record)(.*?")', 'input_path: "{}"'.format(train_record_fname), s) s = re.sub( '(input_path: ".*?)(val.record)(.*?")', 'input_path: "{}"'.format(test_record_fname), s) # label_map_path s = re.sub( 'label_map_path: ".*?"', 'label_map_path: "{}"'.format(label_map_pbtxt_fname), s) # Set training batch_size. s = re.sub('batch_size: [0-9]+', 'batch_size: {}'.format(batch_size), s) # Set training steps, num_steps s = re.sub('num_steps: [0-9]+', 'num_steps: {}'.format(num_steps), s) # Set number of classes num_classes. s = re.sub('num_classes: [0-9]+', 'num_classes: {}'.format(num_classes), s) f.write(s) !cat {pipeline_fname} !pwd model_dir = 'training/' # Optionally remove content in output model directory to fresh start. !rm -rf {model_dir} os.makedirs(model_dir, exist_ok=True) ``` ## Run Tensorboard(Optional) ``` !wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip !unzip -o ngrok-stable-linux-amd64.zip LOG_DIR = model_dir get_ipython().system_raw( 'tensorboard --logdir {} --host 0.0.0.0 --port 6006 &' .format(LOG_DIR) ) get_ipython().system_raw('./ngrok http 6006 &') ``` ### Get Tensorboard link ``` ! 
curl -s http://localhost:4040/api/tunnels | python3 -c \ "import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])" ``` ## Train the model ``` !python /content/models/research/object_detection/model_main.py \ --pipeline_config_path={pipeline_fname} \ --model_dir={model_dir} \ --alsologtostderr \ --num_train_steps={num_steps} \ --num_eval_steps={num_eval_steps} !ls {model_dir} ``` ## Exporting a Trained Inference Graph Once your training job is complete, you need to extract the newly trained inference graph, which will be later used to perform the object detection. This can be done as follows: ``` import re import numpy as np output_directory = './fine_tuned_final_model' lst = os.listdir(model_dir) lst = [l for l in lst if 'model.ckpt-' in l and '.meta' in l] steps=np.array([int(re.findall(r'\d+', l)[0]) for l in lst]) last_model = lst[steps.argmax()].replace('.meta', '') last_model_path = os.path.join(model_dir, last_model) print(last_model_path) !python /content/models/research/object_detection/export_inference_graph.py \ --input_type=image_tensor \ --pipeline_config_path={pipeline_fname} \ --output_directory={output_directory} \ --trained_checkpoint_prefix={last_model_path} !ls {output_directory} ``` ## Download the model `.pb` file ``` import os pb_fname = os.path.join(os.path.abspath(output_directory), "frozen_inference_graph.pb") assert os.path.isfile(pb_fname), '`{}` not exist'.format(pb_fname) !ls -alh {pb_fname} ``` ### Option1 : upload the `.pb` file to your Google Drive Then download it from your Google Drive to local file system. During this step, you will be prompted to enter the token. ``` ''' # Install the PyDrive wrapper & import libraries. # This only needs to be done once in a notebook. !pip install -U -q PyDrive from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from google.colab import auth from oauth2client.client import GoogleCredentials # Authenticate and create the PyDrive client. 
# This only needs to be done once in a notebook. auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) fname = os.path.basename(pb_fname) # Create & upload a text file. uploaded = drive.CreateFile({'title': fname}) uploaded.SetContentFile(pb_fname) uploaded.Upload() print('Uploaded file with ID {}'.format(uploaded.get('id'))) ''' ``` ### Option2 : Download the `.pb` file directly to your local file system This method may not be stable when downloading large files like the model `.pb` file. Try **option 1** instead if not working. ``` ''' from google.colab import files files.download(pb_fname) ''' ``` ### Download the `label_map.pbtxt` file ``` ''' from google.colab import files files.download(label_map_pbtxt_fname) ''' ``` ### Download the modified pipline file If you plan to use OpenVINO toolkit to convert the `.pb` file to inference faster on Intel's hardware (CPU/GPU, Movidius, etc.) ``` ''' files.download(pipeline_fname) ''' ``` ## Run inference test Test with images in repository `object_detection_demo/test` directory. ``` !sudo apt install tesseract-ocr !pip install pytesseract %cd /content/object_detection_demo/ import wabtec_track import os import glob # Path to frozen detection graph. This is the actual model that is used for the object detection. PATH_TO_CKPT = pb_fname # List of the strings that is used to add correct label for each box. PATH_TO_LABELS = label_map_pbtxt_fname # If you want to test the code with your images, just add images files to the PATH_TO_TEST_IMAGES_DIR. 
PATH_TO_TEST_IMAGES_DIR = os.path.join(repo_dir_path, "test") assert os.path.isfile(pb_fname) assert os.path.isfile(PATH_TO_LABELS) TEST_IMAGE_PATHS = glob.glob(os.path.join(PATH_TO_TEST_IMAGES_DIR, "*.*")) assert len(TEST_IMAGE_PATHS) > 0, 'No image found in `{}`.'.format(PATH_TO_TEST_IMAGES_DIR) print(TEST_IMAGE_PATHS) %cd /content/models/research/object_detection import numpy as np import os import six.moves.urllib as urllib import sys import tarfile import tensorflow as tf import pytesseract from pytesseract import Output import shutil import cv2 import zipfile import pandas as pd import wabtec_track from collections import defaultdict from io import StringIO from matplotlib import pyplot as plt from PIL import Image from google.colab import files # This is needed since the notebook is stored in the object_detection folder. sys.path.append("..") from object_detection.utils import ops as utils_ops # This is needed to display the images. %matplotlib inline from object_detection.utils import label_map_util from object_detection.utils import visualization_utils as vis_util ''' To Visualize Initiate the following array Array Order ["Switch","Track","Left signal","Right Signal", "all"] Ex: If you want bounding boxes only on switches and Tracks array should be [1,1,0,0] ''' visualize = [1,1,1,1,0] detection_graph = tf.Graph() with detection_graph.as_default(): od_graph_def = tf.GraphDef() with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid: serialized_graph = fid.read() od_graph_def.ParseFromString(serialized_graph) tf.import_graph_def(od_graph_def, name='') label_map = label_map_util.load_labelmap(PATH_TO_LABELS) categories = label_map_util.convert_label_map_to_categories( label_map, max_num_classes=num_classes, use_display_name=True) category_index = label_map_util.create_category_index(categories) s_id = [] milepost = [] s_type = [] def load_image_into_numpy_array(image): (im_width, im_height) = image.size return np.array(image.getdata()).reshape( (im_height, im_width, 
3)).astype(np.uint8) # Size, in inches, of the output images. IMAGE_SIZE = (12, 8) #thresholding def thresholding(image): return cv2.threshold(image, 60, 255, cv2.THRESH_BINARY) # get grayscale image def get_grayscale(image): return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # preprocessing def switch_preprocess(crop_image): kernel = np.ones((3,3),np.uint8) gray = cv2.cvtColor(crop_image,cv2.COLOR_BGR2GRAY) (thresh, gray) = cv2.threshold(gray, 40, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) inverse = cv2.bitwise_not(gray) closed = cv2.morphologyEx(inverse, cv2.MORPH_CLOSE, kernel) #dilation = cv2.dilate(inverse,kernel,iterations = 1) dil_inv = cv2.bitwise_not(closed) opened = cv2.morphologyEx(dil_inv, cv2.MORPH_OPEN, kernel) #opened = cv2.dilate(thresh, kernel, iterations = 1) custom_config = r'--oem 3 --psm 6' img = Image.fromarray(opened) sid = pytesseract.image_to_string(img, config=custom_config) return sid def milestone_preprocess(crop_image): gray = cv2.cvtColor(crop_image,cv2.COLOR_RGB2GRAY) kernel = np.ones((3,3),np.uint8) im = np.array(gray * 255, dtype = np.uint8) (thresh, gray) = cv2.threshold(gray, 40, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU) #opened = cv2.dilate(thresh, kernel, iterations = 1) custom_config = r'--oem 3 --psm 6' img = Image.fromarray(gray) milestone_id = pytesseract.image_to_string(img, config=custom_config) #print(pytesseract.image_to_string(img, config=custom_config)) return milestone_id #Draw Bounding boxes def drawBoundingBoxes(xmin,ymin,xmax,ymax,r,g,b,t): x1,y1,x2,y2 = np.int64(xmin * im_width), np.int64(ymin * im_height), np.int64(xmax * im_width), np.int64(ymax * im_height) cv2.rectangle(image_np, (x1, y1), (x2, y2), (r, g, b), t) #Run Inference on single image def run_inference_for_single_image(image, graph): with graph.as_default(): with tf.Session() as sess: # Get handles to input and output tensors ops = tf.get_default_graph().get_operations() all_tensor_names = { output.name for op in ops for output in op.outputs} tensor_dict = {} for 
key in [ 'num_detections', 'detection_boxes', 'detection_scores', 'detection_classes', 'detection_masks' ]: tensor_name = key + ':0' if tensor_name in all_tensor_names: tensor_dict[key] = tf.get_default_graph().get_tensor_by_name( tensor_name) if 'detection_masks' in tensor_dict: # The following processing is only for single image detection_boxes = tf.squeeze( tensor_dict['detection_boxes'], [0]) detection_masks = tf.squeeze( tensor_dict['detection_masks'], [0]) # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size. real_num_detection = tf.cast( tensor_dict['num_detections'][0], tf.int32) detection_boxes = tf.slice(detection_boxes, [0, 0], [ real_num_detection, -1]) detection_masks = tf.slice(detection_masks, [0, 0, 0], [ real_num_detection, -1, -1]) detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks( detection_masks, detection_boxes, image.shape[0], image.shape[1]) detection_masks_reframed = tf.cast( tf.greater(detection_masks_reframed, 0.5), tf.uint8) # Follow the convention by adding back the batch dimension tensor_dict['detection_masks'] = tf.expand_dims( detection_masks_reframed, 0) image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0') # Run inference output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)}) # all outputs are float32 numpy arrays, so convert types as appropriate output_dict['num_detections'] = int( output_dict['num_detections'][0]) output_dict['detection_classes'] = output_dict[ 'detection_classes'][0].astype(np.uint8) output_dict['detection_boxes'] = output_dict['detection_boxes'][0] output_dict['detection_scores'] = output_dict['detection_scores'][0] if 'detection_masks' in output_dict: output_dict['detection_masks'] = output_dict['detection_masks'][0] return output_dict for image_path in TEST_IMAGE_PATHS: image = Image.open(image_path) # the array based representation of the image will be used later in order to prepare 
the # result image with boxes and labels on it. image_np = load_image_into_numpy_array(image) #Image to crop labels image_to_crop = load_image_into_numpy_array(image) # Expand dimensions since the model expects images to have shape: [1, None, None, 3] image_np_expanded = np.expand_dims(image_np, axis=0) # Actual detection. output_dict = run_inference_for_single_image(image_np, detection_graph) # Visualization of the results of a detection.4 #Obtaining detection boxes, classes and detection scores boxes = np.squeeze(output_dict['detection_boxes']) scores = np.squeeze(output_dict['detection_scores']) classes = np.squeeze(output_dict['detection_classes']) #set a min thresh score ######## min_score_thresh = 0.25 ######## #Filtering the bounding boxes bboxes = boxes[scores > min_score_thresh] d_classes = classes[scores > min_score_thresh] switch_boxes = bboxes[d_classes == 8] milepost_boxes = bboxes[d_classes == 4] signal_boxes = bboxes[d_classes == 5] crossover_boxes = bboxes[d_classes == 1] crossoverLabel_boxes = bboxes[d_classes == 2] electSwitch_boxes = bboxes[d_classes == 3] #get image size im_width, im_height = image.size final_box = [] for box in bboxes: ymin, xmin, ymax, xmax = box final_box.append([xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height]) #print(final_box) if(visualize[0] == 1 or visualize[4] == 1): sw_box = [] for box in switch_boxes: ymin, xmin, ymax, xmax = box sw_box.append([xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height]) drawBoundingBoxes(xmin,ymin,xmax,ymax,256,100,25,2) if(visualize[1] == 1 or visualize[4] == 1): signal_box = [] for box in signal_boxes: ymin, xmin, ymax, xmax = box signal_box.append([xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height]) drawBoundingBoxes(xmin,ymin,xmax,ymax,0,255,255,2) if(visualize[2] == 1 or visualize[4] == 1): crossover_box = [] for box in crossover_boxes: ymin, xmin, ymax, xmax = box crossover_box.append([xmin * im_width, xmax * im_width, ymin * 
im_height, ymax * im_height]) drawBoundingBoxes(xmin,ymin,xmax,ymax,0,0,255,2) if(visualize[3] == 1 or visualize[4] == 1): crossoverLabel_box = [] for box in crossoverLabel_boxes: ymin, xmin, ymax, xmax = box crossoverLabel_box.append([xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height]) drawBoundingBoxes(xmin,ymin,xmax,ymax,255,0,0,2) if(visualize[4] == 1): electSwitch_box = [] for box in electSwitch_boxes: ymin, xmin, ymax, xmax = box electSwitch_box.append([xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height]) drawBoundingBoxes(xmin,ymin,xmax,ymax,120,25,35,2) m_box = [] for box in milepost_boxes: ymin, xmin, ymax, xmax = box m_box.append([xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height]) drawBoundingBoxes(xmin,ymin,xmax,ymax,0,0,256,2) #switch if(visualize[0] == 1): for box in sw_box: ymin, xmin, ymax, xmax = box y,h,x,w = np.int64(ymin), np.int64(ymax),np.int64(xmin), np.int64(xmax) crop_img = image_to_crop[h-10:w+10,y-10:x+10] plt.figure(figsize=(1,2)) #plt.imshow(crop_img) for b in m_box: ymin, xmin, ymax, xmax = b a,b,c,d = np.int64(ymin), np.int64(ymax),np.int64(xmin), np.int64(xmax) val = (y+x)/2 mid_milepost_x = np.int64((a+c)/2) mid_milepost_y = np.int64((b+d)/2) if(a<val and val<c ): if(h>b): cv2.line(image_np, (y,h), (mid_milepost_x,d), (256,100,25), 2) else: cv2.line(image_np, (y,w), (mid_milepost_x,b), (256,100,25), 2) crop_ml = image_to_crop[b-5:d+5,a+5:c-5] r_img = cv2.rotate(crop_ml, cv2.ROTATE_90_CLOCKWISE) plt.figure(figsize=(3,6)) if '\n' in milestone_preprocess(r_img): milepost.append(milestone_preprocess(r_img).split('\n')[4:14]) else: milepost.append(milestone_preprocess(r_img)[5:14]) s_id.append(switch_preprocess(crop_img)) s_type.append("switch") #plt.imshow(r_img,cmap='gray') #Crossover Label for box in crossover_box: ymin, xmin, ymax, xmax = box y,h,x,w = np.int64(ymin), np.int64(ymax),np.int64(xmin), np.int64(xmax) crop_img = image_to_crop[h-10:w+10,y-10:x+10] 
#plt.figure(figsize=(1,2)) #plt.imshow(crop_img) for b in m_box: ymin, xmin, ymax, xmax = b a,b,c,d = np.int64(ymin), np.int64(ymax),np.int64(xmin), np.int64(xmax) val = (y+x)/2 mid_milepost_x = np.int64((a+c)/2) mid_milepost_y = np.int64((b+d)/2) if(a<val and val<c ): if(h>b): cv2.line(image_np, (y,h), (mid_milepost_x,d), (256,100,25), 2) else: cv2.line(image_np, (y,w), (mid_milepost_x,b), (256,100,25), 2) crop_ml = image_to_crop[b-5:d+5,a+5:c-5] r_img = cv2.rotate(crop_ml, cv2.ROTATE_90_CLOCKWISE) plt.figure(figsize=(3,6)) if '\n' in milestone_preprocess(r_img): milepost.append(milestone_preprocess(r_img).split('\n')[4:14]) else: milepost.append(milestone_preprocess(r_img)[5:14]) s_id.append(switch_preprocess(crop_img)) s_type.append("Cross Over") #plt.imshow(r_img,cmap='gray') #Signal for box in signal_box: ymin, xmin, ymax, xmax = box y,h,x,w = np.int64(ymin), np.int64(ymax),np.int64(xmin), np.int64(xmax) crop_img = image_to_crop[h-10:w+10,y-10:x+10] #plt.figure(figsize=(1,2)) #plt.imshow(crop_img) for b in m_box: ymin, xmin, ymax, xmax = b a,b,c,d = np.int64(ymin), np.int64(ymax),np.int64(xmin), np.int64(xmax) val = (y+x)/2 v = np.int64(val) mid_milepost_x = np.int64((a+c)/2) mid_milepost_y = np.int64((b+d)/2) if(a<val and val<c ): if(h>b): cv2.line(image_np, (v,h), (mid_milepost_x,d), (256,100,25), 2) else: cv2.line(image_np, (v,w), (mid_milepost_x,b), (256,100,25), 2) crop_ml = image_to_crop[b-5:d+5,a+5:c-5] r_img = cv2.rotate(crop_ml, cv2.ROTATE_90_CLOCKWISE) plt.figure(figsize=(3,6)) if '\n' in milestone_preprocess(r_img): milepost.append(milestone_preprocess(r_img).split('\n')[4:14]) else: milepost.append(milestone_preprocess(r_img)[5:14]) s_id.append(switch_preprocess(crop_img)) s_type.append("Signal") #plt.imshow(r_img,cmap='gray') # #left signal # if(visualize[2] == 1): # for box in ls_box: # ymin, xmin, ymax, xmax = box # y,h,x,w = np.int64(ymin), np.int64(ymax),np.int64(xmin), np.int64(xmax) # crop_img = image_to_crop[h:w,y:x] # plt.imshow(crop_img) 
# for b in m_box: # ymin, xmin, ymax, xmax = b # a,b,c,d = np.int64(ymin), np.int64(ymax),np.int64(xmin), np.int64(xmax) # val = x-5 # if(a<val and val<c ): # crop_ml = image_np[b-15:d+15,a-15:c+15] # r_img = cv2.rotate(crop_ml, cv2.ROTATE_90_CLOCKWISE) # plt.figure(figsize=(3,6)) # #print(pytesseract.image_to_string(img, config=custom_config)) # s_id.append(sid) # s_type.append("left Signal") # if '\n' in milestone_id: # milepost.append(milestone_preprocess(r_img).split('\ ')[4:14]) # else: # milepost.append(milestone_preprocess(r_img)[5:14]) # #plt.imshow(img,cmap='gray') if(visualize[1]==1 or visualize[4] == 1): t_img = cv2.imwrite('color_img.jpg', image_np) wtt = wabtec_track.WabTecTrack(file_name='color_img.jpg') tracks = wtt.get_lines() for track in tracks: (x1,y1) = track.point_one() (x2,y2) = track.point_two() cv2.rectangle(image_np, (x1-10, y1-10), (x2+10, y2+10), (128, 0, 128), 2) #print(category_index) #print(d_classes) #print(m_box) plt.figure(figsize=IMAGE_SIZE) plt.imshow(image_np) dict = {'type': s_type, 'id':s_id, 'milepost':milepost} df = pd.DataFrame(dict) print(df) # df.to_csv (r'extracted_data.csv', index = False, header=True) # files.download('extracted_data.csv') ```
github_jupyter
## Interobserver Variability This Notebook demonstrates how to compute the interobserver variability of your Atlas data. ``` import os import sys import gc import re import time sys.path.append('../../..') import pandas as pd import SimpleITK as sitk from loguru import logger # Format the output a bit nicer for Jupyter logger.remove() logger.add(sys.stdout, format="{time:YYYY-MM-DD HH:mm:ss} {level} {message}", level="DEBUG") data_path = './data' working_path = "./working" if not os.path.exists(working_path): os.makedirs(working_path) # Read the data into a dictionary data = {} for root, dirs, files in os.walk(data_path, topdown=False): if root == data_path: continue case = root.split('/')[-1] data[case] = {} for f in files: file_path = os.path.join(root, f) name = f.split('.')[0].upper() # Clean up names with double underscore: name = name.replace('__','_') observer = None matches = re.findall(r"(.*)_([0-9])", f.split('.')[0]) if len(matches) > 0: name = matches[0][0].upper() observer = matches[0][1] if observer: if name in data[case]: data[case][name][observer] = file_path else: data[case][name] = {observer: file_path} else: data[case][name] = file_path ``` ### Compute the interobserver variability for each case ``` df_inter_ob_var_file = os.path.join(working_path, "df_inter_ob_var.pkl") # If already computed, read the data from a file if os.path.exists(df_inter_ob_var_file): print(f'Reading from file: {df_inter_ob_var_file}') df_inter_ob_var = pd.read_pickle(df_inter_ob_var_file) else: inter_observe_var = [] for c in data: for s in data[c]: if not s.startswith('STRUCT_'): continue for o1 in data[c][s]: for o2 in data[c][s]: if o1==o2: continue mask_1 = sitk.ReadImage(data[c][s][o1]) mask_2 = sitk.ReadImage(data[c][s][o2]) lomif = sitk.LabelOverlapMeasuresImageFilter() lomif.Execute(mask_1, mask_2) hdif = sitk.HausdorffDistanceImageFilter() hdif.Execute(mask_1, mask_2) dce = lomif.GetDiceCoefficient() hmax = hdif.GetHausdorffDistance() havg = 
hdif.GetAverageHausdorffDistance() row = {'o1': o1, 'o2': o2, 'case': c, 'struct': s, 'dce': dce, 'hausdorff_max': hmax, 'hausdorff_avg': havg } inter_observe_var.append(row) df_inter_ob_var = pd.DataFrame(inter_observe_var) print(f'Saving to file: {df_inter_ob_var_file}') df_inter_ob_var.to_pickle(df_inter_ob_var_file) ``` ### Output the results ``` df_inter = df_inter_ob_var.groupby(['struct']).aggregate(['mean', 'std', 'min', 'max']) df_inter = df_inter[['dce','hausdorff_max','hausdorff_avg']] df_inter ```
github_jupyter
# VQ-VAE WaveRNN [![Generic badge](https://img.shields.io/badge/vqvaevc--PyTorch-9cf.svg)][github] [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)][notebook] Reimplmentation of VQ-VAE WaveRNN Author: [tarepan] [github]:https://github.com/tarepan/vqvaevc [notebook]:https://colab.research.google.com/github/tarepan/vqvaevc/blob/main/vqvaevc.ipynb [tarepan]:https://github.com/tarepan ## Colab Check Check - Google Colaboratory runnning time - GPU type - Python version - CUDA version ``` !cat /proc/uptime | awk '{print $1 /60 /60 /24 "days (" $1 "sec)"}' !head -n 1 /proc/driver/nvidia/gpus/**/information !cat /usr/local/cuda/version.txt ``` ## Setup Activate notebook intermittently for long session (RUN once **by hand**) ```javascript const refresher = setInterval(()=>{document.querySelector("colab-connect-button").click();console.log("clicked for long session");}, 1000*60*10); ``` Clone repository from `tarepan/vqvaevc` ``` # GoogleDrive from google.colab import drive drive.mount('/content/gdrive') # clone repository !git clone https://github.com/tarepan/vqvaevc.git %cd ./vqvaevc ``` Prepare dataset ``` # !pip install torchaudio==0.7.0 # from torchaudio.datasets.utils import download_url # # Download and extract corpus # !mkdir ../data # download_url("http://www.udialogue.org/download/VCTK-Corpus.tar.gz", "../data") # !tar -xvf ../data/VCTK-Corpus.tar.gz # # Preprocess corpus into dataset # !python preprocess_multispeaker.py ./VCTK-Corpus/wav48 ./dataset # !cp -r ./dataset ../gdrive/MyDrive/ML_data/datasets/VCTK_processed # Copy dataset from storage (Google Drive) !cp -r ../gdrive/MyDrive/ML_data/datasets/VCTK_processed/dataset . 
``` ## Training ``` !python wavernn.py --multi_speaker_data_path ./dataset ``` ## Training Optimization ### whole ``` # num_worker x pinmemory !python -m scyclonepytorch.main_train --max_epochs=15 --num_workers=0 !python -m scyclonepytorch.main_train --max_epochs=15 --num_workers=1 !python -m scyclonepytorch.main_train --max_epochs=15 --num_workers=2 !python -m scyclonepytorch.main_train --max_epochs=15 --num_workers=4 !python -m scyclonepytorch.main_train --max_epochs=15 --num_workers=0 --no_pin_memory !python -m scyclonepytorch.main_train --max_epochs=15 --num_workers=1 --no_pin_memory !python -m scyclonepytorch.main_train --max_epochs=15 --num_workers=2 --no_pin_memory !python -m scyclonepytorch.main_train --max_epochs=15 --num_workers=4 --no_pin_memory ``` ### num_worker ``` !python -m scyclonepytorch.main_train --max_epochs=5 --num_workers=0 !python -m scyclonepytorch.main_train --max_epochs=5 --num_workers=1 !python -m scyclonepytorch.main_train --max_epochs=5 --num_workers=2 !python -m scyclonepytorch.main_train --max_epochs=5 --num_workers=4 ``` ### pin_memory ``` !python -m scyclonepytorch.main_train --max_epochs=5 --num_workers=2 !python -m scyclonepytorch.main_train --max_epochs=5 --num_workers=2 --no_pin_memory ``` ### Profiling ``` !python -m scyclonepytorch.main_train --profiler --max_epochs=5 --num_workers=2 --no_pin_memory # profile mode # # Usage stat # ## GPU # !nvidia-smi -l 3 # ## CPU # !vmstat 5 # !top ```
github_jupyter
# Convolutional Networks So far we have worked with deep fully-connected networks, using them to explore different optimization strategies and network architectures. Fully-connected networks are a good testbed for experimentation because they are very computationally efficient, but in practice all state-of-the-art results use convolutional networks instead. First you will implement several layer types that are used in convolutional networks. You will then use these layers to train a convolutional network on the CIFAR-10 dataset. ``` # As usual, a bit of setup from __future__ import print_function import numpy as np import matplotlib.pyplot as plt from cs231n.classifiers.cnn import * from cs231n.data_utils import get_CIFAR10_data from cs231n.gradient_check import eval_numerical_gradient_array, eval_numerical_gradient from cs231n.layers import * from cs231n.fast_layers import * from cs231n.solver import Solver %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading external modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 def rel_error(x, y): """ returns relative error """ return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) # Load the (preprocessed) CIFAR10 data. data = get_CIFAR10_data() for k, v in data.items(): print('%s: ' % k, v.shape) ``` # Convolution: Naive forward pass The core of a convolutional network is the convolution operation. In the file `cs231n/layers.py`, implement the forward pass for the convolution layer in the function `conv_forward_naive`. You don't have to worry too much about efficiency at this point; just write the code in whatever way you find most clear. 
You can test your implementation by running the following: ``` x_shape = (2, 3, 4, 4) w_shape = (3, 3, 4, 4) x = np.linspace(-0.1, 0.5, num=np.prod(x_shape)).reshape(x_shape) w = np.linspace(-0.2, 0.3, num=np.prod(w_shape)).reshape(w_shape) b = np.linspace(-0.1, 0.2, num=3) conv_param = {'stride': 2, 'pad': 1} out, _ = conv_forward_naive(x, w, b, conv_param) correct_out = np.array([[[[-0.08759809, -0.10987781], [-0.18387192, -0.2109216 ]], [[ 0.21027089, 0.21661097], [ 0.22847626, 0.23004637]], [[ 0.50813986, 0.54309974], [ 0.64082444, 0.67101435]]], [[[-0.98053589, -1.03143541], [-1.19128892, -1.24695841]], [[ 0.69108355, 0.66880383], [ 0.59480972, 0.56776003]], [[ 2.36270298, 2.36904306], [ 2.38090835, 2.38247847]]]]) # Compare your output to ours; difference should be around 2e-8 print('Testing conv_forward_naive') print('difference: ', rel_error(out, correct_out)) ``` # Aside: Image processing via convolutions As fun way to both check your implementation and gain a better understanding of the type of operation that convolutional layers can perform, we will set up an input containing two images and manually set up filters that perform common image processing operations (grayscale conversion and edge detection). The convolution forward pass will apply these operations to each of the input images. We can then visualize the results as a sanity check. ``` from scipy.misc import imread, imresize kitten, puppy = imread('kitten.jpg'), imread('puppy.jpg') # kitten is wide, and puppy is already square d = kitten.shape[1] - kitten.shape[0] kitten_cropped = kitten[:, d//2:-d//2, :] img_size = 200 # Make this smaller if it runs too slow x = np.zeros((2, 3, img_size, img_size)) x[0, :, :, :] = imresize(puppy, (img_size, img_size)).transpose((2, 0, 1)) x[1, :, :, :] = imresize(kitten_cropped, (img_size, img_size)).transpose((2, 0, 1)) # Set up a convolutional weights holding 2 filters, each 3x3 w = np.zeros((2, 3, 3, 3)) # The first filter converts the image to grayscale. 
# Set up the red, green, and blue channels of the filter. w[0, 0, :, :] = [[0, 0, 0], [0, 0.3, 0], [0, 0, 0]] w[0, 1, :, :] = [[0, 0, 0], [0, 0.6, 0], [0, 0, 0]] w[0, 2, :, :] = [[0, 0, 0], [0, 0.1, 0], [0, 0, 0]] # Second filter detects horizontal edges in the blue channel. w[1, 2, :, :] = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]] # Vector of biases. We don't need any bias for the grayscale # filter, but for the edge detection filter we want to add 128 # to each output so that nothing is negative. b = np.array([0, 128]) # Compute the result of convolving each input in x with each filter in w, # offsetting by b, and storing the results in out. out, _ = conv_forward_naive(x, w, b, {'stride': 1, 'pad': 1}) def imshow_noax(img, normalize=True): """ Tiny helper to show images as uint8 and remove axis labels """ if normalize: img_max, img_min = np.max(img), np.min(img) img = 255.0 * (img - img_min) / (img_max - img_min) plt.imshow(img.astype('uint8')) plt.gca().axis('off') # Show the original images and the results of the conv operation plt.subplot(2, 3, 1) imshow_noax(puppy, normalize=False) plt.title('Original image') plt.subplot(2, 3, 2) imshow_noax(out[0, 0]) plt.title('Grayscale') plt.subplot(2, 3, 3) imshow_noax(out[0, 1]) plt.title('Edges') plt.subplot(2, 3, 4) imshow_noax(kitten_cropped, normalize=False) plt.subplot(2, 3, 5) imshow_noax(out[1, 0]) plt.subplot(2, 3, 6) imshow_noax(out[1, 1]) plt.show() ``` # Convolution: Naive backward pass Implement the backward pass for the convolution operation in the function `conv_backward_naive` in the file `cs231n/layers.py`. Again, you don't need to worry too much about computational efficiency. When you are done, run the following to check your backward pass with a numeric gradient check. 
``` np.random.seed(231) x = np.random.randn(4, 3, 5, 5) w = np.random.randn(2, 3, 3, 3) b = np.random.randn(2,) dout = np.random.randn(4, 2, 5, 5) conv_param = {'stride': 1, 'pad': 1} dx_num = eval_numerical_gradient_array(lambda x: conv_forward_naive(x, w, b, conv_param)[0], x, dout) dw_num = eval_numerical_gradient_array(lambda w: conv_forward_naive(x, w, b, conv_param)[0], w, dout) db_num = eval_numerical_gradient_array(lambda b: conv_forward_naive(x, w, b, conv_param)[0], b, dout) out, cache = conv_forward_naive(x, w, b, conv_param) dx, dw, db = conv_backward_naive(dout, cache) # Your errors should be around 1e-8' print('Testing conv_backward_naive function') print('dx error: ', rel_error(dx, dx_num)) print('dw error: ', rel_error(dw, dw_num)) print('db error: ', rel_error(db, db_num)) ``` # Max pooling: Naive forward Implement the forward pass for the max-pooling operation in the function `max_pool_forward_naive` in the file `cs231n/layers.py`. Again, don't worry too much about computational efficiency. Check your implementation by running the following: ``` x_shape = (2, 3, 4, 4) x = np.linspace(-0.3, 0.4, num=np.prod(x_shape)).reshape(x_shape) pool_param = {'pool_width': 2, 'pool_height': 2, 'stride': 2} out, _ = max_pool_forward_naive(x, pool_param) correct_out = np.array([[[[-0.26315789, -0.24842105], [-0.20421053, -0.18947368]], [[-0.14526316, -0.13052632], [-0.08631579, -0.07157895]], [[-0.02736842, -0.01263158], [ 0.03157895, 0.04631579]]], [[[ 0.09052632, 0.10526316], [ 0.14947368, 0.16421053]], [[ 0.20842105, 0.22315789], [ 0.26736842, 0.28210526]], [[ 0.32631579, 0.34105263], [ 0.38526316, 0.4 ]]]]) # Compare your output with ours. Difference should be around 1e-8. print('Testing max_pool_forward_naive function:') print('difference: ', rel_error(out, correct_out)) ``` # Max pooling: Naive backward Implement the backward pass for the max-pooling operation in the function `max_pool_backward_naive` in the file `cs231n/layers.py`. 
You don't need to worry about computational efficiency. Check your implementation with numeric gradient checking by running the following: ``` np.random.seed(231) x = np.random.randn(3, 2, 8, 8) dout = np.random.randn(3, 2, 4, 4) pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2} dx_num = eval_numerical_gradient_array(lambda x: max_pool_forward_naive(x, pool_param)[0], x, dout) out, cache = max_pool_forward_naive(x, pool_param) dx = max_pool_backward_naive(dout, cache) # Your error should be around 1e-12 print('Testing max_pool_backward_naive function:') print('dx error: ', rel_error(dx_num, dx)) ``` # Fast layers Making convolution and pooling layers fast can be challenging. To spare you the pain, we've provided fast implementations of the forward and backward passes for convolution and pooling layers in the file `cs231n/fast_layers.py`. The fast convolution implementation depends on a Cython extension; to compile it you need to run the following from the `cs231n` directory: ```bash python setup.py build_ext --inplace ``` The API for the fast versions of the convolution and pooling layers is exactly the same as the naive versions that you implemented above: the forward pass receives data, weights, and parameters and produces outputs and a cache object; the backward pass receives upstream derivatives and the cache object and produces gradients with respect to the data and weights. **NOTE:** The fast implementation for pooling will only perform optimally if the pooling regions are non-overlapping and tile the input. If these conditions are not met then the fast pooling implementation will not be much faster than the naive implementation. 
You can compare the performance of the naive and fast versions of these layers by running the following: ``` from cs231n.fast_layers import conv_forward_fast, conv_backward_fast from time import time np.random.seed(231) x = np.random.randn(100, 3, 31, 31) w = np.random.randn(25, 3, 3, 3) b = np.random.randn(25,) dout = np.random.randn(100, 25, 16, 16) conv_param = {'stride': 2, 'pad': 1} t0 = time() out_naive, cache_naive = conv_forward_naive(x, w, b, conv_param) t1 = time() out_fast, cache_fast = conv_forward_fast(x, w, b, conv_param) t2 = time() print('Testing conv_forward_fast:') print('Naive: %fs' % (t1 - t0)) print('Fast: %fs' % (t2 - t1)) print('Speedup: %fx' % ((t1 - t0) / (t2 - t1))) print('Difference: ', rel_error(out_naive, out_fast)) t0 = time() dx_naive, dw_naive, db_naive = conv_backward_naive(dout, cache_naive) t1 = time() dx_fast, dw_fast, db_fast = conv_backward_fast(dout, cache_fast) t2 = time() print('\nTesting conv_backward_fast:') print('Naive: %fs' % (t1 - t0)) print('Fast: %fs' % (t2 - t1)) print('Speedup: %fx' % ((t1 - t0) / (t2 - t1))) print('dx difference: ', rel_error(dx_naive, dx_fast)) print('dw difference: ', rel_error(dw_naive, dw_fast)) print('db difference: ', rel_error(db_naive, db_fast)) from cs231n.fast_layers import max_pool_forward_fast, max_pool_backward_fast np.random.seed(231) x = np.random.randn(100, 3, 32, 32) dout = np.random.randn(100, 3, 16, 16) pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2} t0 = time() out_naive, cache_naive = max_pool_forward_naive(x, pool_param) t1 = time() out_fast, cache_fast = max_pool_forward_fast(x, pool_param) t2 = time() print('Testing pool_forward_fast:') print('Naive: %fs' % (t1 - t0)) print('fast: %fs' % (t2 - t1)) print('speedup: %fx' % ((t1 - t0) / (t2 - t1))) print('difference: ', rel_error(out_naive, out_fast)) t0 = time() dx_naive = max_pool_backward_naive(dout, cache_naive) t1 = time() dx_fast = max_pool_backward_fast(dout, cache_fast) t2 = time() print('\nTesting 
pool_backward_fast:') print('Naive: %fs' % (t1 - t0)) print('speedup: %fx' % ((t1 - t0) / (t2 - t1))) print('dx difference: ', rel_error(dx_naive, dx_fast)) ``` # Convolutional "sandwich" layers Previously we introduced the concept of "sandwich" layers that combine multiple operations into commonly used patterns. In the file `cs231n/layer_utils.py` you will find sandwich layers that implement a few commonly used patterns for convolutional networks. ``` from cs231n.layer_utils import conv_relu_pool_forward, conv_relu_pool_backward np.random.seed(231) x = np.random.randn(2, 3, 16, 16) w = np.random.randn(3, 3, 3, 3) b = np.random.randn(3,) dout = np.random.randn(2, 3, 8, 8) conv_param = {'stride': 1, 'pad': 1} pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2} out, cache = conv_relu_pool_forward(x, w, b, conv_param, pool_param) dx, dw, db = conv_relu_pool_backward(dout, cache) dx_num = eval_numerical_gradient_array(lambda x: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], x, dout) dw_num = eval_numerical_gradient_array(lambda w: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], w, dout) db_num = eval_numerical_gradient_array(lambda b: conv_relu_pool_forward(x, w, b, conv_param, pool_param)[0], b, dout) print('Testing conv_relu_pool') print('dx error: ', rel_error(dx_num, dx)) print('dw error: ', rel_error(dw_num, dw)) print('db error: ', rel_error(db_num, db)) from cs231n.layer_utils import conv_relu_forward, conv_relu_backward np.random.seed(231) x = np.random.randn(2, 3, 8, 8) w = np.random.randn(3, 3, 3, 3) b = np.random.randn(3,) dout = np.random.randn(2, 3, 8, 8) conv_param = {'stride': 1, 'pad': 1} out, cache = conv_relu_forward(x, w, b, conv_param) dx, dw, db = conv_relu_backward(dout, cache) dx_num = eval_numerical_gradient_array(lambda x: conv_relu_forward(x, w, b, conv_param)[0], x, dout) dw_num = eval_numerical_gradient_array(lambda w: conv_relu_forward(x, w, b, conv_param)[0], w, dout) db_num = 
eval_numerical_gradient_array(lambda b: conv_relu_forward(x, w, b, conv_param)[0], b, dout) print('Testing conv_relu:') print('dx error: ', rel_error(dx_num, dx)) print('dw error: ', rel_error(dw_num, dw)) print('db error: ', rel_error(db_num, db)) ``` # Three-layer ConvNet Now that you have implemented all the necessary layers, we can put them together into a simple convolutional network. Open the file `cs231n/classifiers/cnn.py` and complete the implementation of the `ThreeLayerConvNet` class. Run the following cells to help you debug: ## Sanity check loss After you build a new network, one of the first things you should do is sanity check the loss. When we use the softmax loss, we expect the loss for random weights (and no regularization) to be about `log(C)` for `C` classes. When we add regularization this should go up. ``` model = ThreeLayerConvNet() N = 50 X = np.random.randn(N, 3, 32, 32) y = np.random.randint(10, size=N) loss, grads = model.loss(X, y) print('Initial loss (no regularization): ', loss) model.reg = 0.5 loss, grads = model.loss(X, y) print('Initial loss (with regularization): ', loss) ``` ## Gradient check After the loss looks reasonable, use numeric gradient checking to make sure that your backward pass is correct. When you use numeric gradient checking you should use a small amount of artificial data and a small number of neurons at each layer. Note: correct implementations may still have relative errors up to 1e-2. 
``` num_inputs = 2 input_dim = (3, 16, 16) reg = 0.0 num_classes = 10 np.random.seed(231) X = np.random.randn(num_inputs, *input_dim) y = np.random.randint(num_classes, size=num_inputs) model = ThreeLayerConvNet(num_filters=3, filter_size=3, input_dim=input_dim, hidden_dim=7, dtype=np.float64) loss, grads = model.loss(X, y) for param_name in sorted(grads): f = lambda _: model.loss(X, y)[0] param_grad_num = eval_numerical_gradient(f, model.params[param_name], verbose=False, h=1e-6) e = rel_error(param_grad_num, grads[param_name]) print('%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name]))) ``` ## Overfit small data A nice trick is to train your model with just a few training samples. You should be able to overfit small datasets, which will result in very high training accuracy and comparatively low validation accuracy. ``` np.random.seed(231) num_train = 100 small_data = { 'X_train': data['X_train'][:num_train], 'y_train': data['y_train'][:num_train], 'X_val': data['X_val'], 'y_val': data['y_val'], } model = ThreeLayerConvNet(weight_scale=1e-2) solver = Solver(model, small_data, num_epochs=15, batch_size=50, update_rule='adam', optim_config={ 'learning_rate': 1e-4, }, verbose=True, print_every=1) solver.train() ``` Plotting the loss, training accuracy, and validation accuracy should show clear overfitting: ``` plt.subplot(2, 1, 1) plt.plot(solver.loss_history, 'o') plt.xlabel('iteration') plt.ylabel('loss') plt.subplot(2, 1, 2) plt.plot(solver.train_acc_history, '-o') plt.plot(solver.val_acc_history, '-o') plt.legend(['train', 'val'], loc='upper left') plt.xlabel('epoch') plt.ylabel('accuracy') plt.show() ``` ## Train the net By training the three-layer convolutional network for one epoch, you should achieve greater than 40% accuracy on the training set: ``` model = ThreeLayerConvNet(weight_scale=0.001, hidden_dim=500, reg=0.001) solver = Solver(model, data, num_epochs=1, batch_size=50, update_rule='adam', optim_config={ 
'learning_rate': 1e-4, }, verbose=True, print_every=20) solver.train() ``` ## Visualize Filters You can visualize the first-layer convolutional filters from the trained network by running the following: ``` from cs231n.vis_utils import visualize_grid grid = visualize_grid(model.params['W1'].transpose(0, 2, 3, 1)) plt.imshow(grid.astype('uint8')) plt.axis('off') plt.gcf().set_size_inches(5, 5) plt.show() ``` # Spatial Batch Normalization We already saw that batch normalization is a very useful technique for training deep fully-connected networks. Batch normalization can also be used for convolutional networks, but we need to tweak it a bit; the modification will be called "spatial batch normalization." Normally batch-normalization accepts inputs of shape `(N, D)` and produces outputs of shape `(N, D)`, where we normalize across the minibatch dimension `N`. For data coming from convolutional layers, batch normalization needs to accept inputs of shape `(N, C, H, W)` and produce outputs of shape `(N, C, H, W)` where the `N` dimension gives the minibatch size and the `(H, W)` dimensions give the spatial size of the feature map. If the feature map was produced using convolutions, then we expect the statistics of each feature channel to be relatively consistent both between different images and different locations within the same image. Therefore spatial batch normalization computes a mean and variance for each of the `C` feature channels by computing statistics over both the minibatch dimension `N` and the spatial dimensions `H` and `W`. ## Spatial batch normalization: forward In the file `cs231n/layers.py`, implement the forward pass for spatial batch normalization in the function `spatial_batchnorm_forward`. 
Check your implementation by running the following: ``` np.random.seed(231) # Check the training-time forward pass by checking means and variances # of features both before and after spatial batch normalization N, C, H, W = 2, 3, 4, 5 x = 4 * np.random.randn(N, C, H, W) + 10 print('Before spatial batch normalization:') print(' Shape: ', x.shape) print(' Means: ', x.mean(axis=(0, 2, 3))) print(' Stds: ', x.std(axis=(0, 2, 3))) # Means should be close to zero and stds close to one gamma, beta = np.ones(C), np.zeros(C) bn_param = {'mode': 'train'} out, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param) print('After spatial batch normalization:') print(' Shape: ', out.shape) print(' Means: ', out.mean(axis=(0, 2, 3))) print(' Stds: ', out.std(axis=(0, 2, 3))) # Means should be close to beta and stds close to gamma gamma, beta = np.asarray([3, 4, 5]), np.asarray([6, 7, 8]) out, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param) print('After spatial batch normalization (nontrivial gamma, beta):') print(' Shape: ', out.shape) print(' Means: ', out.mean(axis=(0, 2, 3))) print(' Stds: ', out.std(axis=(0, 2, 3))) np.random.seed(231) # Check the test-time forward pass by running the training-time # forward pass many times to warm up the running averages, and then # checking the means and variances of activations after a test-time # forward pass. N, C, H, W = 10, 4, 11, 12 bn_param = {'mode': 'train'} gamma = np.ones(C) beta = np.zeros(C) for t in range(50): x = 2.3 * np.random.randn(N, C, H, W) + 13 spatial_batchnorm_forward(x, gamma, beta, bn_param) bn_param['mode'] = 'test' x = 2.3 * np.random.randn(N, C, H, W) + 13 a_norm, _ = spatial_batchnorm_forward(x, gamma, beta, bn_param) # Means should be close to zero and stds close to one, but will be # noisier than training-time forward passes. 
print('After spatial batch normalization (test-time):') print(' means: ', a_norm.mean(axis=(0, 2, 3))) print(' stds: ', a_norm.std(axis=(0, 2, 3))) ``` ## Spatial batch normalization: backward In the file `cs231n/layers.py`, implement the backward pass for spatial batch normalization in the function `spatial_batchnorm_backward`. Run the following to check your implementation using a numeric gradient check: ``` np.random.seed(231) N, C, H, W = 2, 3, 4, 5 x = 5 * np.random.randn(N, C, H, W) + 12 gamma = np.random.randn(C) beta = np.random.randn(C) dout = np.random.randn(N, C, H, W) bn_param = {'mode': 'train'} fx = lambda x: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0] fg = lambda a: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0] fb = lambda b: spatial_batchnorm_forward(x, gamma, beta, bn_param)[0] dx_num = eval_numerical_gradient_array(fx, x, dout) da_num = eval_numerical_gradient_array(fg, gamma, dout) db_num = eval_numerical_gradient_array(fb, beta, dout) _, cache = spatial_batchnorm_forward(x, gamma, beta, bn_param) dx, dgamma, dbeta = spatial_batchnorm_backward(dout, cache) print('dx error: ', rel_error(dx_num, dx)) print('dgamma error: ', rel_error(da_num, dgamma)) print('dbeta error: ', rel_error(db_num, dbeta)) ``` # Extra Credit Description If you implement any additional features for extra credit, clearly describe them here with pointers to any code in this or other files if applicable.
github_jupyter
# Implementing doND using the dataset ``` from functools import partial import numpy as np from qcodes.dataset.database import initialise_database from qcodes.dataset.experiment_container import new_experiment from qcodes.tests.instrument_mocks import DummyInstrument from qcodes.dataset.measurements import Measurement from qcodes.dataset.plotting import plot_by_id initialise_database() # just in case no database file exists new_experiment("doNd-tutorial", sample_name="no sample") ``` First we borrow the dummy instruments from the contextmanager notebook to have something to measure. ``` # preparatory mocking of physical setup dac = DummyInstrument('dac', gates=['ch1', 'ch2']) dmm = DummyInstrument('dmm', gates=['v1', 'v2']) # and we'll make a 2D gaussian to sample from/measure def gauss_model(x0: float, y0: float, sigma: float, noise: float=0.0005): """ Returns a generator sampling a gaussian. The gaussian is normalised such that its maximal value is simply 1 """ while True: (x, y) = yield model = np.exp(-((x0-x)**2+(y0-y)**2)/2/sigma**2)*np.exp(2*sigma**2) noise = np.random.randn()*noise yield model + noise # and finally wire up the dmm v1 to "measure" the gaussian gauss = gauss_model(0.1, 0.2, 0.25) next(gauss) def measure_gauss(dac): val = gauss.send((dac.ch1.get(), dac.ch2.get())) next(gauss) return val dmm.v1.get = partial(measure_gauss, dac) ``` Now lets reimplement the qdev-wrapper do1d function that can measure one one more parameters as a function of another parameter. This is more or less as simple as you would expect. ``` def do1d(param_set, start, stop, num_points, delay, *param_meas): meas = Measurement() meas.register_parameter(param_set) # register the first independent parameter output = [] param_set.post_delay = delay # do1D enforces a simple relationship between measured parameters # and set parameters. 
For anything more complicated this should be reimplemented from scratch for parameter in param_meas: meas.register_parameter(parameter, setpoints=(param_set,)) output.append([parameter, None]) with meas.run() as datasaver: for set_point in np.linspace(start, stop, num_points): param_set.set(set_point) for i, parameter in enumerate(param_meas): output[i][1] = parameter.get() datasaver.add_result((param_set, set_point), *output) dataid = datasaver.run_id # convenient to have for plotting return dataid dataid = do1d(dac.ch1, 0, 1, 10, 0.01, dmm.v1, dmm.v2) axes, cbaxes = plot_by_id(dataid) def do2d(param_set1, start1, stop1, num_points1, delay1, param_set2, start2, stop2, num_points2, delay2, *param_meas): # And then run an experiment meas = Measurement() meas.register_parameter(param_set1) param_set1.post_delay = delay1 meas.register_parameter(param_set2) param_set1.post_delay = delay2 output = [] for parameter in param_meas: meas.register_parameter(parameter, setpoints=(param_set1,param_set2)) output.append([parameter, None]) with meas.run() as datasaver: for set_point1 in np.linspace(start1, stop1, num_points1): param_set1.set(set_point1) for set_point2 in np.linspace(start2, stop2, num_points2): param_set2.set(set_point2) for i, parameter in enumerate(param_meas): output[i][1] = parameter.get() datasaver.add_result((param_set1, set_point1), (param_set2, set_point2), *output) dataid = datasaver.run_id # convenient to have for plotting return dataid dataid = do2d(dac.ch1, -1, 1, 100, 0.01, dac.ch2, -1, 1, 100, 0.01, dmm.v1, dmm.v2) axes, cbaxes = plot_by_id(dataid) ```
github_jupyter
``` import pandas as pd df = pd.DataFrame( {"AAA": [4, 5, 6, 7], "BBB": [10, 20, 30, 40], "CCC": [100, 50, -30, -50]} ) df # Conditional replacement df.loc[df.AAA >= 5, "BBB"] = -1 df # Conditional replacement, multiple columns df.loc[df.BBB == -1, ["AAA", "CCC"]] = 1 df df_mask = pd.DataFrame( {"AAA": [True, False] * 2, "BBB": [False] * 4, "CCC": [True, False] * 2} ) df_mask df # set the value on -1000 on False record df.where(df_mask, -1000) # Position oriented df.iloc[1:3] # Label oriented df.loc[0:2] # Get non 1's with inverse operator df[~(df.AAA == 1)] # Create new columns dfcol = pd.DataFrame({"AAA": [1, 2, 1, 3], "BBB": [1, 1, 2, 2], "CCC": [2, 1, 3, 1]}) category = {1: "Alpha", 2: "Beta", 3: "Charlie"} category.get(1) # Alpha new_cols = [str(x) + "_cat" for x in dfcol.columns] # Get value from source_cols as arg to category.get to obtain the dict's value dfcol[new_cols] = dfcol[dfcol.columns].applymap(category.get) dfcol # Get index of min/max dfcol["AAA"].idxmax() # 3 dfcol["AAA"].idxmin() # 0 # Multiindexing dfmul = pd.DataFrame( { "row": [0, 1, 2], "One_X": [1.1, 1.1, 1.1], "One_Y": [1.2, 1.2, 1.2], "Two_X": [1.11, 1.11, 1.11], "Two_Y": [1.22, 1.22, 1.22], } ) # Index by default start at 0 dfmul # Label index dfmul.set_index("row") [tuple(c.split("_")) for c in dfmul.columns] dfmul.columns = pd.MultiIndex.from_tuples([tuple(c.split("_")) for c in dfmul.columns]) dfmul dfmul.columns = pd.MultiIndex.from_tuples([ ('One', 'row_index'), ('One', 'X'), ('One', 'Y'), ('Two', 'X'), ('Two', 'Y')]) dfmul # Reshaping dataframe based on new labels dfmul.columns = pd.MultiIndex.from_tuples([tuple(c.split("_")) for c in dfmul.columns]) dfmul # Reshape based on ('One', 'X') index dfmul.stack(0) # Original indeces 0,1 dfmul.stack(0).reset_index(0) # Index "One" and "Two" dfmul.stack(0).reset_index(1) dfm = pd.DataFrame([1,1,np.nan,0,0], index=pd.date_range("2013-08-01", periods=5, freq="B"), columns=["A"]) dfm # Fill values forward dfm.ffill() # Fill values backward 
dfm.bfill() # Grouping dfgp = pd.DataFrame( { "animal": "cat dog cat fish dog cat cat".split(), "size": list("SSMMMLL"), "weight": [8, 10, 11, 1, 20, 12, 12], "adult": [False] * 5 + [True] * 2, } ) dfgp # Group by animal and get max weight and display size dfgp.groupby("animal").apply(lambda g: g["size"][g["weight"].idxmax()]) # Using get_group dfgp.groupby(["animal"]).get_group("cat") # compared to loc dfgp.loc[dfgp["animal"]=="cat"] # Multiple aggregated columns ts = pd.Series(data=list(range(10)), index=pd.date_range(start="2014-10-07", periods=10, freq="2min")) ts # Apply these calculation on the column mhc = {"Mean": np.mean, "Max": np.max} # Resample and get data every 5min ts.resample("5min").apply(mhc) # Counting instance dfvc = pd.DataFrame( {"Color": "Red Red Red Blue".split(), "Value": [100, 150, 50, 50]} ) dfvc # Count color instance as a column dfvc["Counts"] = dfvc.groupby(["Color"]).transform(len) dfvc ```
github_jupyter
``` !pip install seaborn !pip install newspaper3k import nltk nltk.download('stopwords') ``` The next two lines are required to load files from your Google drive. ``` from google.colab import drive drive.mount('/content/drive') ``` # SCRAPER ``` from newspaper import Article from newspaper import ArticleException import newspaper # from progress.bar import IncrementalBar import time import string def scrape_news_links(url): ''' Scrapes links : not only google but any online vendor. set url while calling the function ''' print('Scraping links') paper = newspaper.build(url, memoize_articles=False) links = [] # bar = IncrementalBar('Scraping Links', max=len(paper.articles), suffix='%(percent)d%%') for article in paper.articles: links.append(article.url) # bar.next() time.sleep(0.1) # bar.finish() # print(links) return links def clean_text(text): ''' To clean text ''' print('cleaning_text') # text = text.strip() # text = text.lower() # for punct in string.punctuation: # text = text.replace(punct, '') text = text.lower() strin = text.split('\n') text = " ".join(strin) # text.replace('\\', '') exclude = set(string.punctuation) text = ''.join(ch for ch in text if ch not in exclude) return text def get_content(links): ''' get headlines and news content ''' print('getting content') content = [] # next_bar = IncrementalBar('Getting Content', max=) # bar = IncrementalBar('Getting content & Cleaning text', max=len(links), suffix='%(percent)d%%' ) for url in links: try: article = Article(url, language='en') article.download() article.parse() title = clean_text(article.title) news = clean_text(article.text) if title != None: if news != None: if news != ' ': if news != '': # for sites which news content cannot be scraped content.append([title, news]) # bar.next() except ArticleException as ae: # if 'Article \'download()\' failed' in ae: continue # bar.finish() return content def scraper(link='https://timesofindia.indiatimes.com/'): ''' aggregator function ''' # 
print('scraper_main')5 return get_content(scrape_news_links(link)) # if __name__ == "__main__": # links = scrape_google_links() # print(get_content(links[:15])) ``` # DF AND CSV ``` import csv import pandas as pd LINKS = ['https://timesofindia.indiatimes.com/', 'https://www.thehindu.com/', 'https://www.bbc.com/news', 'https://www.theguardian.co.uk/', 'https://www.hindustantimes.com/', 'https://indianexpress.com/', 'https://www.dailypioneer.com/' 'https://www.deccanherald.com/', 'https://www.telegraphindia.com/', 'https://www.dnaindia.com/', 'https://www.deccanchronicle.com/', 'https://www.asianage.com/', 'https://economictimes.indiatimes.com/', 'https://www.tribuneindia.com/'] def create_df(content_list): ''' To write the data to csv file takes a list of list where the inner list contains ['headline', 'news'] ''' title = [] news = [] print('creating_dataFrame') for content in content_list: title.append(content[0]) news.append(content[1]) # keywords.append(content[2]) data = {'Title' : title, 'News' : news} df = pd.DataFrame(data, columns=['Title', 'News']) return df def df_to_csv(df, filename='NewsCluster.csv'): ''' writes dataframe to csv ''' print('writing_to_csv') df.to_csv('/content/drive/My Drive/data/' + filename) def create_csv(): ''' aggregator function of this module ''' print('create_csv_main') content_list = [] for link in LINKS: content_list.append(scraper(link)) content_lst = [] for content in content_list: for cont in content: content_lst.append(cont) # content_lst = scraper() # print(content_lst) try: num = int(input('Enter the number of articles to be stored : ')) if num < 15: raise ValueError('Provide a larger number for dataset') df_to_csv(create_df(content_lst[:num])) except ValueError as ve: df_to_csv(create_df(content_lst)) ``` # CONVERT TO DB ``` import sqlite3 from sqlite3 import IntegrityError import csv def insert_to_db(tup): with sqlite3.connect('/content/drive/My Drive/data/NEWS.DB') as con: cur = con.cursor() cur.execute("INSERT INTO 
content (headlines, news) VALUES(?, ?);", tup) con.commit() def to_database(): ''' converts csv to db ''' with sqlite3.connect('/content/drive/My Drive/data/NEWS.DB') as con: cur = con.cursor() cur.execute('CREATE TABLE IF NOT EXISTS content(headlines TEXT, news TEXT PRIMARY KEY);') with open('/content/drive/My Drive/data/NewsCluster.csv', encoding='utf-8') as fin: dr = csv.DictReader(fin) for i in dr: try: tup = (i['Title'], i['News']) insert_to_db(tup) except IntegrityError as ie: # if 'unique constraint' in ie: continue # to_db = [(i['Title'], i['News']) for i in dr] # cur.executemany("INSERT INTO content (headlines, news) VALUES(?, ?);", to_db) con.commit() con.close() def print_db(): ''' prints database used for reference and verification ''' with sqlite3.connect("/content/drive/My Drive/data/NEWS.DB") as con: cur = con.cursor() cur.execute('SELECT * FROM content') return cur.fetchall() # if __name__ == "__main__": ''' execute either of the functions to update database or displahy the content ''' # to_database() # print(print_db()[0]) ``` # CALL SCRAPER, CREATE CSV and DB ``` create_csv() to_database() ``` # CHECK CSV ``` import csv def print_csv(filename): with open('/content/drive/My Drive/data/'+filename) as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') for row in csv_reader: print(row) if __name__ == '__main__': print_csv("NewsCluster.csv") ``` # CLUSTERING ``` """ Wrapper for offline clustering methods that do not take into account temporal aspects of data and online clustering methods that update and/or predict new data as it comes in. Framework supports custom text representations (e.g. Continuous Bag of Words) but will default to tfidf if none are provided. 
""" import numpy as np import seaborn as sns from sklearn.manifold import MDS from scipy.cluster.hierarchy import ward, dendrogram import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # from hdbscan import HDBSCAN from sklearn.metrics.pairwise import cosine_similarity from nltk.corpus import stopwords from scipy.sparse import issparse, vstack from sklearn.cluster import * from sklearn.decomposition import TruncatedSVD from sklearn.feature_extraction.text import TfidfVectorizer nltk_stopwords = stopwords.words('english') class Cluster: """ Clustering methods for text. Be cautious of datasize; in cases of large data, KMeans may be the only efficient choice. Accepts custom matrices Full analysis of methods can be found at: http://hdbscan.readthedocs.io/en/latest/comparing_clustering_algorithms.html Usage: >> with open('../data/cleaned_text.txt', 'r', encoding='utf8') as f: text = f.readlines() >> clustering = Cluster(text) >> results = clustering('hdbscan', matrix=None, reduce_dim=None, visualize=True, top_terms=False, min_cluster_size=10) >> print(results) """ def __init__(self, text): """ Args: text: strings to be clustered (list of strings) """ self.text = list(set(text)) def __call__(self, method, vectorizer=None, reduce_dim=None, viz=False, *args, **kwargs): """ Args: method: algorithm to use to cluster data (str) vectorizer: initialized method to convert text to np array; assumes __call__ vectorizes the text (Class, optional) reduce_dim: reduce dim of representation matrix (int, optional) visualize: visualize clusters in 3D (bool, optional) *args, **kwargs: see specified method function """ # Make sure method is valid assert method in ['hdbscan', 'dbscan', 'spectral', 'kmeans', 'minikmeans', 'affinity_prop', 'agglomerative', 'mean_shift', 'birch'], 'Invalid method chosen.' 
if not hasattr(self, 'vectorizer'): if vectorizer is None: self._init_tfidf() else: self.vectorizer = vectorizer self.matrix = self.vectorizer(self.text) # Reduce dimensionality using latent semantic analysis (makes faster) if reduce_dim is not None: self.matrix = self._pca(reduce_dim, self.matrix) # Cache current method method = eval('self.' + method) self.algorithm = method(*args, **kwargs) self.results = self._organize(self.algorithm.labels_) # For plotting self.viz_matrix = self.matrix # Visualize clustering outputs if applicable if viz: # _ = self.viz2D() _ = self.viz3D() _ = self.top_terms() return self.results # def hdbscan(self, min_cluster_size=10, prediction_data=False): # """ DBSCAN but allows for varying density clusters and no longer # requires epsilon parameter, which is difficult to tune. # http://hdbscan.readthedocs.io/en/latest/how_hdbscan_works.html # Scales slightly worse than DBSCAN, but with a more intuitive parameter. # """ # hdbscan = HDBSCAN(min_cluster_size=min_cluster_size, # prediction_data=prediction_data) # if prediction_data: # return hdbscan.fit(self._safe_dense(self.matrix)) # else: # return hdbscan.fit(self.matrix) def dbscan(self, eps=0.50): """ Density-based algorithm that clusters points in dense areas and distances points in sparse areas. Stable, semi-fast, non-global. Scales very well with n_samples, decently with n_clusters (not tunable) """ dbscan = DBSCAN(eps=eps, min_samples=3) return dbscan.fit(self.matrix) def kmeans(self, n_clusters=10, n_init=5): km = KMeans(n_clusters=n_clusters, init='k-means++', max_iter=300, n_init=n_init, verbose=0, random_state=3425) return km.fit(self.matrix) def minikmeans(self, n_clusters=10, n_init=5, batch_size=5000): """ Partition dataset into n_cluster global chunks by minimizing intra-partition distances. Expect quick results, but with noise. Scales exceptionally well with n_samples, decently with n_clusters. 
""" kmeans = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++', n_init=n_init, batch_size=batch_size) return kmeans.fit(self.matrix) def birch(self, n_clusters=10): """ Partitions dataset into n_cluster global chunks by repeatedly merging subclusters of a CF tree. Birch does not scale very well to high dimensional data. If many subclusters are desired, set n_clusters=None. Scales well with n_samples, well with n_clusters. """ birch = Birch(n_clusters=n_clusters) return birch.fit(self.matrix) def agglomerative(self, n_clusters=10, linkage='ward'): """ Iteratively clusters dataset semi-globally by starting with each point in its own cluster and then using some criterion to choose another cluster to merge that cluster with another cluster. Scales well with n_samples, decently with n_clusters. """ agglomerative = AgglomerativeClustering(n_clusters=n_clusters, linkage=linkage) return agglomerative.fit(self._safe_dense(self.matrix)) def spectral(self, n_clusters=5): """ Partitions dataset semi-globally by inducing a graph based on the distances between points and trying to learn a manifold, and then running a standard clustering algorithm (e.g. KMeans) on this manifold. Scales decently with n_samples, poorly with n_clusters. """ spectral = SpectralClustering(n_clusters=n_clusters) return spectral.fit(self.matrix) def affinity_prop(self, damping=0.50): """ Partitions dataset globally using a graph based approach to let points ‘vote’ on their preferred ‘exemplar’. Does not scale well with n_samples. Not recommended to use with text. """ affinity_prop = AffinityPropagation(damping=damping) return affinity_prop.fit(self._safe_dense(self.matrix)) def mean_shift(self, cluster_all=False): """ Centroid-based, global method that assumes there exists some probability density function from which the data is drawn, and tries to place centroids of clusters at the maxima of that density function. Unstable, but conservative. Does not scale well with n_samples. 
Not recommended to use with text. """ mean_shift = MeanShift(cluster_all=False) return mean_shift.fit(self._safe_dense(self.matrix)) def _init_tfidf(self, max_features=30000, analyzer='word', stopwords=nltk_stopwords, token_pattern=r"(?u)\b\w+\b"): """ Default representation for data is sparse tfidf vectors Args: max_features: top N vocabulary to consider (int) analyzer: 'word' or 'char', level at which to segment text (str) stopwords: words to remove from consideration, default nltk (list) """ # Initialize and fit tfidf vectors self.vectorizer = TfidfVectorizer(max_features=max_features, stop_words=stopwords, analyzer=analyzer, token_pattern=token_pattern) self.matrix = self.vectorizer.fit_transform(self.text) # Get top max_features vocabulary self.terms = self.vectorizer.get_feature_names() # For letting user know if tfidf has been initialized self.using_tfidf = True def viz2D(self, matrix=None, plot_kwds={'alpha':0.30, 's':40, 'linewidths':0}): """ Visualize clusters in 2D """ # Run PCA over the data so we can plot # matrix2D = self._pca(n=2, matrix=self.viz_matrix) # # Get labels # labels = np.unique(self.results['labels']) # # Assign a color to each label # palette = sns.color_palette('deep', max(labels)+1) # colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in labels] # # Plot the data # plt.close() # fig = plt.figure(figsize=(10,6)) # plt.scatter(matrix2D.T[0], # matrix2D.T[1], # c=colors, # **plot_kwds # ) # frame = plt.gca() # # Turn off axes, since they are arbitrary # frame.axes.get_xaxis().set_visible(False) # frame.axes.get_yaxis().set_visible(False) # # Add a title # alg_name = str(self.algorithm.__class__.__name__) # plt.title('{0} clusters found by {1}'.format(len(labels), # alg_name), # fontsize=20) # plt.tight_layout() # plt.show() # return fig # Run PCA over the data matrix3D = self._pca(n=2, matrix=self.viz_matrix) # Extract labels from results labels = self.results['labels'] # Assign colors palette = sns.color_palette('deep', 
int(max(labels)+1)) colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in labels] # Plot the data plt.close() fig = plt.figure(figsize=(10,6)) # ax = plt.axes(projection='3d') plt.scatter(matrix3D.T[0], matrix3D.T[1], # matrix3D.T[2], c=colors) # Add a title alg_name = str(self.algorithm.__class__.__name__) plt.title('{0} Clusters | {1} Items | {2}'.format(len(set(labels)), matrix3D.shape[0], alg_name), fontsize=20) # Turn off arbitrary axis tick labels # plt.tick_params(axis='both', left=False, top=False, right=False, # bottom=False, labelleft=False, labeltop=False, # labelright=False, labelbottom=False) plt.tight_layout() plt.show() return fig def viz3D(self, matrix=None): """ Visualize clusters in 3D """ # Run PCA over the data matrix3D = self._pca(n=3, matrix=self.viz_matrix) # Extract labels from results labels = self.results['labels'] # Assign colors palette = sns.color_palette('deep', int(max(labels)+1)) colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in labels] # Plot the data plt.close() fig = plt.figure(figsize=(10,6)) ax = plt.axes(projection='3d') ax.scatter(matrix3D.T[0], matrix3D.T[1], matrix3D.T[2], c=colors) # Add a title alg_name = str(self.algorithm.__class__.__name__) plt.title('{0} Clusters | {1} Items | {2}'.format(len(set(labels)), matrix3D.shape[0], alg_name), fontsize=20) # Turn off arbitrary axis tick labels plt.tick_params(axis='both', left=False, top=False, right=False, bottom=False, labelleft=False, labeltop=False, labelright=False, labelbottom=False) plt.tight_layout() plt.show() return fig def top_terms(self, topx=10): """ Print out top terms per cluster. 
""" if self.using_tfidf != True: print('For use with non-tfidf vectorizers,try sklearn NearestNeighbors\ (although NN performs poorly with high dimensional inputs.') return None # Get labels, sort text IDs by cluster labels = self.results['labels'] cluster_idx = {clust_id: np.where(labels == clust_id)[0] for clust_id in set(labels)} # Get centers, stack into array centroids = np.vstack([self.viz_matrix[indexes].mean(axis=0) for key, indexes in cluster_idx.items()]) # Compute closeness of each term representation to each centroid order_centroids = np.array(centroids).argsort()[:, ::-1] # Organize terms into a dictionary cluster_terms = {clust_id: [self.terms[ind] for ind in order_centroids[idx, :topx]] for idx, clust_id in enumerate(cluster_idx.keys())} # Print results print("Top terms per cluster:") for clust_id, terms in cluster_terms.items(): words = ' | '.join(terms) print("Cluster {0} ({1} items): {2}".format(clust_id, len(cluster_idx[clust_id]), words)) return cluster_terms def item_counts(self): """ Print number of counts in each cluster """ for key, vals in self.results.items(): if key == 'labels': continue print('Cluster {0}: {1} items'.format(key, len(vals))) def _organize(self, labels): """ Organize text from clusters into a dictionary """ # Organize text into respective clusters cluster_idx = {clust_id: np.where(labels == clust_id)[0] for clust_id in set(labels)} # Put results in a dictionary; key is cluster idx values are text results = {clust_id: [self.text[idx] for idx in cluster_idx[clust_id]] for clust_id in cluster_idx.keys()} results['labels'] = list(labels) return results def _pca(self, n, matrix): """ Perform PCA on the data """ return TruncatedSVD(n_components=n).fit_transform(matrix) def _safe_dense(self, matrix): """ Some algorithms don't accept sparse input; for these, make sure the input matrix is dense. 
""" if issparse(matrix): return matrix.todense() else: return matrix class OnlineCluster(Cluster): """ Online (stream) clustering of textual data. Check each method to determine if the model is updating or ad-hoc predicting. These are not 'true' online methods as they preserve all seen data, as opposed to letting data points and clusters fade, merge, etc. over time. Usage: To initialize: >> with open('../data/cleaned_text.txt', 'r', encoding='utf8') as f: text = f.readlines() >> online = OnlineCluster(method='kmeans', text, visualize=True) To predict and update parameters if applicable: >> new_text = text[-10:] >> online.predict(new_text) """ def __init__(self, text, method, *args, **kwargs): """ Args: text: strings to be clustered (list of strings) method: algorithm to use to cluster (string) *args, **kwargs (optional): vectorizer: text representation. Defaults tfidf (array, optional) reduce_dim: reduce dim of representation matrix (int, optional) visualize: visualize clusters in 3D (bool, optional) """ # Only accept valid arguments assert method in ['kmeans', 'birch', 'hdbscan', 'dbscan', 'mean_shift'], \ 'Method incompatible with online clustering.' # Initialize inherited class super().__init__(text) # Get initial results self.results = self.__call__(method=method, *args,**kwargs) # Save args, set method self.__dict__.update(locals()) self.method = eval('self._' + method) def predict(self, new_text): """ 'Predict' a new example based on cluster centroids and update params if applicable (kmeans, birch). If a custom (non-tfidf) text representation is being used, class assumes new_text is already in vectorized form. Args: new_text: list of strings to predict """ # Predict assert type(new_text) == list, 'Input should be list of strings.' 
self.text = list(set(self.text + new_text)) new_matrix = self._transform(new_text) output_labels = self.method(new_matrix) # Update attribute for results, plotting self._update_results(output_labels) self.viz_matrix = vstack([self.viz_matrix, new_matrix]) return output_labels def _kmeans(self, new_matrix): """ Updates parameters and predicts """ self.algorithm = self.algorithm.partial_fit(new_matrix) return self.algorithm.predict(new_matrix) def _birch(self, new_matrix): """ Updates parameters and predicts """ self.algorithm = self.algorithm.partial_fit(new_matrix) return self.algorithm.predict(new_matrix) def _hdbscan(self, new_matrix): """ Prediction only, HDBSCAN requires training to be done on dense matrices for prediction to work properly. This makes training inefficient, though. """ try: labels, _ = approximate_predict(self.algorithm, self._safe_dense(new_matrix)) except AttributeError: try: self.algorithm.generate_prediction_data() labels, _ = approximate_predict(self.algorithm, self._safe_dense(new_matrix)) except ValueError: print('Must (inefficiently) re-train with prediction_data=True') return labels def _dbscan(self, new_matrix): """ Prediction only """ # Extract labels labels = self.algorithm.labels_ # Result is noise by default output = np.ones(shape=new_matrix.shape[0], dtype=int)*-1 # Iterate all input samples for a label for idx, row in enumerate(new_matrix): # Find a core sample closer than EPS for i, row in enumerate(self.algorithm.components_): # If it's below the threshold of the dbscan model if cosine(row, x_core) < self.algorithm.eps: # Assign label of x_core to the input sample output[idx] = labels[self.algorithm.core_sample_indices_[i]] break return output def _mean_shift(self, new_matrix): """ Prediction only, not efficient """ return self.algorithm.predict(new_matrix) def _transform(self, new_text): """ Transform text to tfidf representation. Assumes already vectorized if tfidf matrix has not been initialized. 
""" if self.using_tfidf: return self.vectorizer.transform(new_text) else: return self.vectorizer(new_text) return new_matrix def _update_results(self, labels): """ Update running dictionary """ new_results = self._organize(labels) for key in self.results.keys(): try: self.results[key] += new_results[key] except KeyError: continue from matplotlib import pyplot as plt import pandas as pd import string cluster_dict = {2:'dbscan', 3:'spectral', 4:'kmeans', 5:'affinity_prop', 6:'agglomerative', 7:'mean_shift', 8:'birch'} def clean(text): ''' Clean text before running clusterer ''' text = text.strip() text = text.lower() for punct in string.punctuation: text = text.replace(punct, ' ') lst = text.split() text = " ".join(lst) for t in text: if t not in string.printable: text = text.replace(t, '') return text def clust(): df = pd.read_csv('/content/drive/My Drive/data/NewsCluster.csv') data = df["Title"].tolist() data = [clean(dt) for dt in data ] # for dt in data: # data[data.index(dt)] = clean(dt) data = pd.DataFrame(data, columns=["text"]) data['text'].dropna(inplace=True) # %matplotlib inline clustering = Cluster(data.text) # results = clustering(method='dbscan', vectorizer=None, # reduce_dim=None, viz=True, eps=0.9) results = clustering(method='kmeans', vectorizer=None, reduce_dim=None, viz=True, n_clusters=12) # results = clustering(method='birch', vectorizer=None, # reduce_dim=None, viz=True, n_clusters=12) # results = clustering(method='agglomerative', vectorizer=None, # reduce_dim=None, viz=True, n_clusters=12) # results = clustering(method='spectral', vectorizer=None, # reduce_dim=None, viz=True, n_clusters=12) # results = clustering(method='affinity_prop', vectorizer=None, # reduce_dim=None, viz=True, damping=0.5) results = clustering(method='minikmeans', vectorizer=None, reduce_dim=None, viz=True, n_clusters=12) # clustering = Cluster(data.text) # for i in range(2,9): # print(cluster_dict[i]) # if i == 4: # result = clustering(cluster_dict[i]) # else: # result = 
clustering(cluster_dict[i]) # print(result) clust() ```
github_jupyter
# 2D Advection-Diffusion equation in this notebook we provide a simple example of the DeepMoD algorithm and apply it on the 2D advection-diffusion equation. ``` # General imports import numpy as np import torch import matplotlib.pylab as plt # DeepMoD functions from deepymod import DeepMoD from deepymod.model.func_approx import NN, Siren from deepymod.model.library import Library2D from deepymod.model.constraint import LeastSquares from deepymod.model.sparse_estimators import Threshold,PDEFIND from deepymod.training import train from deepymod.training.sparsity_scheduler import TrainTestPeriodic from scipy.io import loadmat # Settings for reproducibility np.random.seed(42) torch.manual_seed(0) if torch.cuda.is_available(): device = 'cuda' else: device = 'cpu' %load_ext autoreload %autoreload 2 ``` ## Prepare the data Next, we prepare the dataset. ``` data = np.load('experimental_2DAD_long.npy')[:,:,:40] data.shape down_data= np.take(np.take(np.take(data,np.arange(0,data.shape[0],2),axis=0),np.arange(0,data.shape[1],2),axis=1),np.arange(0,data.shape[2],1),axis=2) down_data.shape steps = down_data.shape[2] width = down_data.shape[0] width_2 = down_data.shape[1] x_arr = np.arange(0,width) y_arr = np.arange(0,width_2) t_arr = np.arange(0,steps) x_grid, y_grid, t_grid = np.meshgrid(x_arr, y_arr, t_arr, indexing='ij') X = np.transpose((t_grid.flatten(), x_grid.flatten(), y_grid.flatten())) plt.imshow(down_data[:,:,20]) ``` Next we plot the dataset for three different time-points We flatten it to give it the right dimensions for feeding it to the network: ``` X = np.transpose((t_grid.flatten()/5, x_grid.flatten()/np.max(x_grid), y_grid.flatten()/np.max(x_grid))) #X = np.transpose((t_grid.flatten(), x_grid.flatten(), y_grid.flatten())) y = np.float32(down_data.reshape((down_data.size, 1))) y = y/10. 
np.max(y,axis=0) number_of_samples = 5000 idx = np.random.permutation(y.shape[0]) X_train = torch.tensor(X[idx, :][:number_of_samples], dtype=torch.float32, requires_grad=True).to(device) y_train = torch.tensor(y[idx, :][:number_of_samples], dtype=torch.float32).to(device) ``` ## Configuration of DeepMoD Configuration of the function approximator: Here the first argument is the number of input and the last argument the number of output layers. ``` network = NN(3, [30, 30, 30,30], 1) ``` Configuration of the library function: We select the library with a 2D spatial input. Note that the max differential order has been pre-determined here out of convenience. So, for poly_order 1 the library contains the following 12 terms: * [$1, u_x, u_y, u_{xx}, u_{yy}, u_{xy}, u, u u_x, u u_y, u u_{xx}, u u_{yy}, u u_{xy}$] ``` library = Library2D(poly_order=1) ``` Configuration of the sparsity estimator and sparsity scheduler used. In this case we use the most basic threshold-based Lasso estimator and a scheduler that assesses the validation loss after a given patience. If that value is smaller than 1e-5, the algorithm is considered converged.
``` estimator = Threshold(0.05) sparsity_scheduler = TrainTestPeriodic(periodicity=50, patience=25, delta=1e-5) ``` Configuration of the sparsity estimator ``` constraint = LeastSquares() # Configuration of the sparsity scheduler ``` Now we instantiate the model and select the optimizer ``` model = DeepMoD(network, library, estimator, constraint).to(device) # Defining optimizer optimizer = torch.optim.Adam(model.parameters(), betas=(0.99, 0.99), amsgrad=True, lr=1e-3) ``` ## Run DeepMoD We can now run DeepMoD using all the options we have set and the training data: * The directory where the tensorboard file is written (log_dir) * The ratio of train/test set used (split) * The maximum number of iterations performed (max_iterations) * The absolute change in L1 norm considered converged (delta) * The amount of epochs over which the absolute change in L1 norm is calculated (patience) ``` train(model, X_train, y_train, optimizer,sparsity_scheduler, log_dir='runs/test2/', split=0.8, max_iterations=100000, delta=1e-7, patience=1000) ``` Sparsity masks provide the active and non-active terms in the PDE: ``` sol = model(torch.tensor(X, dtype=torch.float32))[0].reshape((width,width_2,steps)).detach().numpy() ux = model(torch.tensor(X, dtype=torch.float32))[2][0][:,1].reshape((width,width_2,steps)).detach().numpy() uy = model(torch.tensor(X, dtype=torch.float32))[2][0][:,2].reshape((width,width_2,steps)).detach().numpy() uxx = model(torch.tensor(X, dtype=torch.float32))[2][0][:,3].reshape((width,width_2,steps)).detach().numpy() uyy = model(torch.tensor(X, dtype=torch.float32))[2][0][:,4].reshape((width,width_2,steps)).detach().numpy() import pysindy as ps fd_spline = ps.SINDyDerivative(kind='spline', s=1e-2) fd_spectral = ps.SINDyDerivative(kind='spectral') fd_sg = ps.SINDyDerivative(kind='savitzky_golay', left=0.5, right=0.5, order=3) y = down_data[2,:,19] x = x_arr plt.plot(x,y, 'b--') plt.plot(x,sol[2,:,19]*np.max(down_data),'b', label='x = 1') y = down_data[5,:,19] x = 
x_arr plt.plot(x,y, 'g--') plt.plot(x,sol[5,:,19]*np.max(down_data),'g', label='x = 5') y = down_data[11,:,19] x = x_arr plt.plot(x,y, 'r--') plt.plot(x,sol[11,:,19]*np.max(down_data),'r', label='x = 10') plt.legend() y = down_data[1,:,1] x = x_arr plt.plot(x,y, 'b--') plt.plot(x,sol[1,:,1]*np.max(down_data),'b', label='x = 1') y = down_data[5,:,1] x = x_arr plt.plot(x,y, 'g--') plt.plot(x,sol[5,:,1]*np.max(down_data),'g', label='x = 5') y = down_data[11,:,1] x = x_arr plt.plot(x,y, 'r--') plt.plot(x,sol[11,:,1]*np.max(down_data),'r', label='x = 10') plt.legend() np.max(down_data)/100 plt.plot(x,fd_sg(y,x), 'ro') y = down_data[1,:,19] x = x_arr plt.plot(x,fd_sg(y,x), 'b--') plt.plot(x,uy[1,:,19]*np.max(down_data)/100,'b', label='x = 1') y = down_data[5,:,19] x = x_arr plt.plot(x,fd_sg(y,x), 'g--') plt.plot(x,uy[5,:,19]*np.max(down_data)/100,'g', label='x = 5') y = down_data[10,:,19] x = x_arr plt.plot(x,fd_sg(y,x), 'r--') plt.plot(x,uy[10,:,19]*np.max(down_data)/100,'r', label='x = 10') plt.legend() y = down_data[2,:,19] x = x_arr plt.plot(x,fd_sg(fd_sg(y,x)), 'b--') plt.plot(x,uyy[2,:,19]*np.max(down_data)/(100*100),'b') y = down_data[5,:,19] x = x_arr plt.plot(x,fd_sg(fd_sg(y,x)), 'g--') plt.plot(x,uyy[5,:,19]*np.max(down_data)/(100*100),'g') y = down_data[11,:,19] x = x_arr plt.plot(x,fd_sg(fd_sg(y,x)), 'r--') plt.plot(x,uyy[11,:,19]*np.max(down_data)/(100*100),'r') fig = plt.figure(figsize=(15,5)) plt.subplot(1,3, 1) y = down_data[2,:,2] x = x_arr plt.plot(x,y) plt.plot(x,sol[2,:,2]*np.max(down_data)) plt.legend() plt.subplot(1,3, 2) y = down_data[2,:,2] x = x_arr plt.plot(x,y) plt.plot(x,sol[2,:,2]*np.max(down_data)) plt.subplot(1,3, 3) y = down_data[2,:,2] x = x_arr plt.plot(x,y) plt.plot(x,sol[2,:,2]*np.max(down_data)) plt.legend() plt.show() fig = plt.figure(figsize=(15,5)) plt.subplot(1,3, 1) plt.imshow(sol[:,:,1], aspect=0.5) plt.subplot(1,3, 2) plt.imshow(sol[:,:,19], aspect=0.5) plt.subplot(1,3, 3) plt.imshow(sol[:,:,39], aspect=0.5) 
plt.savefig('reconstruction.pdf') fig = plt.figure(figsize=(15,5)) plt.subplot(1,3, 1) plt.imshow(down_data[:,:,1], aspect=0.5) plt.subplot(1,3, 2) plt.imshow(down_data[:,:,19], aspect=0.5) plt.subplot(1,3, 3) plt.imshow(down_data[:,:,39], aspect=0.5) plt.savefig('original_20_20_40.pdf') np.max(down_data) plt.plot(x,sol[5,:,10]*np.max(down_data)) noise_level = 0.025 y_noisy = y + noise_level * np.std(y) * np.random.randn(y.size) plt.plot(x,uy[25,:,10]) plt.plot(x,ux[25,:,10]) fig = plt.figure(figsize=(15,5)) plt.subplot(1,3, 1) plt.plot(fd_spline(y.reshape(-1,1),x), label='Ground truth',linewidth=3) plt.plot(fd_spline(y_noisy.reshape(-1,1),x), label='Spline',linewidth=3) plt.legend() plt.subplot(1,3, 2) plt.plot(fd_spline(y.reshape(-1,1),x), label='Ground truth',linewidth=3) plt.plot(fd_sg(y_noisy.reshape(-1,1),x), label='Savitzky Golay',linewidth=3) plt.legend() plt.subplot(1,3, 3) plt.plot(fd_spline(y.reshape(-1,1),x), label='Ground truth',linewidth=3) plt.plot(uy[25,:,10],linewidth=3, label='DeepMoD') plt.legend() plt.show() plt.plot(ux[10,:,5]) ax = plt.subplot(1,1,1) ax.plot(fd(y.reshape(-1,1),x), label='Ground truth') ax.plot(fd_sline(y_noisy.reshape(-1,1),x), label='Spline') ax.plot(fd_sg(y_noisy.reshape(-1,1),x), label='Savitzky Golay') ax.legend() plt.plot(model(torch.tensor(X, dtype=torch.float32))[2][0].detach().numpy()) sol = model(torch.tensor(X, dtype=torch.float32))[0] plt.imshow(sol[:,:,4].detach().numpy()) plt.plot(sol[10,:,6].detach().numpy()) plt.plot(down_data[10,:,6]/np.max(down_data)) x = np.arange(0,len(y)) import pysindy as ps diffs = [ ('PySINDy Finite Difference', ps.FiniteDifference()), ('Smoothed Finite Difference', ps.SmoothedFiniteDifference()), ('Savitzky Golay', ps.SINDyDerivative(kind='savitzky_golay', left=0.5, right=0.5, order=3)), ('Spline', ps.SINDyDerivative(kind='spline', s=1e-2)), ('Trend Filtered', ps.SINDyDerivative(kind='trend_filtered', order=0, alpha=1e-2)), ('Spectral', ps.SINDyDerivative(kind='spectral')), ] fd = 
ps.SINDyDerivative(kind='spline', s=1e-2) y = down_data[:,10,9]/np.max(down_data) x = np.arange(0,len(y)) t = np.linspace(0,1,5) X = np.vstack((np.sin(t),np.cos(t))).T plt.plot(y) plt.plot(fd(y.reshape(-1,1),x)) y.shape plt.plot(fd._differentiate(y.reshape(-1,1),x)) plt.plot(ux[:,10,6]) plt.plot(sol[:,10,6].detach().numpy()) plt.plot(down_data[:,10,6]/np.max(down_data)) model.sparsity_masks ``` estimatior_coeffs gives the magnitude of the active terms: ``` print(model.estimator_coeffs()) plt.contourf(ux[:,:,10]) plt.plot(ux[25,:,2]) ax = plt.subplot(1,1,1) ax.plot(fd(y.reshape(-1,1),x), label='Ground truth') ax.plot(fd_sline(y_noisy.reshape(-1,1),x), label='Spline') ax.plot(fd_sg(y_noisy.reshape(-1,1),x), label='Savitzky Golay') ax.legend() import pysindy as ps fd_spline = ps.SINDyDerivative(kind='spline', s=1e-2) fd_spectral = ps.SINDyDerivative(kind='spectral') fd_sg = ps.SINDyDerivative(kind='savitzky_golay', left=0.5, right=0.5, order=3) y = u_v[25,:,2] x = y_v[25,:,2] plt.scatter(x,y) y.shape noise_level = 0.025 y_noisy = y + noise_level * np.std(y) * np.random.randn(y.size) ax = plt.subplot(1,1,1) ax.plot(x,y_noisy, label="line 1") ax.plot(x,y, label="line 2") ax.legend() ax = plt.subplot(1,1,1) ax.plot(fd(y.reshape(-1,1),x), label='Ground truth') ax.plot(fd_sline(y_noisy.reshape(-1,1),x), label='Spline') ax.plot(fd_sg(y_noisy.reshape(-1,1),x), label='Savitzky Golay') ax.legend() ```
github_jupyter
# Cross-asset skewness This notebook analyses cross-asset cross-sectional skewness strategy. The strategy takes long positions on contracts with most negative historical skewness and short positions on ones with most positive skewness. ``` %matplotlib inline from datetime import datetime import logging import warnings warnings.simplefilter(action='ignore', category=FutureWarning) import pandas as pd import matplotlib.pyplot as plt import matplotlib.ticker as mticker plt.style.use('bmh') from vivace.backtest import signal from vivace.backtest import processing from vivace.backtest.contract import all_futures_baltas2019 from vivace.backtest.engine import BacktestEngine from vivace.backtest.enums import Strategy from vivace.backtest.stats import Performance ``` # Data Various futures contracts in commodity, currency, government bond futures and equity index futures are tested. Some contracts are missing in this data set due to data availability. ``` all_futures_baltas2019 all_futures_baltas2019.shape ``` # Performance ## Run backtest For each asset class, a simple portfolio is constructed by using trailing 1-year returns of each futures. Unlike studies in equities, the recent 1-month is included in the formation period. Positions are rebalanced on a monthly basis. 
``` engine_commodity = BacktestEngine( strategy=Strategy.DELTA_ONE.value, instrument=all_futures_baltas2019.query('asset_class == "commodity"').index, signal=signal.XSSkewness(lookback=252, post_process=processing.Pipeline([ processing.Negate(), processing.AsFreq(freq='m', method='pad') ])), log_level=logging.WARN, ) engine_commodity.run() commodity_portfolio_return = (engine_commodity.calculate_equity_curve(calculate_net=False) .rename('Commodity skewness portfolio')) engine_equity = BacktestEngine( strategy=Strategy.DELTA_ONE.value, instrument=all_futures_baltas2019.query('asset_class == "equity"').index, signal=signal.XSSkewness(lookback=252, post_process=processing.Pipeline([ processing.Negate(), processing.AsFreq(freq='m', method='pad') ])), log_level=logging.WARN, ) engine_equity.run() equity_portfolio_return = (engine_equity.calculate_equity_curve(calculate_net=False) .rename('Equity skewness portfolio')) engine_fixed_income = BacktestEngine( strategy=Strategy.DELTA_ONE.value, instrument=all_futures_baltas2019.query('asset_class == "fixed_income"').index, signal=signal.XSSkewness(lookback=252, post_process=processing.Pipeline([ processing.Negate(), processing.AsFreq(freq='m', method='pad') ])), log_level=logging.WARN, ) engine_fixed_income.run() fixed_income_portfolio_return = (engine_fixed_income.calculate_equity_curve(calculate_net=False) .rename('Fixed income skewness portfolio')) engine_currency = BacktestEngine( strategy=Strategy.DELTA_ONE.value, instrument=all_futures_baltas2019.query('asset_class == "currency"').index, signal=signal.XSSkewness(lookback=252, post_process=processing.Pipeline([ processing.Negate(), processing.AsFreq(freq='m', method='pad') ])), log_level=logging.WARN, ) engine_currency.run() currency_portfolio_return = (engine_currency.calculate_equity_curve(calculate_net=False) .rename('Currency skewness portfolio')) fig, ax = plt.subplots(2, 2, figsize=(14, 8), sharex=True) commodity_portfolio_return.plot(ax=ax[0][0], logy=True) 
equity_portfolio_return.plot(ax=ax[0][1], logy=True) fixed_income_portfolio_return.plot(ax=ax[1][0], logy=True) currency_portfolio_return.plot(ax=ax[1][1], logy=True) ax[0][0].set_title('Commodity skewness portfolio') ax[0][1].set_title('Equity skewness portfolio') ax[1][0].set_title('Fixed income skewness portfolio') ax[1][1].set_title('Currency skewness portfolio') ax[0][0].set_ylabel('Cumulative returns'); ax[1][0].set_ylabel('Cumulative returns'); pd.concat(( commodity_portfolio_return.pipe(Performance).summary(), equity_portfolio_return.pipe(Performance).summary(), fixed_income_portfolio_return.pipe(Performance).summary(), currency_portfolio_return.pipe(Performance).summary(), ), axis=1) ``` ## Performance since 1990 In the original paper, performance since 1990 is reported. The result below confirms that all skewness based portfolios exhibited positive performance over time. Interestingly the equity portfolio somewhat performed weakly in the backtest. This could be due to the slightly different data set. ``` fig, ax = plt.subplots(2, 2, figsize=(14, 8), sharex=True) commodity_portfolio_return['1990':].plot(ax=ax[0][0], logy=True) equity_portfolio_return['1990':].plot(ax=ax[0][1], logy=True) fixed_income_portfolio_return['1990':].plot(ax=ax[1][0], logy=True) currency_portfolio_return['1990':].plot(ax=ax[1][1], logy=True) ax[0][0].set_title('Commodity skewness portfolio') ax[0][1].set_title('Equity skewness portfolio') ax[1][0].set_title('Fixed income skewness portfolio') ax[1][1].set_title('Currency skewness portfolio') ax[0][0].set_ylabel('Cumulative returns'); ax[1][0].set_ylabel('Cumulative returns'); ``` ## GSF The authors defines the global skewness factor (GSF) by combining the 4 asset classes with equal vol weighting. Here, the 4 backtests are simply combined with each ex-post realised volatility. 
``` def get_leverage(equity_curve: pd.Series) -> float: return 0.1 / (equity_curve.pct_change().std() * (252 ** 0.5)) gsf = pd.concat(( commodity_portfolio_return.pct_change() * get_leverage(commodity_portfolio_return), equity_portfolio_return.pct_change() * get_leverage(equity_portfolio_return), fixed_income_portfolio_return.pct_change() * get_leverage(fixed_income_portfolio_return), currency_portfolio_return.pct_change() * get_leverage(currency_portfolio_return), ), axis=1).mean(axis=1) gsf = gsf.fillna(0).add(1).cumprod().rename('GSF') fig, ax = plt.subplots(1, 2, figsize=(14, 4)) gsf.plot(ax=ax[0], logy=True); gsf['1990':].plot(ax=ax[1], logy=True); ax[0].set_title('GSF portfolio') ax[1].set_title('Since 1990') ax[0].set_ylabel('Cumulative returns'); pd.concat(( gsf.pipe(Performance).summary(), gsf['1990':].pipe(Performance).summary().add_suffix(' (since 1990)') ), axis=1) ``` ## Post publication ``` publication_date = datetime(2019, 12, 16) fig, ax = plt.subplots(1, 2, figsize=(14, 4)) gsf.plot(ax=ax[0], logy=True); ax[0].set_title('GSF portfolio') ax[0].set_ylabel('Cumulative returns'); ax[0].axvline(publication_date, lw=1, ls='--', color='black') ax[0].text(publication_date, 0.6, 'Publication date ', ha='right') gsf.loc[publication_date:].plot(ax=ax[1], logy=True); ax[1].set_title('GSF portfolio (post publication)'); ``` ## Recent performance ``` fig, ax = plt.subplots(figsize=(8, 4.5)) gsf.tail(252 * 2).plot(ax=ax, logy=True); ax.set_title('GSF portfolio') ax.set_ylabel('Cumulative returns'); ``` # Reference - Baltas, N. and Salinas, G., 2019. Cross-Asset Skew. Available at SSRN. ``` print(f'Updated: {datetime.utcnow().strftime("%d-%b-%Y %H:%M")}') ```
github_jupyter
# GFA Zero Calibration GFA calibrations should normally be updated in the following sequence: zeros, flats, darks. This notebook should be run using a DESI kernel, e.g. `DESI master`. ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import os import sys import json import collections from pathlib import Path import scipy.interpolate import scipy.stats import fitsio ``` Install / upgrade the `desietcimg` package: ``` try: import desietcimg print('desietcimg already installed') except ImportError: print('Installing desietcimg...') !{sys.executable} -m pip install --user git+https://github.com/dkirkby/desietcimg upgrade = False if upgrade: print('Upgrading desietcimg...') !{sys.executable} -m pip install --upgrade --user git+https://github.com/dkirkby/desietcimg import desietcimg.util import desietcimg.plot import desietcimg.gfa ``` NERSC configuration: ``` assert os.getenv('NERSC_HOST', False) ROOT = Path('/project/projectdirs/desi/spectro/data/') assert ROOT.exists() ``` Initial GFA calibration: ``` CALIB = Path('/global/cscratch1/sd/dkirkby/GFA_calib.fits') assert CALIB.exists() ``` Directory for saving plots: ``` plotdir = Path('zerocal') plotdir.mkdir(exist_ok=True) ``` ## Process Zero Sequences Use a sequence of 200 zeros from [20191027](http://desi-www.kpno.noao.edu:8090/nightsum/nightsum-2019-10-27/nightsum.html). 
# Since this data has not yet been staged to its final location, fetch it
# from the `lost+found` directory (overriding the ROOT defined above).
ROOT = Path('/global/project/projectdirs/desi/spectro/staging/lost+found/')
files = desietcimg.util.find_files(ROOT / '20191027' / '{N}/gfa-{N}.fits.fz', min=21968, max=22167)


# Build master zero images:
def build_master_zero():
    """Build a per-camera master zero image.

    For each GFA camera, load all raw zero exposures, verify that every
    EXPTIME really is zero, and take the per-pixel median over the stack
    (with no master-zero subtraction and no gain correction applied).

    Returns a dict mapping camera name -> 2D median zero image.
    """
    master_zero = {}
    GFA = desietcimg.gfa.GFACamera(calib_name=str(CALIB))
    for k, gfa in enumerate(GFA.gfa_names):
        raw, meta = desietcimg.util.load_raw(files, 'EXPTIME', hdu=gfa)
        # All frames in this sequence must be zeros (zero exposure time).
        assert np.all(np.array(meta['EXPTIME']) == 0)
        GFA.setraw(raw, name=gfa, subtract_master_zero=False, apply_gain=False)
        master_zero[gfa] = np.median(GFA.data, axis=0)
    return master_zero

%time master_zero = build_master_zero()

# Estimate the readnoise in ADU for each amplifier, using the new master zero:
desietcimg.gfa.GFACamera.master_zero = master_zero


def get_readnoise(hrange=70, hbins=141, nsig=6, save=None):
    """Estimate the read noise in ADU for every camera / amplifier.

    Histograms the master-zero-subtracted residuals per amplifier and
    estimates the noise as the sigma-clipped standard deviation.

    hrange: half-width of the histogram range in ADU.
    hbins: number of histogram bin edges spanning [-hrange, +hrange].
    nsig: clipping threshold (in sigma) used before taking the std dev.
    save: optional filename for saving the summary figure.

    Returns noise[camera][amp] = clipped standard deviation in ADU.
    """
    GFA = desietcimg.gfa.GFACamera(calib_name=str(CALIB))
    fig, axes = plt.subplots(5, 2, sharex=True, figsize=(18, 11))
    bins = np.linspace(-hrange, +hrange, hbins)
    noise = {}
    for k, gfa in enumerate(GFA.gfa_names):
        GFA.name = gfa
        ax = axes[k // 2, k % 2]
        raw, meta = desietcimg.util.load_raw(files, 'EXPTIME', hdu=gfa)
        assert np.all(np.array(meta['EXPTIME']) == 0)
        # This time subtract the new master zero so only residuals remain.
        GFA.setraw(raw, name=gfa, subtract_master_zero=True, apply_gain=False)
        noise[gfa] = {}
        for j, amp in enumerate(GFA.amp_names):
            # Extract data for this quadrant.
            qdata = GFA.data[GFA.quad[amp]]
            X = qdata.reshape(-1)
            # Clip for std dev calculation.
            Xclipped, lo, hi = scipy.stats.sigmaclip(X, low=nsig, high=nsig)
            noise[gfa][amp] = np.std(Xclipped)
            label = f'{amp} {noise[gfa][amp]:.2f}'
            c = plt.rcParams['axes.prop_cycle'].by_key()['color'][j]
            ax.hist(X, bins=bins, label=label, color=c, histtype='step')
            # Mark the clipping limits actually used for this amplifier.
            for x in lo, hi:
                ax.axvline(x, ls='-', c=c, alpha=0.5)
        ax.set_yscale('log')
        ax.set_yticks([])
        # Only the bottom row of the 5x2 grid gets an x-axis label.
        if k in (8, 9):
            ax.set_xlabel('Zero Residual [ADU]')
        ax.set_xlim(bins[0], bins[-1])
        ax.legend(ncol=2, title=f'{gfa}', loc='upper left')
    plt.subplots_adjust(left=0.03, right=0.99, bottom=0.04, top=0.99, wspace=0.07, hspace=0.04)
    if save:
        plt.savefig(save)
    return noise

%time readnoise = get_readnoise(save=str(plotdir / 'GFA_readnoise.png'))
repr(readnoise)

# ## Save Updated Calibrations
desietcimg.gfa.save_calib_data('GFA_calib_zero.fits', master_zero=master_zero, readnoise=readnoise)

# Use this for subsequent flat and dark calibrations:
!cp GFA_calib_zero.fits {CALIB}

# ## Comparisons
# Compare with the read noise values from the lab studies and Aaron Meisner's
# independent analysis:
# https://desi.lbl.gov/trac/wiki/Commissioning/Planning/gfachar/bias_readnoise_20191027
ameisner_rdnoise = {
    'GUIDE0': { 'E': 5.56, 'F': 5.46, 'G': 5.12, 'H': 5.24},
    'FOCUS1': { 'E': 5.21, 'F': 5.11, 'G': 4.88, 'H': 4.90},
    'GUIDE2': { 'E': 7.11, 'F': 6.23, 'G': 5.04, 'H': 5.29},
    'GUIDE3': { 'E': 5.28, 'F': 5.16, 'G': 4.89, 'H': 5.00},
    'FOCUS4': { 'E': 5.23, 'F': 5.12, 'G': 5.01, 'H': 5.11},
    'GUIDE5': { 'E': 5.11, 'F': 5.00, 'G': 4.80, 'H': 4.86},
    'FOCUS6': { 'E': 5.12, 'F': 5.09, 'G': 4.85, 'H': 5.07},
    'GUIDE7': { 'E': 5.00, 'F': 4.96, 'G': 4.63, 'H': 4.79},
    'GUIDE8': { 'E': 6.51, 'F': 5.58, 'G': 5.12, 'H': 5.47},
    'FOCUS9': { 'E': 6.85, 'F': 5.53, 'G': 5.07, 'H': 5.57},
}


def compare_rdnoise(label='20191027', save=None):
    """Scatter-plot the newly measured read noise against the lab values
    (left panel) and against ameisner's independent measurements (right
    panel), one point per camera/amplifier.

    label: y-axis label prefix identifying this measurement run.
    save: optional filename for saving the comparison figure.
    """
    # Use the new calibrations written above.
    desietcimg.gfa.GFACamera.calib_data = None
    GFA = desietcimg.gfa.GFACamera(calib_name='GFA_calib_zero.fits')
    markers = '+xo.'
    fig, ax = plt.subplots(1, 2, figsize=(12, 5))
    for k, gfa in enumerate(GFA.gfa_names):
        color = plt.rcParams['axes.prop_cycle'].by_key()['color'][k]
        # Empty scatter -> legend entry associating this color with the camera.
        ax[1].scatter([], [], marker='o', c=color, label=gfa)
        for j, amp in enumerate(desietcimg.gfa.GFACamera.amp_names):
            marker = markers[j]
            measured = GFA.calib_data[gfa][amp]['RDNOISE']
            # Lab results are given in elec so use lab gains to convert back to ADU
            lab = GFA.lab_data[gfa][amp]['RDNOISE'] / GFA.lab_data[gfa][amp]['GAIN']
            ax[0].scatter(lab, measured, marker=marker, c=color)
            ax[1].scatter(ameisner_rdnoise[gfa][amp], measured, marker=marker, c=color)
    for j, amp in enumerate(GFA.amp_names):
        # Empty scatter -> legend entry associating each marker with an amplifier.
        ax[1].scatter([], [], marker=markers[j], c='k', label=amp)
    xylim = (4.3, 5.5)
    for axis in ax:
        # Diagonal marks perfect agreement between the two measurements.
        axis.plot(xylim, xylim, 'k-', zorder=-10, alpha=0.25)
        axis.set_ylabel(f'{label} Read Noise [ADU]')
        axis.set_xlim(*xylim)
        axis.set_ylim(*xylim)
    ax[1].legend(ncol=3)
    ax[0].set_xlabel('Lab Data Read Noise [ADU]')
    ax[1].set_xlabel('ameisner Read Noise [ADU]')
    plt.tight_layout()
    if save:
        plt.savefig(save)

compare_rdnoise(save=str(plotdir / 'rdnoise_compare.png'))
github_jupyter
# Pandas Playground
#
# A Series is a one-dimensional labelled object, similar to an array, a
# list, or a column in a database.  By default, items get index labels
# 0..N-1.  A DataFrame is a group of Series sharing an index -- like a
# spreadsheet, a database table, or R's data.frame.
#
# Inspired by Greg Reda's "Intro to Pandas Data Structures":
# http://www.gregreda.com/2013/10/26/intro-to-pandas-data-structures/
#
# NOTE: the original used Python 2 `print` statements; converted to
# Python 3 `print()` calls throughout (no other behavior change).
import pandas as pd
import numpy as np

# A Series with the default 0..N-1 integer index.
series = pd.Series(['Dave', 'Cheng-Han', 'Udacity', 42, -1789710578])
print(series)
print(type(series))
print(series[0])

# Indices can also be assigned manually when the Series is created.
series = pd.Series(['Dave', 'Cheng-Han', 359, 9001],
                   index=['Instructor', 'Curriculum Manager',
                          'Course Number', 'Power Level'])
print(series)

# Use the index labels to select specific items.
print(series['Instructor'])
print("")
print(series[['Instructor', 'Curriculum Manager', 'Course Number']])

# Boolean operators also select items from a Series.
cuteness = pd.Series([1, 2, 3, 4, 5],
                     index=['Cockroach', 'Fish', 'Mini Pig', 'Puppy', 'Kitten'])
print(cuteness > 3)
print("")
print(cuteness[cuteness > 3])

# Build a DataFrame from a dict of lists: dict keys become column names,
# the associated lists become the column values.
data = {'year': [2010, 2011, 2012, 2011, 2012, 2010, 2011, 2012],
        'team': ['Bears', 'Bears', 'Bears', 'Packers', 'Packers',
                 'Lions', 'Lions', 'Lions'],
        'wins': [11, 8, 10, 15, 11, 6, 10, 4],
        'losses': [5, 8, 6, 1, 5, 10, 6, 12]}
football = pd.DataFrame(data)
print(football)

# Basic inspection helpers:
#   dtypes   - datatype of each column
#   describe - basic statistics of the numerical columns
#   head     - first five rows
#   tail     - last five rows
print(football)
print("")
print(football.dtypes)
print("")
print(football.describe())
print("")
print(football.head())
print("")
print(football.tail())

# Column selection: a single column returns a Series, a list of columns
# returns a DataFrame.
print(football)
print('')
print(football['year'])
print('')
print(football.year)  # shorthand for football['year']
print('')
print(football[['year', 'wins', 'losses']])

# Row selection: slicing, an individual index (iloc / loc), or boolean
# indexing; conditions combine with & (and) and | (or).
print(football)
print('')
print(football.iloc[[0]])
print("")
print(football.loc[[0]])
print("")
print(football[3:5])
print("")
print(football[football.wins > 10])
print("")
print(football[(football.wins > 10) & (football.team == "Packers")])
github_jupyter
# Ensemble pipeline: LightGBM on text-count features plus CatBoost on
# categorical features, stacked with a logistic regression.
#
# Fixes over the original notebook code:
#   * `get_text_model` read an undefined global `test` and returned three
#     arrays that the caller unpacked into two -- `test` is now an explicit
#     parameter and all three return values are unpacked.
#   * `df_valid` was used but never created -- a validation split is now
#     made explicitly from the training data.
#   * `report(X_test, y_test)` / `report(test_pool, y_test)` referenced a
#     nonexistent `y_test` (the test set has no labels) -- removed.
#   * `df_submit` was written out but never built -- it is constructed here.
#   * Only the training frame was `fillna`'d -- now all frames are filled.
import numpy as np
import pandas as pd
import lightgbm as lgb
from catboost import CatBoost, Pool
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report, f1_score

# set file paths
filepath = '../fact/'
trainfile = 'train.csv'
testfile = 'test.csv'

df_train = pd.read_csv(filepath + trainfile)
df_test = pd.read_csv(filepath + testfile)

# Hold out a validation split (the original referenced an undefined df_valid).
df_train, df_valid = train_test_split(df_train, test_size=0.2, random_state=0)


def _report(y_true, y_pred_cls):
    """Print the negative-class F1, confusion matrix and full report."""
    print('f1:{}'.format(f1_score(y_true, y_pred_cls, average=None)[0]))
    print(confusion_matrix(y_true, y_pred_cls))
    print(classification_report(y_true, y_pred_cls))


def get_text_model(train, valid, test=None):
    """Train LightGBM on the word_/url_/hashtag_ count columns.

    train, valid, test: DataFrames with the count features and (for train
    and valid) a `target` column.  `test` was previously an undefined
    global; it is now an explicit parameter (default None for backward
    compatibility).

    Returns predicted positive-class probabilities for
    (train, valid, test) -- the test entry is None when no test frame
    was supplied.
    """
    def cols(prefix):
        return [c for c in train.columns if c.startswith(prefix)]

    feature_cols = cols('word_') + cols('url_') + cols('hashtag_')

    # Fill missing count features with zero in every frame (the original
    # only filled the training frame).
    for df in (train, valid, test):
        if df is not None:
            df[feature_cols] = df[feature_cols].fillna(0)

    X_train = train[feature_cols].values
    X_valid = valid[feature_cols].values
    X_test = test[feature_cols].values if test is not None else None
    y_train, y_valid = train.target.values, valid.target.values

    lgb_train = lgb.Dataset(X_train, y_train)
    lgb_valid = lgb.Dataset(X_valid, y_valid, reference=lgb_train)
    lgbm_params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'verbose': -1,
        'learning_rate': 0.01,
        'num_iterations': 1000,
    }
    # Early stopping against the validation split.
    model = lgb.train(lgbm_params, lgb_train, valid_sets=lgb_valid,
                      verbose_eval=False, early_stopping_rounds=10)

    def predict(X):
        return model.predict(X, num_iteration=model.best_iteration)

    _report(y_train, predict(X_train) >= 0.5)
    _report(y_valid, predict(X_valid) >= 0.5)

    # Refit on train+valid before producing the final probabilities.
    X = np.concatenate([X_train, X_valid], 0)
    y = np.concatenate([y_train, y_valid], 0)
    model = lgb.train(lgbm_params, lgb.Dataset(X, y), verbose_eval=False)

    return (predict(X_train), predict(X_valid),
            predict(X_test) if X_test is not None else None)


def get_category_model(train, valid, test):
    """Train CatBoost on the raw keyword/location columns.

    Returns predicted positive-class probabilities for
    (train, valid, test).
    """
    cat_cols = ['keyword', 'location']
    X_train, X_valid = train[cat_cols].values, valid[cat_cols].values
    X_test = test[cat_cols].values
    y_train, y_valid = train.target, valid.target

    # CatBoost-native dataset wrappers.
    train_pool = Pool(X_train, label=y_train)
    valid_pool = Pool(X_valid, label=y_valid)
    test_pool = Pool(X_test)

    params = {
        'loss_function': 'Logloss',
        'num_boost_round': 1000,
        'eval_metric': 'F1',
        'silent': False,
        'verbose': None,
        'early_stopping_rounds': 10,
    }
    model = CatBoost(params)
    model.fit(train_pool, logging_level='Silent')

    _report(y_train, model.predict(train_pool, prediction_type='Class'))
    _report(y_valid, model.predict(valid_pool, prediction_type='Class'))

    # Refit on train+valid for the final predictions.
    X = np.concatenate([X_train, X_valid], 0)
    y = np.concatenate([y_train, y_valid], 0)
    model.fit(Pool(X, label=y), logging_level='Silent')

    return (model.predict(train_pool, prediction_type='Probability')[:, 1],
            model.predict(valid_pool, prediction_type='Probability')[:, 1],
            model.predict(test_pool, prediction_type='Probability')[:, 1])


y_train_text_proba, y_valid_text_proba, y_test_text_proba = get_text_model(
    df_train, df_valid, df_test)
y_train_cat_proba, y_valid_cat_proba, y_test_cat_proba = get_category_model(
    df_train, df_valid, df_test)

# Stack the two base-model probabilities as features for the meta-model.
X_train = np.stack([y_train_text_proba, y_train_cat_proba], 1)
X_valid = np.stack([y_valid_text_proba, y_valid_cat_proba], 1)
X_test = np.stack([y_test_text_proba, y_test_cat_proba], 1)
y_train, y_valid = df_train.target, df_valid.target

clf = LogisticRegression(
    class_weight='balanced',
    random_state=0,
    penalty='elasticnet',
    l1_ratio=0.0,
    C=0.001,
    solver='saga',
)

# Refit the stacker on train+valid, then report on both labelled splits.
X = np.concatenate([X_train, X_valid], 0)
y = np.concatenate([y_train, y_valid], 0)
clf.fit(X, y)
_report(y_train, clf.predict(X_train))
_report(y_valid, clf.predict(X_valid))

# Build the submission.  NOTE(review): assumes the test file has an 'id'
# column -- confirm against the competition data.
df_submit = pd.DataFrame({'id': df_test['id'],
                          'target': clf.predict(X_test).astype(int)})
df_submit.to_csv('../output/submit.csv', index=None)
github_jupyter
We first need to figure out the focal length of the lens that will be used.
In general, to avoid spherical aberrations the f-number (the focal length divided by the beam diameter) of the lens should be over 20. For a low divergence beam the beam diameter will be about 1mm at the lens and, as we will see below, the allowed f-numbers will all be much greater than 20 and we don't need to worry about it further (as long as a plano-convex lens or doublet is used in the right orientation). ### Creating an artificial focus An example of beam propagation is shown below. The beam waist is at -500mm and a lens is located at 0mm. The beam cross section is exaggerated because the aspect ratio on the axes is 1000:1. ``` lambda0 = 632.8e-9 # wavelength of light [m] w0 = 450e-6 # radius at beam waist [m] f = 300e-3 # focal length of lens [m] lbs.M2_focus_plot(w0, lambda0, f, z0=-500e-3, M2=2) plt.show() ``` ### Axial measurement positions The ISO 11146-1 document, [Lasers and laser-related equipment - Test methods for laser beam widths, divergence angles and beam propagation, Part 1: Stigmatic and simple astigmatic beams](https://www.iso.org/obp/ui/#iso:std:iso:11146:-1:ed-1:v1:en) gives specific instructions for how to measure the M² value. > If the beam waist is accessible for direct measurement, the beam waist location, beam widths, divergence angles and beam propagation ratios shall be determined by a hyperbolic fit to different measurements of the beam width along the propagation axis $z$. Hence, measurements at at least 10 different $z$ positions shall be taken. Approximately half of the measurements shall be distributed within one Rayleigh length on either side of the beam waist, and approximately half of them shall be distributed beyond two Rayleigh lengths from the beam waist. For simple astigmatic beams this procedure shall be applied separately for both principal directions. In the picture above, the artificial beam waist is at 362mm and the Rayleigh distance for the artificial beam is 155mm. 
If the beam is centered on the camera sensor then it should be larger than 20 pixels and it should be less than 1/4 of the narrower sensor dimension.
Longer focal length lenses reduce the relative error in the positioning of the camera sensor relative to the lens. If one is doing these measurements by hand then ±1mm might be a typical positioning error. A motorized stage could minimize such errors, but who has the money for a stage that moves half of a meter! This means the focal distance needs to be less than 320mm. However, at this distance, the beam becomes too large and the largest focal length lens is now about 275mm. ``` w0 = 1e-3 / 2 lambda0 = 632.8e-9 f = np.linspace(50,500)*1e-3 s = -400e-3 M2 = 2 w0_artificial = w0 * lbs.magnification(w0,lambda0,s,f,M2=M2) z0_artificial = lbs.image_distance(w0,lambda0,s,f,M2=M2) zR_artificial = lbs.z_rayleigh(w0_artificial, lambda0, M2=M2) lens_to_4zr_distance = z0_artificial + 4 * zR_artificial plt.plot(f*1e3, lens_to_4zr_distance*1e3) plt.axhspan(1000, lens_to_4zr_distance[-1]*1e3, color='blue', alpha=0.1) plt.text(350, 1050, "Axial distance too far") plt.xlabel("Focal Length (mm)") plt.ylabel("$z_0+4z_R$ (mm)") plt.axvline(320,color='black') plt.show() radius_at_4zr = lbs.beam_radius(w0_artificial, lambda0, lens_to_4zr_distance, z0=z0_artificial, M2=M2) max_size = 960 * 0.25 * pixel_size_µm plt.plot(f*1e3, radius_at_4zr*1e6) plt.axhspan(1600, max_size, color='blue', alpha=0.1) plt.text(350, 1000, "Beam too big") plt.axvline(275,color='black') plt.xlabel("Focal Length (mm)") plt.ylabel("Beam Radius (mm)") plt.show() ``` ### Putting it all together The focal length of the lens to measure a multimode HeNe beam should then be between 190 and 275 mm. Here is what a reasonable set of measurements should be for a f=250mm lens. 
``` lambda0 = 632.8e-9 # wavelength of light [m] w0 = 500e-6 # radius at beam waist [m] f = 250e-3 # focal length of lens [m] s = -400e-3 # beam waist in laser to lens distance [m] M2 = 2 lbs.M2_focus_plot(w0, lambda0, f, z0=s, M2=M2) z0_after = lbs.image_distance(w0,lambda0,s,f,M2=M2) w0_after = w0 * lbs.magnification(w0,lambda0,s,f,M2=M2) zR_after = lbs.z_rayleigh(w0_after,lambda0,M2=M2) zn = np.linspace(z0_after-zR_after,z0_after+zR_after,5) zf = np.linspace(z0_after+2*zR_after,z0_after+4*zR_after,5) rn = lbs.beam_radius(w0_after, lambda0, zn, z0=z0_after, M2=2) rf = lbs.beam_radius(w0_after, lambda0, zf, z0=z0_after, M2=2) plt.plot(zn*1e3,rn*1e6,'or') plt.plot(zf*1e3,rf*1e6,'ob') plt.show() ``` ## Good spacing of beam size measurements ``` # datapoints digitized by hand from the graph at https://www.rp-photonics.com/beam_quality.html lambda1=308e-9 z1_all=np.array([-200,-180,-160,-140,-120,-100,-80,-60,-40,-20,0,20,40,60,80,99,120,140,160,180,200])*1e-3 d1_all=2*np.array([416,384,366,311,279,245,216,176,151,120,101,93,102,120,147,177,217,256,291,316,348])*1e-6 lbs.M2_radius_plot(z1_all, d1_all, lambda1, strict=True) ``` ## Poor spacing of beam size measurements A nice fit of the beam is achieved, however the fitted value for M²<1. This is impossible. Basically the problem boils down to the fact that the measurements in the beam waist are terrible for determining the actual divergence of the beam. The fit then severely underestimates the divergence of the beam and claims that the beam diverges more slowly than a simple Gaussian beam!! ``` ## Some Examples f=500e-3 # m lambda2 = 632.8e-9 # m z2_all = np.array([168, 210, 280, 348, 414, 480, 495, 510, 520, 580, 666, 770]) * 1e-3 # [m] d2_all = 2*np.array([597, 572, 547, 554, 479, 404, 415, 399, 377, 391, 326, 397]) * 1e-6 # [m] lbs.M2_radius_plot(z2_all, d2_all, lambda2, strict=True) plt.show() ```
github_jupyter
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow as tf
import keras
from keras.datasets import fashion_mnist, cifar10
from keras.layers import (Dense, Flatten, Normalization, Dropout, Conv2D,
                          MaxPooling2D, RandomFlip, RandomRotation, RandomZoom,
                          BatchNormalization, Activation, InputLayer)
from keras.models import Sequential
from keras.losses import SparseCategoricalCrossentropy, CategoricalCrossentropy
from keras.callbacks import EarlyStopping
from keras import utils
import os
from keras.preprocessing.image import ImageDataGenerator
import matplotlib as mpl
import matplotlib.pyplot as plt
import datetime

# Tensorboard and Pretrained Models
# Load MNIST and scale the pixel values to [0, 1].
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# FIX: `keras.utils.np_utils` was removed from modern Keras; use the
# equivalent tf.keras one-hot helper instead (same behavior).
y_test = tf.keras.utils.to_categorical(y_test)
y_train = tf.keras.utils.to_categorical(y_train)

# Set # of epochs used by the models in this notebook.
epochs = 10


def create_model():
    """Simple dense classifier for flattened 28x28 images, 10 classes."""
    return tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])


# Metrics shared by the models below (one-hot labels -> categorical accuracy).
acc = keras.metrics.CategoricalAccuracy(name="accuracy")
pre = keras.metrics.Precision(name="precision")
rec = keras.metrics.Recall(name="recall")
metric_list = [acc, pre, rec]

# #### Add Tensorboard Callback
# Tensorboard is attached to model.fit as a callback (see the next cell).
### Launch Tensorboard In recent versions of VS Code, which I assume all of you have, tensorboard can be used directly in a VS Code tab: ![VS Code Tensor](images/vscode_tensorboard.png "VS Code Tensor" ) The command below launches tensorboard elsewhere, such as Google Colab. Either way, the actual tensorboard feature works the same once launched. We can open it before or after we start training the model. If we open it before we can update it to watch training progress - something that may be useful if you have models that can train for a very long time.
I have lied to you, I forgot that the pretrained models are not sequential ones (generally, not as a rule), so some of the syntax here is for functional models.
# Download the filtered cats-vs-dogs dataset and build batched
# train / validation image datasets from its directory layout.
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')

train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')

BATCH_SIZE = 32
# Image size expected by the VGG16 input tensor defined in the next cell.
IMG_SIZE = (160, 160)

train_dataset = tf.keras.utils.image_dataset_from_directory(train_dir,
                                                            shuffle=True,
                                                            batch_size=BATCH_SIZE,
                                                            image_size=IMG_SIZE)
validation_dataset = tf.keras.utils.image_dataset_from_directory(validation_dir,
                                                                 shuffle=True,
                                                                 batch_size=BATCH_SIZE,
                                                                 image_size=IMG_SIZE)

# ### Download Model
# VGG16 ("Visual Geometry Group", 16 layers) is a pretrained image
# recognition network; it scored ~93% on the ImageNet test.  It is loaded
# and adapted in the following cell.
## More Complex Data We can use the flower-photos data for a more complex dataset and a more interesting example in terms of accuracy.
import pathlib
import PIL

# Download the flower-photos dataset (5 classes) and unpack it.
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file(origin=dataset_url, fname='flower_photos', untar=True)
data_dir = pathlib.Path(data_dir)

# Flowers
batch_size = 32
img_height = 180
img_width = 180

# 80/20 train/validation split from the same directory; the shared seed
# keeps the two subsets disjoint.
train_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
val_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)

class_names = train_ds.class_names
print(class_names)

# Reuse VGG16 as a frozen feature extractor and train only a new head.
input_tensor = Input(shape=(180, 180, 3))
vgg = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)
for layer in vgg.layers:
    layer.trainable = False

x = Flatten()(vgg.output)
prediction = Dense(5)(x)  # 5 flower classes; raw logits (see from_logits below)
model = Model(inputs=vgg.input, outputs=prediction)
model.summary()

model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              optimizer="adam",
              metrics=keras.metrics.SparseCategoricalAccuracy(name="accuracy"))

log_dir = "logs/fit/VGG" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
# Stop when the loss plateaus, keeping the best weights seen.
callback = EarlyStopping(monitor='loss', patience=3, restore_best_weights=True)

model.fit(train_ds,
          epochs=epochs,
          verbose=1,
          validation_data=val_ds,
          callbacks=[tensorboard_callback, callback])

# ## Hyperparameter Tuning
# Tensorboard can also display hyperparameter-tuning results.  The example
# in the following cells (adapted from the tensorflow docs) tunes a simple
# model -- a dense layer, a dropout, and the output -- over:
#   HP_NUM_UNITS - number of units between 16 and 64.
# (continued) hyperparameters tuned in this example:
#   HP_DROPOUT   - the dropout proportion.
#   HP_OPTIMIZER - which optimizer to use.

# Load some data
from tensorboard.plugins.hparams import api as hp

fashion_mnist = tf.keras.datasets.fashion_mnist
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
# Scale pixel values to [0, 1].
x_train, x_test = x_train / 255.0, x_test / 255.0

# #### Setup Parameters
# Each HParam names a tunable quantity and its domain (a discrete set or a
# real interval) -- analogous to the value lists in a grid search.
HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([16, 32, 48, 64]))
HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.1, 0.4))
HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam', 'sgd', "rmsprop"]))
METRIC_ACCURACY = 'accuracy'

# Register the hyperparameters and the tracked metric with tensorboard so
# the HPARAMS view can display them.
with tf.summary.create_file_writer('logs/hparam_tuning').as_default():
    hp.hparams_config(
        hparams=[HP_NUM_UNITS, HP_DROPOUT, HP_OPTIMIZER],
        metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
    )

# #### Build Test Models
# Helper functions below run one model per hyperparameter combination and
# return its accuracy; the tuned values are read from the hparams dict.
```
def train_test_model(hparams):
  # Build/train a small dense model whose tunable knobs come from `hparams`,
  # then return its test accuracy.
  model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(hparams[HP_NUM_UNITS], activation=tf.nn.relu),
    tf.keras.layers.Dropout(hparams[HP_DROPOUT]),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax),
  ])
  model.compile(
      optimizer=hparams[HP_OPTIMIZER],
      loss='sparse_categorical_crossentropy',
      metrics=['accuracy'],
  )

  model.fit(x_train, y_train, epochs=10)
  _, accuracy = model.evaluate(x_test, y_test)
  return accuracy

def run(run_dir, hparams):
  # One trial: log the hparam values used, train, and log the resulting
  # accuracy so TensorBoard can correlate them.
  with tf.summary.create_file_writer(run_dir).as_default():
    hp.hparams(hparams)  # record the values used in this trial
    accuracy = train_test_model(hparams)
    tf.summary.scalar(METRIC_ACCURACY, accuracy, step=1)
```

#### Perform the GridSearch

We have to write the gridsearch manually, but we can copy this basic setup as a template and modify it. Once complete, load tensorboard and go to the HPARAMS section to visualize. The parallel coordinates view allows us to do a quick exploration of the best HPs.

```
session_num = 0

# Exhaustive grid over all declared domains. Note: for the RealInterval
# dropout only the two endpoints are tried.
for num_units in HP_NUM_UNITS.domain.values:
  for dropout_rate in (HP_DROPOUT.domain.min_value, HP_DROPOUT.domain.max_value):
    for optimizer in HP_OPTIMIZER.domain.values:
      hparams = {
          HP_NUM_UNITS: num_units,
          HP_DROPOUT: dropout_rate,
          HP_OPTIMIZER: optimizer,
      }
      run_name = "run-%d" % session_num
      print('--- Starting trial: %s' % run_name)
      print({h.name: hparams[h] for h in hparams})
      run('logs/hparam_tuning/' + run_name, hparams)
      session_num += 1
```
github_jupyter
# Character-Level LSTM in PyTorch In this notebook, I'll construct a character-level LSTM with PyTorch. The network will train character by character on some text, then generate new text character by character. As an example, I will train on Anna Karenina. **This model will be able to generate new text based on the text from the book!** This network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Below is the general architecture of the character-wise RNN. <img src="assets/charseq.jpeg" width="500"> First let's load in our required resources for data loading and model creation. ``` import numpy as np import torch from torch import nn import torch.nn.functional as F ``` ## Load in Data Then, we'll load the Anna Karenina text file and convert it into integers for our network to use. ``` # open text file and read in data as `text` with open('data/anna.txt', 'r') as f: text = f.read() ``` Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever. ``` text[:100] ``` ### Tokenization In the cells, below, I'm creating a couple **dictionaries** to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network. ``` # encode the text and map each character to an integer and vice versa # we create two dictionaries: # 1. int2char, which maps integers to characters # 2. char2int, which maps characters to unique integers chars = tuple(set(text)) print(chars) int2char = dict(enumerate(chars)) print(int2char) char2int = {ch: ii for ii, ch in int2char.items()} print(char2int) # encode the text encoded = np.array([char2int[ch] for ch in text]) ``` And we can see those same characters from above, encoded as integers. 
```
encoded[:100]
```

## Pre-processing the data

As you can see in our char-RNN image above, our LSTM expects an input that is **one-hot encoded** meaning that each character is converted into an integer (via our created dictionary) and *then* converted into a column vector where only its corresponding integer index will have the value of 1 and the rest of the vector will be filled with 0's. Since we're one-hot encoding the data, let's make a function to do that!

```
def one_hot_encode(arr, n_labels):
    # One-hot encode an integer array `arr`; output shape is arr.shape + (n_labels,).

    # Initialize the encoded array: one flat row per element of arr
    one_hot = np.zeros((np.multiply(*arr.shape), n_labels), dtype=np.float32)

    # Fill the appropriate elements with ones (fancy-indexing one hit per row)
    one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.

    # Finally reshape it to get back to the original array
    one_hot = one_hot.reshape((*arr.shape, n_labels))

    return one_hot

# check that the function works as expected
test_seq = np.array([[3, 5, 1]])
one_hot = one_hot_encode(test_seq, 8)

print(one_hot)
```

## Making training mini-batches

To train on this data, we also want to create mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:

<img src="assets/sequence_batching@1x.png" width=500px>

<br>

In this example, we'll take the encoded characters (passed in as the `arr` parameter) and split them into multiple sequences, given by `batch_size`. Each of our sequences will be `seq_length` long.

### Creating Batches

**1. The first thing we need to do is discard some of the text so we only have completely full mini-batches. **

Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences in a batch) and $M$ is the seq_length or number of time steps in a sequence. Then, to get the total number of batches, $K$, that we can make from the array `arr`, you divide the length of `arr` by the number of characters per batch.
Once you know the number of batches, you can get the total number of characters to keep from `arr`, $N * M * K$.

**2. After that, we need to split `arr` into $N$ batches. **

You can do this using `arr.reshape(size)` where `size` is a tuple containing the dimensions sizes of the reshaped array. We know we want $N$ sequences in a batch, so let's make that the size of the first dimension. For the second dimension, you can use `-1` as a placeholder in the size, it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M * K)$.

**3. Now that we have this array, we can iterate through it to get our mini-batches. **

The idea is each batch is a $N \times M$ window on the $N \times (M * K)$ array. For each subsequent batch, the window moves over by `seq_length`. We also want to create both the input and target arrays. Remember that the targets are just the inputs shifted over by one character.

The way I like to do this window is use `range` to take steps of size `n_steps` from $0$ to `arr.shape[1]`, the total number of tokens in each sequence. That way, the integers you get from `range` always point to the start of a batch, and each window is `seq_length` wide.

> **TODO:** Write the code for creating batches in the function below. The exercises in this notebook _will not be easy_. I've provided a notebook with solutions alongside this notebook. If you get stuck, checkout the solutions. The most important thing is that you don't copy and paste the code into here, **type out the solution code yourself.**

```
def get_batches(arr, batch_size, seq_length):
    '''Create a generator that returns batches of size
       batch_size x seq_length from arr.

       Arguments
       ---------
       arr: Array you want to make batches from
       batch_size: Batch size, the number of sequences per batch
       seq_length: Number of encoded chars in a sequence
    '''

    ## TODO: Get the number of batches we can make
    batch_size_total = batch_size * seq_length
    print(f'batch size total {batch_size_total}')
    n_batches = len(arr) // batch_size_total
    print(f'n batches {n_batches}')

    ## TODO: Keep only enough characters to make full batches
    arr = arr[:n_batches * batch_size_total]
    print(f'arr shape {arr.shape}')

    ## TODO: Reshape into batch_size rows
    arr = arr.reshape((batch_size, -1))
    print(f'arr shape {arr.shape}')

    ## TODO: Iterate over the batches using a window of size seq_length
    for n in range(0, arr.shape[1], seq_length):
        # The features
        x = arr[:, n : n + seq_length]
        # The targets, shifted by one; the final target wraps around to the
        # start of each row when the window runs off the end of the array
        y = np.zeros_like(x)
        try:
            y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n + seq_length]
        except IndexError:
            y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0]
        yield x, y
```

### Test Your Implementation

Now I'll make some data sets and we can check out what's going on as we batch data. Here, as an example, I'm going to use a batch size of 8 and 50 sequence steps.

```
batches = get_batches(encoded, 8, 50)
x, y = next(batches)

# printing out the first 10 items in a sequence
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
```

If you implemented `get_batches` correctly, the above output should look something like

```
x
 [[25  8 60 11 45 27 28 73  1  2]
 [17  7 20 73 45  8 60 45 73 60]
 [27 20 80 73  7 28 73 60 73 65]
 [17 73 45  8 27 73 66  8 46 27]
 [73 17 60 12 73  8 27 28 73 45]
 [66 64 17 17 46  7 20 73 60 20]
 [73 76 20 20 60 73  8 60 80 73]
 [47 35 43  7 20 17 24 50 37 73]]

y
 [[ 8 60 11 45 27 28 73  1  2  2]
 [ 7 20 73 45  8 60 45 73 60 45]
 [20 80 73  7 28 73 60 73 65  7]
 [73 45  8 27 73 66  8 46 27 65]
 [17 60 12 73  8 27 28 73 45 27]
 [64 17 17 46  7 20 73 60 20 80]
 [76 20 20 60 73  8 60 80 73 17]
 [35 43  7 20 17 24 50 37 73 36]]
```

although the exact numbers may be different.
Check to make sure the data is shifted over one step for `y`. --- ## Defining the network with PyTorch Below is where you'll define the network. <img src="assets/charRNN.png" width=500px> Next, you'll use PyTorch to define the architecture of the network. We start by defining the layers and operations we want. Then, define a method for the forward pass. You've also been given a method for predicting characters. ### Model Structure In `__init__` the suggested structure is as follows: * Create and store the necessary dictionaries (this has been done for you) * Define an LSTM layer that takes as params: an input size (the number of characters), a hidden layer size `n_hidden`, a number of layers `n_layers`, a dropout probability `drop_prob`, and a batch_first boolean (True, since we are batching) * Define a dropout layer with `dropout_prob` * Define a fully-connected layer with params: input size `n_hidden` and output size (the number of characters) * Finally, initialize the weights (again, this has been given) Note that some parameters have been named and given in the `__init__` function, and we use them and store them by doing something like `self.drop_prob = drop_prob`. --- ### LSTM Inputs/Outputs You can create a basic [LSTM layer](https://pytorch.org/docs/stable/nn.html#lstm) as follows ```python self.lstm = nn.LSTM(input_size, n_hidden, n_layers, dropout=drop_prob, batch_first=True) ``` where `input_size` is the number of characters this cell expects to see as sequential input, and `n_hidden` is the number of units in the hidden layers in the cell. And we can add dropout by adding a dropout parameter with a specified probability; this will automatically add dropout to the inputs or outputs. Finally, in the `forward` function, we can stack up the LSTM cells into layers using `.view`. With this, you pass in a list of cells and it will send the output of one cell into the next cell. We also need to create an initial hidden state of all zeros. 
This is done like so ```python self.init_hidden() ``` ``` # check if GPU is available train_on_gpu = torch.cuda.is_available() if(train_on_gpu): print('Training on GPU!') else: print('No GPU available, training on CPU; consider making n_epochs very small.') class CharRNN(nn.Module): def __init__(self, tokens, n_hidden=256, n_layers=2, drop_prob=0.5, lr=0.001): super().__init__() self.drop_prob = drop_prob self.n_layers = n_layers self.n_hidden = n_hidden self.lr = lr self.batch_size = 8 # creating character dictionaries self.chars = tokens self.int2char = dict(enumerate(self.chars)) self.char2int = {ch: ii for ii, ch in self.int2char.items()} ## TODO: define the layers of the model self.lstm = nn.LSTM(len(self.chars), self.n_hidden, self.n_layers, self.drop_prob, batch_first=True) self.dropout = nn.Dropout(self.drop_prob) self.fc = nn.Linear(self.n_hidden, len(self.chars)) def forward(self, x, hidden): ''' Forward pass through the network. These inputs are x, and the hidden/cell state `hidden`. 
''' ## TODO: Get the outputs and the new hidden state from the lstm r_output, hidden = self.lstm(x, hidden) ## TODO: pass through a dropout layer out = self.dropout(r_output) out = out.contiguous().view(-1, self.n_hidden) out = self.fc(out) # return the final output and the hidden state return out, hidden def init_hidden(self, batch_size): ''' Initializes hidden state ''' # Create two new tensors with sizes n_layers x batch_size x n_hidden, # initialized to zero, for hidden state and cell state of LSTM weight = next(self.parameters()).data if (train_on_gpu): hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(), weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda()) else: hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(), weight.new(self.n_layers, batch_size, self.n_hidden).zero_()) return hidden ``` ## Time to train The train function gives us the ability to set the number of epochs, the learning rate, and other parameters. Below we're using an Adam optimizer and cross entropy loss since we are looking at character class scores as output. We calculate the loss and perform backpropagation, as usual! A couple of details about training: >* Within the batch loop, we detach the hidden state from its history; this time setting it equal to a new *tuple* variable because an LSTM has a hidden state that is a tuple of the hidden and cell states. * We use [`clip_grad_norm_`](https://pytorch.org/docs/stable/_modules/torch/nn/utils/clip_grad.html) to help prevent exploding gradients. 
```
def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10):
    ''' Training a network

        Arguments
        ---------
        net: CharRNN network
        data: text data to train the network
        epochs: Number of epochs to train
        batch_size: Number of mini-sequences per mini-batch, aka batch size
        seq_length: Number of character steps per mini-batch
        lr: learning rate
        clip: gradient clipping
        val_frac: Fraction of data to hold out for validation
        print_every: Number of steps for printing training and validation loss
    '''
    net.train()

    opt = torch.optim.Adam(net.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()

    # create training and validation data (tail of the corpus held out)
    val_idx = int(len(data)*(1-val_frac))
    data, val_data = data[:val_idx], data[val_idx:]

    if(train_on_gpu):
        net.cuda()

    counter = 0
    n_chars = len(net.chars)
    for e in range(epochs):
        # initialize hidden state once per epoch
        h = net.init_hidden(batch_size)

        for x, y in get_batches(data, batch_size, seq_length):
            counter += 1

            # One-hot encode our data and make them Torch tensors
            x = one_hot_encode(x, n_chars)
            inputs, targets = torch.from_numpy(x), torch.from_numpy(y)

            if(train_on_gpu):
                inputs, targets = inputs.cuda(), targets.cuda()

            # Creating new variables for the hidden state, otherwise
            # we'd backprop through the entire training history
            h = tuple([each.data for each in h])

            # zero accumulated gradients
            net.zero_grad()

            # get the output from the model
            output, h = net(inputs, h)

            # calculate the loss and perform backprop; targets are flattened
            # to match the (batch*seq, n_chars) logits shape
            loss = criterion(output, targets.view(batch_size*seq_length))
            loss.backward()
            # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
            nn.utils.clip_grad_norm_(net.parameters(), clip)
            opt.step()

            # loss stats
            if counter % print_every == 0:
                # Get validation loss
                # NOTE(review): this loop runs without torch.no_grad(), so it
                # builds unnecessary autograd graphs — works, but wastes memory.
                val_h = net.init_hidden(batch_size)
                val_losses = []
                net.eval()
                for x, y in get_batches(val_data, batch_size, seq_length):
                    # One-hot encode our data and make them Torch tensors
                    x = one_hot_encode(x, n_chars)
                    x, y = torch.from_numpy(x), torch.from_numpy(y)

                    # Creating new variables for the hidden state, otherwise
                    # we'd backprop through the entire training history
                    val_h = tuple([each.data for each in val_h])

                    inputs, targets = x, y
                    if(train_on_gpu):
                        inputs, targets = inputs.cuda(), targets.cuda()

                    output, val_h = net(inputs, val_h)
                    val_loss = criterion(output, targets.view(batch_size*seq_length))

                    val_losses.append(val_loss.item())

                net.train() # reset to train mode after iterating through validation data

                print("Epoch: {}/{}...".format(e+1, epochs),
                      "Step: {}...".format(counter),
                      "Loss: {:.4f}...".format(loss.item()),
                      "Val Loss: {:.4f}".format(np.mean(val_losses)))
```

## Instantiating the model

Now we can actually train the network. First we'll create the network itself, with some given hyperparameters. Then, define the mini-batches sizes, and start training!

```
## TODO: set you model hyperparameters
# define and print the net
n_hidden= 512
n_layers= 2

net = CharRNN(chars, n_hidden, n_layers)
print(net)
```

### Set your training hyperparameters!

```
batch_size = 8
seq_length = 50
n_epochs = 2 # start small if you are just testing initial behavior

# train the model
train(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10)
```

## Getting the best model

To set your hyperparameters to get the best performance, you'll want to watch the training and validation losses. If your training loss is much lower than the validation loss, you're overfitting. Increase regularization (more dropout) or use a smaller network.
If the training and validation losses are close, you're underfitting so you can increase the size of the network. ## Hyperparameters Here are the hyperparameters for the network. In defining the model: * `n_hidden` - The number of units in the hidden layers. * `n_layers` - Number of hidden LSTM layers to use. We assume that dropout probability and learning rate will be kept at the default, in this example. And in training: * `batch_size` - Number of sequences running through the network in one pass. * `seq_length` - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here. * `lr` - Learning rate for training Here's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to [where it originally came from](https://github.com/karpathy/char-rnn#tips-and-tricks). > ## Tips and Tricks >### Monitoring Validation Loss vs. Training Loss >If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular: > - If your training loss is much lower than validation loss then this means the network might be **overfitting**. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on. > - If your training/validation loss are about equal then your model is **underfitting**. Increase the size of your model (either number of layers or the raw number of neurons per layer) > ### Approximate number of parameters > The two most important parameters that control the model are `n_hidden` and `n_layers`. 
I would advise that you always use `n_layers` of either 2/3. The `n_hidden` can be adjusted based on how much data you have. The two important quantities to keep track of here are: > - The number of parameters in your model. This is printed when you start training. > - The size of your dataset. 1MB file is approximately 1 million characters. >These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples: > - I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make `n_hidden` larger. > - I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss. > ### Best models strategy >The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end. >It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance. >By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative. ## Checkpoint After training, we'll save the model so we can load it again later if we need too. 
Here I'm saving the parameters needed to create the same architecture, the hidden layer hyperparameters and the text characters.

```
# change the name, for saving multiple files
model_name = 'rnn_x_epoch.net'

# everything needed to rebuild the net later: architecture hyperparameters,
# learned weights, and the character vocabulary (set() order varies per run)
checkpoint = {'n_hidden': net.n_hidden,
              'n_layers': net.n_layers,
              'state_dict': net.state_dict(),
              'tokens': net.chars}

with open(model_name, 'wb') as f:
    torch.save(checkpoint, f)
```

---

## Making Predictions

Now that the model is trained, we'll want to sample from it and make predictions about next characters! To sample, we pass in a character and have the network predict the next character. Then we take that character, pass it back in, and get another predicted character. Just keep doing this and you'll generate a bunch of text!

### A note on the `predict`  function

The output of our RNN is from a fully-connected layer and it outputs a **distribution of next-character scores**.

> To actually get the next character, we apply a softmax function, which gives us a *probability* distribution that we can then sample to predict the next character.

### Top K sampling

Our predictions come from a categorical probability distribution over all the possible characters. We can make the sample text and make it more reasonable to handle (with less variables) by only considering some $K$ most probable characters. This will prevent the network from giving us completely absurd characters while allowing it to introduce some noise and randomness into the sampled text. Read more about [topk, here](https://pytorch.org/docs/stable/torch.html#torch.topk).

```
def predict(net, char, h=None, top_k=None):
        ''' Given a character, predict the next character.
            Returns the predicted character and the hidden state.
            NOTE(review): the h=None default would crash at the detach step
            below — callers (see sample()) always pass a hidden state.
        '''

        # tensor inputs: encode the single character and one-hot it
        x = np.array([[net.char2int[char]]])
        x = one_hot_encode(x, len(net.chars))
        inputs = torch.from_numpy(x)

        if(train_on_gpu):
            inputs = inputs.cuda()

        # detach hidden state from history
        h = tuple([each.data for each in h])
        # get the output of the model
        out, h = net(inputs, h)

        # get the character probabilities
        p = F.softmax(out, dim=1).data
        if(train_on_gpu):
            p = p.cpu() # move to cpu

        # get top characters: either the whole vocabulary or just the top_k
        if top_k is None:
            top_ch = np.arange(len(net.chars))
        else:
            p, top_ch = p.topk(top_k)
            top_ch = top_ch.numpy().squeeze()

        # select the likely next character with some element of randomness
        # (re-normalize p since topk probabilities don't sum to 1)
        p = p.numpy().squeeze()
        char = np.random.choice(top_ch, p=p/p.sum())

        # return the encoded value of the predicted char and the hidden state
        return net.int2char[char], h
```

### Priming and generating text

Typically you'll want to prime the network so you can build up a hidden state. Otherwise the network will start out generating characters at random. In general the first bunch of characters will be a little rough since it hasn't built up a long history of characters to predict from.
```
def sample(net, size, prime='The', top_k=None):
    # Generate `size` new characters, seeded with the `prime` string.

    if(train_on_gpu):
        net.cuda()
    else:
        net.cpu()

    net.eval() # eval mode

    # First off, run through the prime characters to build up a hidden state
    chars = [ch for ch in prime]
    h = net.init_hidden(1)
    for ch in prime:
        char, h = predict(net, ch, h, top_k=top_k)

    chars.append(char)

    # Now pass in the previous character and get a new one
    for ii in range(size):
        char, h = predict(net, chars[-1], h, top_k=top_k)
        chars.append(char)

    return ''.join(chars)

print(sample(net, 1000, prime='Anna', top_k=5))
```

## Loading a checkpoint

```
# Here we have loaded in a model that trained over 20 epochs `rnn_20_epoch.net`
with open('rnn_x_epoch.net', 'rb') as f:
    checkpoint = torch.load(f)

# rebuild the architecture from the saved hyperparameters, then restore weights
loaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], n_layers=checkpoint['n_layers'])
loaded.load_state_dict(checkpoint['state_dict'])

# Sample using a loaded model
print(sample(loaded, 2000, top_k=5, prime="And Levin said"))
```
github_jupyter
``` import numpy as np #UNITS #A = mol/cm^3 -s #n = none #Ea = kcal/k*mol #c = #d = #f = six_parameter_fit_sensitivities = {'H2O2 + OH <=> H2O + HO2':{'A':np.array([-13.37032086, 32.42060027, 19.23022032, 6.843287462 , 36.62853824 ,-0.220309785 ,-0.099366346, -4.134352081]), 'n':np.array([1.948532282, -5.341557065, -3.337497841, -1.025292166, -5.813524857, 0.011862923 ,0.061801326, 0.581628835]), 'Ea':np.array([-0.463042822, 1.529151218, 0.808025472 ,0.359889935, -0.021309254, -0.098013004, -0.102022118, -0.097024727]), 'c':np.array([0.00163576, -0.008645666, -0.003111179, -0.002541995, 0.014228149 ,0.001263134, 0.001236963, -0.000390567]), 'd':np.array([1.071992802, -2.780550365, -1.71391034 ,-0.274481751, -4.491132406, -0.054960894, 0.049553379, 0.270885383]), 'f':np.array([-0.027060156, 0.056903076, 0.041102936 ,0.001361221, 0.144385439, 0.003136796 ,0.001374015, -0.006089248])}, '2 HO2 <=> H2O2 + O2': {'A':np.array([-12.93733217, 24.39245077 ,17.73177606, 4.37803475, 33.44985889, 0.381601192 ,3.748890308]), 'n':np.array([1.872602872, -4.096806067, -3.09439453 ,-0.63226683, -5.125008418, -0.061610462, -0.677953862]), 'Ea':np.array([-0.463903763 ,1.259537237, 0.826684258 ,0.257400116, 0.803882706 ,2.20E-05, 0.181336266]), 'c':np.array([0.002069572, -0.008314769, -0.00424128 ,-0.002016113, 0.000134642 ,0.000122049 ,-0.001026567]), 'd':np.array([0.981856324, -1.847383095, -1.493544053, 0.016222685, -3.428753345, -0.050708107, -0.526284003]), 'f':np.array([-0.022628436, 0.023558844, 0.031573523 ,-0.00732987, 0.096573278 ,0.001668073, 0.01033547])}, 'HO2 + OH <=> H2O + O2': {'A':np.array([-4.795727446, 6.426354909 ,4.878258417, 2.472791017, 7.856296474, 1.328033302 ,-3.457932692, -0.349839371, 2.331070924 ,2.403555921, -0.165397001, 0.246540172 ,0.722946077]), 'n':np.array([0.624241134, -1.321082842, -1.032242319, -0.36532386, -1.112545721, -0.188622956, 0.421083939 ,0.038859478 ,-0.360855106, -0.38989218, 0.029669899 ,-0.04371581, -0.130487515]), 
'Ea':np.array([-0.259799111, 0.205620792 ,0.130799794, 0.137023666 ,0.379232542, 6.19E-02, -0.198196699, -0.023548432, 0.118069394 ,0.104383314 ,-0.003830947, 0.011566499 ,-0.073557828]), 'c':np.array([0.00161312, -0.001906694, -0.000863021, -0.00105112 ,-0.002185605, -0.000334461, 0.001817049 ,0.000170761, -0.000859313, -0.000653029, -3.11E-06 ,-6.37E-05, 0.00047058]), 'd':np.array([0.124499363, -0.645652135, -0.535188558, 0.052734001 ,-0.45181066, -0.082250635, 0.034779283, -0.011522821, 0.017057742, -0.165960963, 0.057288687, -0.012776017, -0.192422381]), 'f':np.array([0.002033109, -0.011099716, 0.005351213 ,-0.007623667, 0.005327017 ,0.001259485,0.00245957, 0.000976725 ,-0.004879845, 0.001903886 ,-0.001838669 ,0.000252269, 0.004691829])}, '2 OH <=> H2O + O': {'A': np.array([-5.40485067, 18.96061659 ,8.089301961, 6.953940096 ,-12.54280438, -3.264972401, 2.106487623 ,-1.657943467, 1.614935 ,-1.536463599]), 'n': np.array([0.803274875, -3.167851673, -1.607661056, -1.041258197, 1.679914849, 0.466415264 ,-0.326136934, 0.355297684 ,-0.16618967, 0.253903734]), 'Ea': np.array([0.147285831, 0.605814544, -0.062253282, 0.372322712, -1.884116555, -0.281992263, 0.099465537 ,0.030650483, 0.176069015 ,-0.056967886]), 'c': np.array([-0.003001658, -0.001870536, 0.003820535 ,-0.002753277, 0.014224162, 0.00032969 ,-0.000627241, -0.001081979, -0.002009835, 0.000255318]), 'd':np.array([0.446957978, -1.467039994, -1.298391635, -0.402720385, 0.568106728 ,0.229877892, -0.194395052, 1.033858025 ,0.527183366, 0.308743056]), 'f':np.array([-0.010053913, 0.025128322, 0.035579811 ,0.00515753 ,-0.0083511, -0.00512885, 0.003954, -0.029711993 ,-0.01986861, -0.007691647])}, 'CH3 + HO2 <=> CH4 + O2': {'A':np.array([.007845,-.89278,-.94908]), 'n':np.array([-0.00104,-.36888,.154462]), 'Ea':np.array([.504278,-.44379,-0.03181]), 'c':np.array([0,0,0]), 'd':np.array([0,0,0]), 'f':np.array([0,0,0])}, 'CH3 + HO2 <=> CH3O + OH': {'A':np.array([1.319108,-.92151]), 'n':np.array([-.04282,.150846]), 
'Ea':np.array([0.024285,-0.02956]), 'c':np.array([0,0]), 'd':np.array([0,0]), 'f':np.array([0,0])}} six_parameter_fit_nominal_parameters_dict = {'H2O2 + OH <=> H2O + HO2':{'A':4.64E-06,'n':5.605491008,'Ea':-5440.266692,'c':126875776.1,'d':0.000441194,'f':-5.35E-13}, '2 HO2 <=> H2O2 + O2':{'A':1.30E+04,'n':1.997152351,'Ea':-3628.04407,'c':93390973.44,'d':-0.000732521,'f':8.20E-12} , 'HO2 + OH <=> H2O + O2':{'A':1.41E+18,'n':-2.05344973,'Ea':-232.0064051,'c':15243859.12,'d':-0.001187694,'f':8.01E-12}, '2 OH <=> H2O + O':{'A':354.5770856,'n':2.938741717,'Ea':-1836.492972,'c':12010735.18,'d':-4.87E-05,'f':1.22E-12}, 'CH3 + HO2 <=> CH4 + O2':{'A':3.19e3,'n':2.670857,'Ea':-4080.73,'c':0.0,'d':0.0,'f':0.0}, 'CH3 + HO2 <=> CH3O + OH':{'A':8.38e11,'n':.29,'Ea':-785.45,'c':0.0,'d':0.0,'f':0.0}} def calculate_six_parameter_fit(reaction,dictonary,temperature): #finish editing this #calc Ea,c,d,F seprately A = dictonary[reaction]['A'] n = dictonary[reaction]['n'] Ea_temp = dictonary[reaction]['Ea']/(1.987*temperature) c_temp = dictonary[reaction]['c']/((1.987*temperature)**3) d_temp = dictonary[reaction]['d']*(1.987*temperature) f_temp = dictonary[reaction]['f']* ((1.987*temperature)**3) k = A*(temperature**n)*np.exp(-Ea_temp-c_temp-d_temp-f_temp) return k xdata = [] ydata = [] for t in np.arange(200,2400): xdata.append(t) ydata.append(calculate_six_parameter_fit('2 HO2 <=> H2O2 + O2',six_parameter_fit_nominal_parameters_dict,t)) ydata = np.array(ydata) ydata = np.log(ydata) plt.scatter(xdata,ydata) #fitting sigmas import matplotlib.pyplot as plt from scipy.optimize import curve_fit def func(x, A,n,Ea): return np.log(A)+np.log(x)*n + (-Ea/(1.987*x)) plt.scatter(xdata, ydata,label='data') popt, pcov = curve_fit(func, xdata, ydata) print(popt) test_array = [] for T in xdata: test_array.append(np.log(popt[0]*T**popt[1]*np.exp(-popt[2]/(1.987*T)))) plt.plot(xdata,test_array,'r') #fitting sigmas import matplotlib.pyplot as plt from scipy.optimize import curve_fit def func2(x, 
A,n,Ea,c,d,f): return np.log(A)+np.log(x)*n + (-Ea/(1.987*x)) + (-c/((1.987*x)**3)) + (-d*(1.987*x)) + (-f*((1.987*x)**3)) popt, pcov = curve_fit(func2, xdata, ydata,maxfev=1000000) #popt, pcov = curve_fit(func2, xdata, ydata, method='dogbox',maxfev=10000) #method{‘lm’, ‘trf’, ‘dogbox’}, optional plt.scatter(xdata, ydata,label='data') print(popt) test_array = [] for T in xdata: A = popt[0] n = popt[1] Ea_temp = popt[2]/(1.987*T) c_temp = popt[3]/((1.987*T)**3) d_temp = popt[4]*(1.987*T) f_temp =popt[5]* ((1.987*T)**3) k = A*(T**n)*np.exp(-Ea_temp-c_temp-d_temp-f_temp) test_array.append(np.log(k)) plt.plot(xdata,test_array,'r') #calculate original 3 parameter fit #fitting sigmas import pandas as pd import matplotlib.pyplot as plt from scipy.optimize import curve_fit nominal_rc_df = pd.read_csv('') xdata=nominal_rc_df['T'] ydata=nominal_rc_df['k'] def func(x, A,n,Ea): return np.log(A)+np.log(x)*n + (-Ea/(1.987*x)) plt.scatter(xdata, ydata,label='data') popt, pcov = curve_fit(func, xdata, ydata,maxfev=1000000) print(popt) A_nominal_3pf = popt[0] n_nominal_3pf = popt[1] Ea_nominal_3pf = popt[2]/(1.987*1000) test_array = [] for T in xdata: test_array.append(np.log(popt[0]*T**popt[1]*np.exp(-popt[2]/(1.987*T)))) plt.plot(xdata,test_array,'r') #looping over csvs and calculating sens coefficients for 3pf import pandas as pd import matplotlib.pyplot as plt from scipy.optimize import curve_fit A_list_3pf = [] n_list_3pf = [] Ea_list_3pf = [] for csv in csv list: df = pd.read_csv('') xdata=df['T'] ydata=df['k'] amount_perturbed = def func(x, A,n,Ea): return np.log(A)+np.log(x)*n + (-Ea/(1.987*x)) plt.scatter(xdata, ydata,label='data') popt, pcov = curve_fit(func, xdata, ydata,maxfev=1000000) print(popt) A = popt[0] n = popt[1] Ea = popt[2]/(1.987*1000) test_array = [] for T in xdata: test_array.append(np.log(popt[0]*T**popt[1]*np.exp(-popt[2]/(1.987*T)))) plt.plot(xdata,test_array,'r') sensitivty_A = (A - A_nominal_3pf)/amount_perturbed sensitivity_n = 
(n-n_nominal_3pf)/amount_perturbed sensitivty_Ea = (Ea - Ea_nominal_3pf)/amount_perturbed A_list_3pf.append(sensitivty_A) n_list_3pf.append(sensitivity_n) Ea_list_3pf.append(sensitivty_Ea) #calculating original 6 paramter fit #fitting sigmas import pandas as pd import matplotlib.pyplot as plt from scipy.optimize import curve_fit nominal_rc_df = pd.read_csv('') xdata=nominal_rc_df['T'] ydata=nominal_rc_df['k'] def func2(x, A,n,Ea,c,d,f): return np.log(A)+np.log(x)*n + (-Ea/(1.987*x)) + (-c/((1.987*x)**3)) + (-d*(1.987*x)) + (-f*((1.987*x)**3)) popt, pcov = curve_fit(func2, xdata, ydata,maxfev=1000000) #popt, pcov = curve_fit(func2, xdata, ydata, method='dogbox',maxfev=10000) #method{‘lm’, ‘trf’, ‘dogbox’}, optional plt.scatter(xdata, ydata,label='data') print(popt) A_nominal_spf = popt[0] n_nominal_spf = popt[1] Ea_nominal_spf = popt[2]/(1.987*1000) c_nominal_spf = popt[3]/((1.987*1000)**3) d_nominal_spf = popt[4]/((1.987*1000)**-1) f_nominal_spf = popt[5]/((1.987*1000)**-3) test_array = [] for T in xdata: A = popt[0] n = popt[1] Ea_temp = popt[2]/(1.987*T) c_temp = popt[3]/((1.987*T)**3) d_temp = popt[4]*(1.987*T) f_temp =popt[5]* ((1.987*T)**3) k = A*(T**n)*np.exp(-Ea_temp-c_temp-d_temp-f_temp) test_array.append(np.log(k)) plt.plot(xdata,test_array,'r') #looping over csvs and calculating sens coefficients for 6pf import pandas as pd import matplotlib.pyplot as plt from scipy.optimize import curve_fit A_list_6pf = [] n_list_6pf = [] Ea_list_6pf = [] c_list_6pf = [] d_list_6pf = [] f_list_6pf = [] for csv in csv list: df = pd.read_csv('') xdata=df['T'] ydata=df['k'] amount_perturbed = def func2(x, A,n,Ea,c,d,f): return np.log(A)+np.log(x)*n + (-Ea/(1.987*x)) + (-c/((1.987*x)**3)) + (-d*(1.987*x)) + (-f*((1.987*x)**3)) popt, pcov = curve_fit(func2, xdata, ydata,maxfev=1000000) #popt, pcov = curve_fit(func2, xdata, ydata, method='dogbox',maxfev=10000) test_array = [] for T in xdata: A = popt[0] n = popt[1] Ea_temp = popt[2]/(1.987*T) c_temp = popt[3]/((1.987*T)**3) 
d_temp = popt[4]*(1.987*T) f_temp =popt[5]* ((1.987*T)**3) k = A*(T**n)*np.exp(-Ea_temp-c_temp-d_temp-f_temp) test_array.append(np.log(k)) plt.plot(xdata,test_array,'r') #method{‘lm’, ‘trf’, ‘dogbox’}, optional plt.scatter(xdata, ydata,label='data') print(popt) A = popt[0] n = popt[1] Ea = popt[2]/(1.987*1000) c = popt[3]/((1.987*1000)**3) d = popt[4]/((1.987*1000)**-1) f = popt[5]/((1.987*1000)**-3) sensitivty_A = (A - A_nominal_6pf)/amount_perturbed sensitivity_n = (n-n_nominal_6pf)/amount_perturbed sensitivty_Ea = (Ea - Ea_nominal_6pf)/amount_perturbed sensitivity_c = (c - c_nominal_6pf)/amount_perturbed sensitivity_d = (d - d_nominal_6pf)/amount_perturbed sensitivity_f = (f - f_nominal_6pf)/amount_perturbed A_list_6pf.append(sensitivty_A) n_list_6pf.append(sensitivity_n) Ea_list_6pf.append(sensitivty_Ea) c_list_6pf.append(sensitivity_c) d_list_6pf.append(sensitivity_d) f_list_6pf.append(sensitivity_f) ```
github_jupyter
# Data Types When reading in a data set, pandas will try to guess the data type of each column like float, integer, datetime, bool, etc. In Pandas, strings are called "object" dtypes. However, Pandas does not always get this right. That was the issue with the World Bank projects data. Hence, the dtype was specified as a string: ``` df_projects = pd.read_csv('../data/projects_data.csv', dtype=str) ``` Run the code cells below to read in the indicator and projects data. Then run the following code cell to see the dtypes of the indicator data frame. ``` # Run this code cell import pandas as pd # read in the population data and drop the final column df_indicator = pd.read_csv('../data/population_data.csv', skiprows=4) df_indicator.drop(['Unnamed: 62'], axis=1, inplace=True) # read in the projects data set with all columns type string df_projects = pd.read_csv('../data/projects_data.csv', dtype=str) df_projects.drop(['Unnamed: 56'], axis=1, inplace=True) df_indicator.head(2) # Run this code cell df_indicator.dtypes ``` These results look reasonable. Country Name, Country Code, Indicator Name and Indicator Code were all read in as strings. The year columns, which contain the population data, were read in as floats. # Exercise 1 Since the population indicator data was read in correctly, you can run calculations on the data. In this first exercise, sum the populations of the United States, Canada, and Mexico by year. ``` # TODO: Calculate the population sum by year for Canada, # the United States, and Mexico. # the keepcol variable makes a list of the column names to keep. You can use this if you'd like keepcol = ['Country Name'] for i in range(1960, 2018, 1): keepcol.append(str(i)) # TODO: In the df_nafta variable, store a data frame that only contains the rows for # Canada, United States, and Mexico. 
df_nafta = df_indicator[(df_indicator['Country Name']== "Canada") | (df_indicator['Country Name']== "United States") | (df_indicator['Country Name']== "Mexico")].iloc[:,] #print(df_nafta) # TODO: Calculate the sum of the values in each column in order to find the total population by year. # You can use the keepcol variable if you want to control which columns get outputted df_nafta.sum(axis =0)[keepcol] ``` # Exercise 2 Now, run the code cell below to look at the dtypes for the projects data set. They should all be "object" types, i.e., strings, because that's what was specified in the code when reading in the csv file. As a reminder, this was the code: ``` df_projects = pd.read_csv('../data/projects_data.csv', dtype=str) ``` ``` # Run this code cell df_projects.dtypes #df_projects.shape ``` Many of these columns should be strings, so there's no problem; however, a few columns should be other data types. For example, `boardapprovaldate` should be a datetime and `totalamt` should be an integer. You'll learn about datetime formatting in the next part of the lesson. For this exercise, focus on the 'totalamt' and 'lendprojectcost' columns. Run the code cell below to see what that data looks like. ``` # Run this code cell df_projects[['totalamt', 'lendprojectcost']].head() # Run this code cell to take the sum of the total amount column df_projects['totalamt'].sum() ``` What just happened? Pandas treated the totalamts like strings. In Python, adding strings concatenates the strings together. There are a few ways to remedy this. When using pd.read_csv(), you could specify the column type for every column in the data set. The pd.read_csv() dtype option can accept a dictionary mapping each column name to its data type. You could also specify the `thousands` option with `thousands=','`. This specifies that thousands are separated by a comma in this data set. However, this data is somewhat messy, contains missing values, and has a lot of columns. 
It might be faster to read in the entire data set with string types and then convert individual columns as needed. For this next exercise, convert the `totalamt` column from a string to an integer type. ``` # TODO: Convert the totalamt column from a string to a float and save the results back into the totalamt column # Step 1: Remove the commas from the 'totalamt' column # HINT: https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.Series.str.replace.html #df_projects['totalamt'] =df_projects['totalamt'].str.replace(",", "") # Step 2: Convert the 'totalamt' column from an object data type (ie string) to an integer data type. # HINT: https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.to_numeric.html df_projects['totalamt'] = pd.to_numeric(df_projects['totalamt'].str.replace(",", "")) ``` # Conclusion With messy data, you might find it easier to read in everything as a string; however, you'll sometimes have to convert those strings to more appropriate data types. When you output the dtypes of a dataframe, you'll generally see these values in the results: * float64 * int64 * bool * datetime64 * timedelta * object where timedelta is the difference between two datetimes and object is a string. As you've seen here, you sometimes need to convert data types from one type to another type. Pandas has a few different methods for converting between data types, and here are link to the documentation: * [astype](https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.astype.html#pandas.DataFrame.astype) * [to_datetime](https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.to_datetime.html#pandas.to_datetime) * [to_numeric](https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.to_numeric.html#pandas.to_numeric) * [to_timedelta](https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.to_timedelta.html#pandas.to_timedelta)
github_jupyter
``` from music21 import * import numpy as np import torch import pretty_midi import os import sys import pickle import time import random import re class MusicData(object): def __init__(self, abc_file, culture= None): self.stream = None self.metadata = dict() self.description = None self.midi = None self.torch_matrix = None self.title = None self.key = None self.meter = None self.culture = culture self.gene = None self.valid = True self.set_proporties(abc_file) def set_proporties(self, abc_file): # print(abc_file.split('/')[-1]) step_list = ['stream','metadata','key','meter','others'] try: step_counter = 0 self.stream = converter.parse(abc_file) step_counter = 1 self.metadata = dict(self.stream.metadata.all()) step_counter = 2 self.key = self.metadata['key'] = str(self.stream.flat.getElementsByClass('Key')[0]) step_counter = 3 self.meter = self.metadata['meter'] = str(self.stream.flat.getElementsByClass('TimeSignature')[0])[1:-1].split()[-1] step_counter = 4 self.title = self.metadata['title'] self.midi = f"/gpfsnyu/home/yz6492/multimodal/data/midi/{self.title}.mid" if 'localeOfComposition' in self.metadata and self.culture is None: self.culture = self.culture_analyzer(self.metadata['localeOfComposition']) if 'gene' in self.metadata: pass except: self.valid = False print(f'Error in parsing: id - {step_list[step_counter]}') return try: mf = midi.translate.streamToMidiFile(self.stream) mf.open(self.midi, 'wb') mf.write() mf.close() self.torch_matrix = self.melody_to_numpy(fpath = self.midi) except Exception as e: self.stream, flag = self.emergence_fix(abc_file) # if flag is False: # self.stream, flag = self.emergence_fix(abc_file) print(f'Error in Matrix. Fixed? 
{flag}') self.description = self.generate_description() if self.torch_matrix is None: self.valid = False self.stream = None # for data size compression def emergence_fix(self, abc_file): with open(abc_file, 'r') as f: input_list = [line for line in f] output_list = input_list.copy() for i, line in enumerate(input_list): if 'L:' in line: if line[-3:] == '16\n': output_list[i] = 'L:1/8\n' elif line[-2:] == '8\n': output_list[i] = 'L:1/4\n' with open(abc_file, 'w') as f: f.writelines(output_list) # fix finished. now test try: self.stream = converter.parse(abc_file) mf = midi.translate.streamToMidiFile(self.stream) mf.open(self.midi, 'wb') mf.write() mf.close() self.torch_matrix = self.melody_to_numpy(fpath = self.midi) self.valid = True return stream, True except Exception as e: self.valid = False # do not use this object return stream, False def culture_analyzer(self, text): if 'china' in text.lower(): return 'Chinese' if 'irish' in text.lower(): return 'Irish' if 'english' in text.lower(): return 'English' def melody_to_numpy(self, fpath=None, unit_time=0.125, take_rhythm=False, ): music = pretty_midi.PrettyMIDI(fpath) notes = music.instruments[0].notes t = 0. roll = [] # print(notes[0], notes[-1]) for note in notes: # print(t, note) elapsed_time = note.start - t if elapsed_time > 0.: steps = torch.zeros((int(round(elapsed_time / unit_time)), 130)) steps[range(int(round(elapsed_time / unit_time))), 129] += 1. roll.append(steps) n_units = int(round((note.end - note.start) / unit_time)) steps = torch.zeros((n_units, 130)) if take_rhythm: steps[0, 60] += 1 else: steps[0, note.pitch] += 1 steps[range(1, n_units), 128] += 1 roll.append(steps) t = note.end return torch.cat(roll, 0) def generate_description(self): # order shuffle (total 6 possibilities) order = random.randint(0,5) # connector to decide grammar connecter = [random.randint(0,1), random.randint(0,1)] sequences = [ f'This is a song in {self.key}. It has a {self.meter} tempo. 
It is a {self.culture} song.', f'This is a song in {self.key}. This is in {self.culture} style with a beat of {self.meter}.', f'This is a song in {self.key}. This is a {self.culture} style song with a rhythm of {self.meter}.', f'This is a {self.key} album. They have got a {self.meter} tempo. It is a song from {self.culture}.', f'This is {self.key} song. This does have a tempo of {self.meter}. It is a song in {self.culture} style.', f'That is a {self.key} song. The tempo is {self.meter}. It is a song of the {self.culture} style.', f'That is a {self.key} hit. There is a pace of {self.meter}. It is a album in {self.culture} style.', f'This is a song in {self.key} with a {self.meter} tempo and it is a {self.culture} style song.', f'It is a {self.meter} pace {self.key} piece, and it is a {self.culture} type piece.', f'This is a {self.meter} tempo composition in {self.key} and is a {self.culture} hit.', f'It is a song of {self.culture} theme. It is a {self.meter} tempo song in {self.key}.', f'This is a song of {self.culture} theme. It is a {self.meter}-tempo composition in {self.key}.', f'This is an album about {self.culture} theme. This is a record of {self.meter} tempo in {self.key}', ] return sequences[random.randint(0, len(sequences)-1)] ```
github_jupyter
``` import matplotlib.pyplot as plt import numpy as np from tqdm import tqdm %matplotlib inline import datetime import cPickle as pickle import csv import numpy as np import random import sys maxInt = sys.maxsize decrement = True while decrement: # decrease the maxInt value by factor 10 # as long as the OverflowError occurs. decrement = False try: csv.field_size_limit(maxInt) except OverflowError: maxInt = int(maxInt/10) decrement = True ``` # get term-/document-frequency ``` csv_reader = csv.reader(open('../data/raw/NELA-17/train.csv', 'r')) tkn2tf = {} len_heads = [] #1 len_paras = [] #2 cnt_paras = [] #3 len_bodys = [] #4 # csv data: 0:id, 1:head, 2:body, 3:label print datetime.datetime.now().isoformat() for n, row in enumerate(csv_reader): if (n+1) % 100000 == 0: print n+1, head = row[1].lower().strip() for tkn in head.split(): if tkn in tkn2tf: tkn2tf[tkn] += 1 else: tkn2tf[tkn] = 1 len_heads.append(len(head.split())) #1 body = row[2].lower().strip() tkn_para = [] for para in body.split('<eop>'): if para and para != ' ': _para = para + '<eop>' len_para = len(_para.split()) len_paras.append(len_para) #2 tkn_para.append(_para) cnt_paras.append(len(tkn_para)) #3 body_split = [] for tkn in body.split(): if tkn in tkn2tf: tkn2tf[tkn] += 1 else: tkn2tf[tkn] = 1 body_split.append(tkn) len_bodys.append(len(body_split)) #4 print n+1, 'Done' print datetime.datetime.now().isoformat() print 'voca size :', len(tkn2tf) sorted_token = sorted(tkn2tf.items(), key=lambda kv: kv[1], reverse=True) tkn2idx = {} for idx, (tkn, _) in tqdm(enumerate(sorted_token)): tkn2idx[tkn] = idx + 2 tkn2idx['<UNK>'] = 1 tkn2idx[''] = 0 if len(tkn2idx) == len(tkn2tf)+2: print len(tkn2idx), 'No problem' print print 'Show top-10 tkn:' for tkn, freq in sorted_token[:10]: print tkn,':',freq print '' with open('../data/nela-17/whole/dic_mincut0.txt', 'wb') as f: for key in tkn2idx.keys(): f.write(key+'\n') tkn2tf_mincut5 = {} for tkn, tf in tkn2tf.items(): if tf < 2: continue tkn2tf_mincut5[tkn] = tf 
print 'voca size :', len(tkn2tf_mincut5) tkn2tf_mincut5['<EOS>'] = tkn2tf_mincut5['<eos>'] tkn2tf_mincut5['<EOP>'] = tkn2tf_mincut5['<eop>'] del tkn2tf_mincut5['<eos>'] del tkn2tf_mincut5['<eop>'] import operator sorted_voca = sorted(tkn2tf_mincut5.items(), key=operator.itemgetter(1)) len(sorted_voca) list_voca_mincut = [] list_voca_mincut.append('') # PAD list_voca_mincut.append('<UNK>') # UNK list_voca_mincut.append('<EOS>') # EOS list_voca_mincut.append('<EOP>') # EOP for word, idx in sorted_voca: if word=='<UNK>' or word=='<EOP>' or word=='<EOS>': print("existing word", word) continue else: list_voca_mincut.append(word) len(list_voca_mincut) with open('../data/nela-17/whole/dic_mincutN.txt', 'wb') as f: for i in range(len(list_voca_mincut)): f.write(list_voca_mincut[i]+'\n') dic_voca = {} for voca in list_voca_mincut: dic_voca[voca] = len(dic_voca) print(dic_voca[''], dic_voca['<UNK>'], dic_voca['<EOS>'], dic_voca['<EOP>']) with open('../data/nela-17/whole/dic_mincutN.pkl', 'wb') as f: pickle.dump(dic_voca, f) ``` #### for data processing ``` import copy dic_voca_lower = copy.deepcopy(dic_voca) dic_voca_lower['<eos>'] = dic_voca_lower['<EOS>'] dic_voca_lower['<eop>'] = dic_voca_lower['<EOP>'] del dic_voca_lower['<EOS>'] del dic_voca_lower['<EOP>'] len(dic_voca_lower) print(dic_voca_lower[''], dic_voca_lower['<UNK>'], dic_voca_lower['<eos>'], dic_voca_lower['<eop>']) ``` ## stats ``` import csv import sys import numpy as np data= [] with open('../data/raw/NELA-17/train.csv', 'r') as f: data_csv = csv.reader(f, delimiter=',') for row in data_csv: data.append(row) def print_info(data): print("mean", np.average(data)) print("std", np.std(data)) print("max", np.max(data)) print("95.xx coverage", np.average(data) + 2*np.std(data) ) print("99.73 coverage", np.average(data) + 3*np.std(data) ) print("99.95 coverage", np.average(data) + 3.5*np.std(data) ) print("99.99 coverage", np.average(data) + 4*np.std(data) ) head = [x[1].strip() for x in data] head_len = 
[len(x.split()) for x in head] print('head_len') print_info(head_len) body = [x[2].strip() for x in data] body_len = [len(x.split()) for x in body ] print('body_len') print_info(body_len) context_len = [len(x.split('<EOP>')) for x in body] print('context_len') print_info(context_len) body_sentence = [] for sent in body: sent = sent.split('<EOP>') body_sentence.extend(sent) body_len = [ len(x.split()) for x in body_sentence ] print('body_len') print_info(body_len) ``` # encode to numpy ``` def fit_length(data, max_len_t, max_len_b): data_t, data_b = data list_zeros = np.zeros(max_len_b, 'int32').tolist() fl_data_t = [] for datum in data_t: try: datum = list(datum) except: pass _len = len(datum) if _len >= max_len_t: fl_data_t.append( datum[:max_len_t] ) else: fl_data_t.append( datum + list_zeros[:(max_len_t-_len)] ) fl_data_b = [] for datum in data_b: try: datum = list(datum) except: pass _len = len(datum) if _len >= max_len_b: fl_data_b.append( datum[:max_len_b] ) else: fl_data_b.append( datum + list_zeros[:(max_len_b-_len)] ) np_data_t = np.asarray(fl_data_t, dtype='int32') np_data_b = np.asarray(fl_data_b, dtype='int32') data = [np_data_t, np_data_b] return data csv_reader = csv.reader(open('../data/raw/NELA-17/train.csv', 'r')) print datetime.datetime.now().isoformat() ids = [] heads = [] bodys = [] labels = [] for n, row in enumerate(csv_reader): if (n+1) % 10000 == 0: print n+1, ids.append(row[0]) labels.append(int(row[3])) head = [] for tkn in row[1].lower().strip().split(): if tkn in dic_voca_lower: head.append(dic_voca_lower[tkn]) else: head.append(1) # 0: <UNK> heads.append(head) body = [] for tkn in row[2].lower().strip().split(): if tkn in dic_voca_lower: body.append(dic_voca_lower[tkn]) else: body.append(1) # 0: <UNK> bodys.append(body) print n+1, 'Done' print datetime.datetime.now().isoformat() # ~5 mins print datetime.datetime.now().isoformat() [np_heads, np_bodys] = fit_length([heads, bodys], 25, 2000) print datetime.datetime.now().isoformat() # ~3 
mins print datetime.datetime.now().isoformat() t_trainpath = '../data/nela-17/whole/train/train_title.npy' np.save(t_trainpath, np_heads) b_trainpath = '../data/nela-17/whole/train/train_body.npy' np.save(b_trainpath, np_bodys) l_trainpath = '../data/nela-17/whole/train/train_label.npy' np.save(l_trainpath, labels) print datetime.datetime.now().isoformat() ``` # devset ``` csv_reader = csv.reader(open('../data/raw/NELA-17/dev.csv', 'r')) print datetime.datetime.now().isoformat() ids_dev = [] heads_dev = [] bodys_dev = [] labels_dev = [] for n, row in enumerate(csv_reader): if (n+1) % 10000 == 0: print n+1, ids_dev.append(row[0]) labels_dev.append(int(row[3])) head = [] for tkn in row[1].lower().strip().split(): if tkn in dic_voca_lower: head.append(dic_voca_lower[tkn]) else: head.append(1) # 0: UNK heads_dev.append(head) body = [] for tkn in row[2].lower().strip().split(): if tkn in dic_voca_lower: body.append(dic_voca_lower[tkn]) else: body.append(1) # 0: UNK bodys_dev.append(body) print n+1, 'Done' print datetime.datetime.now().isoformat() print datetime.datetime.now().isoformat() [np_heads_dev, np_bodys_dev] = fit_length([heads_dev, bodys_dev], 25, 2000) print datetime.datetime.now().isoformat() # ~3 mins print datetime.datetime.now().isoformat() t_trainpath = '../data/nela-17/whole/dev/dev_title.npy' np.save(t_trainpath, np_heads_dev) b_trainpath = '../data/nela-17/whole/dev/dev_body.npy' np.save(b_trainpath, np_bodys_dev) l_trainpath = '../data/nela-17/whole/dev/dev_label.npy' np.save(l_trainpath, labels_dev) print datetime.datetime.now().isoformat() ``` # testset ``` csv_reader = csv.reader(open('../data/raw/NELA-17/test.csv', 'r')) print datetime.datetime.now().isoformat() ids_dev = [] heads_dev = [] bodys_dev = [] labels_dev = [] for n, row in enumerate(csv_reader): if (n+1) % 10000 == 0: print n+1, ids_dev.append(row[0]) labels_dev.append(int(row[3])) head = [] for tkn in row[1].lower().strip().split(): if tkn in dic_voca_lower: 
head.append(dic_voca_lower[tkn]) else: head.append(1) # 0 - UNK heads_dev.append(head) body = [] for tkn in row[2].lower().strip().split(): if tkn in dic_voca_lower: body.append(dic_voca_lower[tkn]) else: body.append(1) # 0 - UNK bodys_dev.append(body) print n+1, 'Done' print datetime.datetime.now().isoformat() print datetime.datetime.now().isoformat() [np_heads_dev, np_bodys_dev] = fit_length([heads_dev, bodys_dev], 25, 2000) print datetime.datetime.now().isoformat() # ~3 mins print datetime.datetime.now().isoformat() t_trainpath = '../data/nela-17/whole/test/test_title.npy' np.save(t_trainpath, np_heads_dev) b_trainpath = '../data/nela-17/whole/test/test_body.npy' np.save(b_trainpath, np_bodys_dev) l_trainpath = '../data/nela-17/whole/test/test_label.npy' np.save(l_trainpath, labels_dev) print datetime.datetime.now().isoformat() ``` # debugset ``` print datetime.datetime.now().isoformat() t_trainpath = '../data/nela-17//whole/debug/debug_title.npy' np.save(t_trainpath, np_heads_dev[:200]) b_trainpath = '../data/nela-17/whole/debug/debug_body.npy' np.save(b_trainpath, np_bodys_dev[:200]) l_trainpath = '../data/nela-17/whole/debug/debug_label.npy' np.save(l_trainpath, labels_dev[:200]) print datetime.datetime.now().isoformat() with open('../data/nela-17/whole/dic_mincutN.txt') as f: test_list_voca = f.readlines() test_list_voca = [x.strip() for x in test_list_voca] from nlp_vocab import Vocab tt = Vocab(test_list_voca) print(tt.index2sent(np_heads_dev[100])) ``` # para ver. 
``` SEED = 448 random.seed(SEED) csv_reader = csv.reader(open('version2/data_para_train.csv', 'r')) print datetime.datetime.now().isoformat() data = [] true_data = [] for n, row in enumerate(csv_reader): if (n+1) % 100000 == 0: print n+1, if row[3] == "1": data.append(row) else: true_data.append(row) random.shuffle(true_data) data += true_data[:len(data)] print datetime.datetime.now().isoformat() ids_para = [] heads_para = [] bodys_para = [] labels_para = [] for n, row in enumerate(data): if (n+1) % 10000 == 0: print n+1, ids_para.append(row[0]) labels_para.append(int(row[3])) head = [] for tkn in row[1].split(): if tkn in tkn2idx_mincut5: head.append(tkn2idx_mincut5[tkn]) else: head.append(1) heads_para.append(head) body = [] for tkn in row[2].split(): if tkn in tkn2idx_mincut5: body.append(tkn2idx_mincut5[tkn]) else: body.append(1) bodys_para.append(body) print n+1, ': Done' print datetime.datetime.now().isoformat() print datetime.datetime.now().isoformat() [np_heads_para, np_bodys_para] = fit_length([heads_para, bodys_para], 49, 170) print 'numpy: Done' print datetime.datetime.now().isoformat() # ~3 mins print datetime.datetime.now().isoformat() t_trainpath = 'nps/train_para_head_mincut5' np.save(t_trainpath, np_heads_para) b_trainpath = 'nps/train_para_body_mincut5' np.save(b_trainpath, np_bodys_para) l_trainpath = 'nps/train_para_label_mincut5' np.save(l_trainpath, labels_para) print 'save: Done' print datetime.datetime.now().isoformat() import numpy as np l_trainpath = np.load('nps/train_para_label_mincut5.npy') l_trainpath.shape csv_reader = csv.reader(open('version2/data_para_dev.csv', 'r')) print datetime.datetime.now().isoformat() ids_para_dev = [] heads_para_dev = [] bodys_para_dev = [] labels_para_dev = [] for n, row in enumerate(csv_reader): if (n+1) % 10000 == 0: print n+1, ids_para_dev.append(row[0]) labels_para_dev.append(int(row[3])) head = [] for tkn in row[1].split(): if tkn in tkn2idx_mincut5: head.append(tkn2idx_mincut5[tkn]) else: 
head.append(1) heads_para_dev.append(head) body = [] for tkn in row[2].split(): if tkn in tkn2idx_mincut5: body.append(tkn2idx_mincut5[tkn]) else: body.append(1) bodys_para_dev.append(body) print n+1, 'Done' print datetime.datetime.now().isoformat() print datetime.datetime.now().isoformat() [np_heads_para_dev, np_bodys_para_dev] = fit_length([heads_para_dev, bodys_para_dev], 49, 170) print datetime.datetime.now().isoformat() # ~3 mins print datetime.datetime.now().isoformat() t_trainpath = 'nps/valid_para_head_mincut5' np.save(t_trainpath, np_heads_para_dev) b_trainpath = 'nps/valid_para_body_mincut5' np.save(b_trainpath, np_bodys_para_dev) l_trainpath = 'nps/valid_para_label_mincut5' np.save(l_trainpath, labels_para_dev) print datetime.datetime.now().isoformat() ``` # testset ``` csv_reader = csv.reader(open('version2/data_whole_test.csv', 'r')) print datetime.datetime.now().isoformat() ids_test = [] heads_test = [] bodys_test = [] labels_test = [] for n, row in enumerate(csv_reader): if (n+1) % 10000 == 0: print n+1, ids_test.append(row[0]) labels_test.append(int(row[3])) head = [] for tkn in row[1].split(): if tkn in tkn2idx_mincut5: head.append(tkn2idx_mincut5[tkn]) else: head.append(1) heads_test.append(head) body = [] for tkn in row[2].split(): if tkn in tkn2idx_mincut5: body.append(tkn2idx_mincut5[tkn]) else: body.append(1) bodys_test.append(body) print n+1, 'Done' print datetime.datetime.now().isoformat() print datetime.datetime.now().isoformat() [np_heads_test, np_bodys_test] = fit_length([heads_test, bodys_test], 49, 1200) print datetime.datetime.now().isoformat() # ~3 mins print datetime.datetime.now().isoformat() t_trainpath = 'nps/test_whole_head_mincut5' np.save(t_trainpath, np_heads_test) b_trainpath = 'nps/test_whole_body_mincut5' np.save(b_trainpath, np_bodys_test) l_trainpath = 'nps/test_whole_label_mincut5' np.save(l_trainpath, labels_test) print datetime.datetime.now().isoformat() ``` # test stats. 
``` csv_reader = csv.reader(open('version2/data_whole_test.csv', 'r')) len_heads_test = [] #1 len_paras_test = [] #2 cnt_paras_test = [] #3 len_bodys_test = [] #4 labels_test = [] print datetime.datetime.now().isoformat() for n, row in enumerate(csv_reader): if (n+1) % 100000 == 0: print n+1, labels_test.append(int(row[3])) head = row[1] len_heads_test.append(len(head.split())) #1 body = row[2] tkn_para = [] for para in body.split('<EOP>'): if para and para != ' ': _para = para + '<EOP>' len_para = len(_para.split()) len_paras_test.append(len_para) #2 tkn_para.append(_para) cnt_paras_test.append(len(tkn_para)) #3 body_split = body.split() len_bodys_test.append(len(body_split)) #4 print n+1, 'Done' print datetime.datetime.now().isoformat() #1 len_titles = np.array(len_heads_test) print len_titles.tolist().count(1) print np.max(len_titles), np.min(len_titles), np.mean(len_titles), np.std(len_titles) len_t = len(len_titles) cnt_t = sum(len_titles <= 49) print cnt_t, len_t, cnt_t*1.0/len_t #2 len_paras = np.array(len_paras_test) print len_paras.tolist().count(1) print np.max(len_paras), np.min(len_paras), np.mean(len_paras), np.std(len_paras) len_p = len(len_paras) cnt_p = sum(len_paras <= 170) print cnt_p, len_p, cnt_p*1.0/len_p #3 cnt_para = np.array(cnt_paras_test) print cnt_para.tolist().count(1) print np.max(cnt_para), np.min(cnt_para), np.mean(cnt_para), np.std(cnt_para), np.median(cnt_para) len_cp = len(cnt_para) cnt_cp = sum(cnt_para <= 20) print cnt_cp, len_cp, cnt_cp*1.0/len_cp #4 len_bodys = np.array(len_bodys_test) print len_bodys.tolist().count(2) print np.max(len_bodys), np.min(len_bodys), np.mean(len_bodys), np.std(len_bodys) len_b = len(len_bodys) cnt_b = sum(len_bodys <= 1200) print cnt_b, len_b, cnt_b*1.0/len_b plt.figure(1) plt.hist(len_paras, range=[0, 500], normed=False, bins=500) tkn2df = {} for tkn in tkn2tf.keys(): tkn2df[tkn] = 0 csv_reader = csv.reader(open('final_final/data_whole_training.csv', 'r')) print datetime.datetime.now().isoformat() 
for n, row in enumerate(csv_reader): if (n+1) % 100000 == 0: print n+1, tmp_tkn = [] head = row[1] body = row[2] doc = ' '.join([head, body]) for tkn in doc.split(): if tkn in tmp_tkn: continue else: tkn2df[tkn] += 1 tmp_tkn.append(tkn) print n, 'Done' print datetime.datetime.now().isoformat() ```
github_jupyter
<table> <tr> <td ><h1><strong>NI SystemLink Analysis Automation</strong></h1></td> </tr> </table> This notebook is an example for how you can analyze your data with NI SystemLink Analysis Automation. It forms the core of the analysis procedure, which includes the notebook, the query, and the execution parameters (parallel or comparative). The [procedure is uploaded to Analysis Automation](https://www.ni.com/documentation/en/systemlink/latest/analysis/creating-anp-with-jupyter/). The output is a report in form of PDF documents or HTML pages. <br> <hr> ## Prerequisites Before you run this example, you need to [create a DataFinder search query](https://www.ni.com/documentation/en/systemlink/latest/datanavigation/finding-data-with-advanced-search/) in Data Navigation to find the example files (e.g. 'TR_M17_QT_42-1.tdms'). Save this query on the server. <hr> ## Summary This example exercises the SystemLink TDMReader API to access bulk data (see `data_api`) and/or descriptive data (see `metadata_api`). When the notebook executes, Analysis Automation provides data links which the API uses to access content. It also shows how to select channels from two channel groups and display the data in two graphs. The channel values from each channel group populate arrays, which you can use to further analyze and visualize your data. Furthermore, the example uses two procedure parameters that write a comment to the first graph and select a channel to display in the second graph (refer to __Plot Graph__ below). <hr> ## Imports This example uses the `TDMReader` API to work with the bulk data and meta data of the given files. `Matplotlib` is used for plotting the graph. The `scrapbook` is used to set and display the results in the analysis procedure results list. 
``` import systemlink.clients.nitdmreader as tdmreader metadata_api = tdmreader.MetadataApi() data_api = tdmreader.DataApi() import matplotlib.pyplot as plt import scrapbook as sb def get_property(element, property_name): """Gets a property of the given element. The element can be a file, channel group, or channel. Args: element: Element to get the property from. property_name: Name of the property to get. Returns: The according property of the element or ``None`` if the property doesn't exist. """ return next((e.value for e in element.properties.properties if e.name == property_name), None) ``` ## Define Notebook Parameters a) In a code cell (*called __parameters cell__*), define the parameters. Fill in the needed values/content parameters in the code cell below. E.g. **Defined parameters:** - `comment_group_1`: Writes a comment into the box of the first group.<br> (Default value = `Checked`) - `shown_channel_index`: Any valid channel index of the second group. This channel is plotted in the second graph. <br> (Default value = `2`) Your code may look like the following: ``` comment_group_1 = "Checked" shown_channel_index = 2 ``` b) Select this code cell (*__parameters cell__*) and open on the __Property Inspector__ panel on the right sidebar to add the parameters, their default values, to the __Cell Metadata__ code block. For example, your code may look like the following: ```json { "papermill": { "parameters": { "comment_group_1": "Checked", "shown_channel_index": 2 } }, "tags": [ "parameters" ] } ``` You can use the variables of the __parameters__ cell content in all code cells below. ## Retrieve Metadata with a Data Link A data link is the input for each __Analysis Automation procedure__ that uses a query to collect specific data items. A `data_link` contains a list of one or more elements that point to a list of files, channel groups, or channels (depending on the query result type). 
This example shows how the Metadata API accesses the `file_info` structure from the file, through the `groups`, and down to the `channels` level. This example calculates the absolute minimum and absolute maximum value of all channels in each group and displays these values in the report.

```
data_links = ni_analysis_automation["data_links"]
file_ids = [d["fileId"] for d in data_links]
file_infos = await metadata_api.get_multiple_file_info(tdmreader.FileList(file_ids))
file_info = file_infos[0]

test_file_name = get_property(file_info, "name")
program_name = get_property(file_info, "Test~Procedure")

group_names = []
channels = []
formatted_properties = []
for group in file_info.groups:
    group_names.append(group.name)
    channels.append(group.channels)
    max_values_of_group = []
    min_values_of_group = []
    mean_values_of_group = []
    for channel in group.channels:
        # A missing property falls back to NaN so one bad channel cannot crash the report.
        minimum = float(get_property(channel, "minimum") or "NaN")
        maximum = float(get_property(channel, "maximum") or "NaN")
        mean_values_of_group.append((minimum + maximum) / 2)
        max_values_of_group.append(maximum)
        min_values_of_group.append(minimum)
    # Calculate statistical values from metadata.
    # BUGFIX: the absolute minimum is the smallest of the channel minima and the
    # absolute maximum the largest of the channel maxima; the original code had the
    # two aggregations swapped and read only the maxima list for both values.
    abs_min = min(min_values_of_group)
    abs_max = max(max_values_of_group)
    abs_mean = sum(mean_values_of_group) / float(len(mean_values_of_group))
    formatted_properties.append(f"Absolute Maximum: {abs_max:.3f} °C"
                                + f",Absolute Minimum: {abs_min:.3f} °C"
                                + f",Mean Value: {abs_mean:.3f} °C")

# Populate the info box of the plot with the notebook parameters
formatted_properties[1] += f",Parameter: {comment_group_1}"
formatted_properties[0] += f",Channel #: {shown_channel_index}"
```

## Retrieve Bulk Data with a Data Link

Use the TDMReader API to work with bulk data. There are multiple ways for retrieving the data. The access path used in this example shows you how to loop over all groups and over all channels within the groups.
The resulting channel specifiers (`chn_specs`) are used in the next step to `query` the bulk data and retrieve all channel `values` from the queried data.

```
bulk_data = []
file_id = data_links[0]['fileId']
for group in file_info.groups:
    chn_specs = []
    for channel in group.channels:
        channel_specifier = tdmreader.OneChannelSpecifier(
            file_id=file_id,
            group_name=group.name,
            channel_name=channel.name)
        chn_specs.append(channel_specifier)
    xy_chns = tdmreader.ChannelSpecificationsXyChannels(y_channels=chn_specs)
    channel_specs = tdmreader.ChannelSpecifications(xy_channels=[xy_chns])
    query = tdmreader.QueryDataSpecifier(channel_specs)
    data = await data_api.query_data(query)
    # get numeric y-data
    y_channels = data.data[0].y
    values = list(map(lambda c: c.numeric_data, y_channels))
    bulk_data.append(values)
```

## Plot Graph

The next two cells plot a graph with two areas and two sub plots, using the Python `matplotlib.pyplot` module as `plt`.

```
# Helper method and constant for plotting data
curr_fontsize = 18
axis_label_fontsize = curr_fontsize - 5


def plot_area(subplot, area_bulk_data, area_meta_data, enable_channel_selector, area_properties):
    """
    Plot a sub print area of a figure

    :param subplot: Object of the plot print area
    :param area_bulk_data: Channel bulk data to print
    :param area_meta_data: Channel metadata (name, properties, ...)
    :param enable_channel_selector: True, when property shown_channel_index should be used
    :param area_properties: String with comma-separated parts as content for the info box area
                            e.g.: "Absolute Maximum: 12.6 °C,Absolute Minimum: -22.3 °C"
    """
    # Place a text box below the legend
    subplot.text(1.05, 0.0, area_properties.replace(",", "\n"),
                 transform=subplot.transAxes, ha="left", va="bottom")
    subplot.grid(True)
    subplot.set_xlabel('Time [s]', fontsize=axis_label_fontsize)
    # Fall back to an empty unit so a channel without "unit_string" still plots.
    unit = get_property(area_meta_data[0], "unit_string") or ""
    subplot.set_ylabel('Amplitudes ['+unit+']', fontsize=axis_label_fontsize)
    # BUGFIX: the original if/else plotted every channel in both branches, so the
    # shown_channel_index notebook parameter had no effect. When the channel
    # selector is enabled, only the selected (1-based) channel is plotted.
    for i, channel in enumerate(area_meta_data):
        if not enable_channel_selector or i == (shown_channel_index - 1):
            subplot.plot(area_bulk_data[i], label=channel.name)  # label => name of the curve = channel
    # Place a legend to the right of this subplot.
    subplot.legend(bbox_to_anchor=(1.05, 1), loc='upper left',
                   borderaxespad=0., fontsize=axis_label_fontsize)


# Create plot and print data
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(15, 10))
fig.suptitle('Temperature Monitoring File: ' + test_file_name
             + ' Test program: ' + program_name,
             fontsize=curr_fontsize, color='blue')
ax1.set_title(group_names[1], fontsize=curr_fontsize)
plot_area(ax1, bulk_data[1], channels[1], False, formatted_properties[1])
ax2.set_title(group_names[0], fontsize=curr_fontsize)
plot_area(ax2, bulk_data[0], channels[0], True, formatted_properties[0])
plt.tight_layout()
plt.show()
```

## Add Result Summary

Each Scrap recorded with `sb.glue()` is displayed for each procedure on the __History__ tab in Analysis Automation.

```
sb.glue("File", test_file_name)
sb.glue("Test", program_name)
sb.glue("Comment", comment_group_1)
sb.glue("Displayed Channel #", shown_channel_index)
```
github_jupyter
<a href="https://colab.research.google.com/github/Serbeld/ArtificialVisionForQualityControl/blob/master/Copia_de_Yolo_Step_by_Step.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> **Outline of Steps** + Initialization + Download COCO detection data from http://cocodataset.org/#download + http://images.cocodataset.org/zips/train2014.zip <= train images + http://images.cocodataset.org/zips/val2014.zip <= validation images + http://images.cocodataset.org/annotations/annotations_trainval2014.zip <= train and validation annotations + Run this script to convert annotations in COCO format to VOC format + https://gist.github.com/chicham/6ed3842d0d2014987186#file-coco2pascal-py + Download pre-trained weights from https://pjreddie.com/darknet/yolo/ + https://pjreddie.com/media/files/yolo.weights + Specify the directory of train annotations (train_annot_folder) and train images (train_image_folder) + Specify the directory of validation annotations (valid_annot_folder) and validation images (valid_image_folder) + Specify the path of pre-trained weights by setting variable *wt_path* + Construct equivalent network in Keras + Network arch from https://github.com/pjreddie/darknet/blob/master/cfg/yolo-voc.cfg + Load the pretrained weights + Perform training + Perform detection on an image with newly trained weights + Perform detection on a video with newly trained weights # Initialization ``` !pip install h5py import h5py from google.colab import drive,files drive.mount('/content/drive') import sys sys.path.append('/content/drive/My Drive/keras-yolo2/') !pip install tensorflow-gpu==2.0.0-alpha0 from keras.models import Sequential, Model from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda from keras.layers.advanced_activations import LeakyReLU from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard from keras.optimizers import SGD, 
Adam, RMSprop from keras.layers.merge import concatenate import matplotlib.pyplot as plt import keras.backend as K import tensorflow as tf import imgaug as ia from tqdm import tqdm from imgaug import augmenters as iaa import numpy as np import pickle import os, cv2 from preprocessing import parse_annotation, BatchGenerator from utils import WeightReader, decode_netout, draw_boxes os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "" LABELS = ['COLOR HDPE', 'PET', 'WHITE HDPE'] IMAGE_H, IMAGE_W = 416, 416 GRID_H, GRID_W = 13 , 13 BOX = 5 CLASS = len(LABELS) CLASS_WEIGHTS = np.ones(CLASS, dtype='float32') OBJ_THRESHOLD = 0.2#0.5 NMS_THRESHOLD = 0.2#0.45 ANCHORS = [0.96,4.22, 1.52,4.79, 2.30,4.30, 2.76,2.35, 3.62,6.03] NO_OBJECT_SCALE = 1.0 OBJECT_SCALE = 5.0 COORD_SCALE = 1.0 CLASS_SCALE = 1.0 BATCH_SIZE = 16 WARM_UP_BATCHES = 0 TRUE_BOX_BUFFER = 50 wt_path = '/content/drive/My Drive/keras-yolo2/yolov2.weights' train_image_folder = '/content/drive/My Drive/dataset/images/' train_annot_folder = '/content/drive/My Drive/dataset/annotations/' valid_image_folder = '/content/drive/My Drive/dataset/images_val/' valid_annot_folder = '/content/drive/My Drive/dataset/annotattionsVAL/' #import os #print(os.listdir('/content/drive/My Drive/dataset/images')) train_imgs, seen_train_labels = parse_annotation(train_annot_folder, train_image_folder, labels=LABELS) val_imgs, seen_val_labels = parse_annotation(valid_annot_folder, valid_image_folder, labels=LABELS) train_batch = BatchGenerator(train_imgs, generator_config, norm=normalize) valid_batch = BatchGenerator(val_imgs, generator_config, norm=normalize) ``` **Sanity check: show a few images with ground truth boxes overlaid** ``` batches = BatchGenerator(train_imgs, generator_config) image = batches[0][0][0][0] image = cv2.resize(image,(680,340)) plt.imshow(image.astype('uint8')) ``` # Construct the network ``` # the function to implement the orgnization layer (thanks to 
github.com/allanzelener/YAD2K) def space_to_depth_x2(x): return tf.space_to_depth(x, block_size=2) input_image = Input(shape=(IMAGE_H, IMAGE_W, 3)) true_boxes = Input(shape=(1, 1, 1, TRUE_BOX_BUFFER , 4)) # Layer 1 x = Conv2D(32, (3,3), strides=(1,1), padding='same', name='conv_1', use_bias=False)(input_image) x = BatchNormalization(name='norm_1')(x) x = LeakyReLU(alpha=0.1)(x) x = MaxPooling2D(pool_size=(2, 2))(x) # Layer 2 x = Conv2D(64, (3,3), strides=(1,1), padding='same', name='conv_2', use_bias=False)(x) x = BatchNormalization(name='norm_2')(x) x = LeakyReLU(alpha=0.1)(x) x = MaxPooling2D(pool_size=(2, 2))(x) # Layer 3 x = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_3', use_bias=False)(x) x = BatchNormalization(name='norm_3')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 4 x = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_4', use_bias=False)(x) x = BatchNormalization(name='norm_4')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 5 x = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_5', use_bias=False)(x) x = BatchNormalization(name='norm_5')(x) x = LeakyReLU(alpha=0.1)(x) x = MaxPooling2D(pool_size=(2, 2))(x) # Layer 6 x = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_6', use_bias=False)(x) x = BatchNormalization(name='norm_6')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 7 x = Conv2D(128, (1,1), strides=(1,1), padding='same', name='conv_7', use_bias=False)(x) x = BatchNormalization(name='norm_7')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 8 x = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_8', use_bias=False)(x) x = BatchNormalization(name='norm_8')(x) x = LeakyReLU(alpha=0.1)(x) x = MaxPooling2D(pool_size=(2, 2))(x) # Layer 9 x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_9', use_bias=False)(x) x = BatchNormalization(name='norm_9')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 10 x = Conv2D(256, (1,1), strides=(1,1), padding='same', name='conv_10', use_bias=False)(x) x = 
BatchNormalization(name='norm_10')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 11 x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_11', use_bias=False)(x) x = BatchNormalization(name='norm_11')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 12 x = Conv2D(256, (1,1), strides=(1,1), padding='same', name='conv_12', use_bias=False)(x) x = BatchNormalization(name='norm_12')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 13 x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_13', use_bias=False)(x) x = BatchNormalization(name='norm_13')(x) x = LeakyReLU(alpha=0.1)(x) skip_connection = x x = MaxPooling2D(pool_size=(2, 2))(x) # Layer 14 x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_14', use_bias=False)(x) x = BatchNormalization(name='norm_14')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 15 x = Conv2D(512, (1,1), strides=(1,1), padding='same', name='conv_15', use_bias=False)(x) x = BatchNormalization(name='norm_15')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 16 x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_16', use_bias=False)(x) x = BatchNormalization(name='norm_16')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 17 x = Conv2D(512, (1,1), strides=(1,1), padding='same', name='conv_17', use_bias=False)(x) x = BatchNormalization(name='norm_17')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 18 x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_18', use_bias=False)(x) x = BatchNormalization(name='norm_18')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 19 x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_19', use_bias=False)(x) x = BatchNormalization(name='norm_19')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 20 x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_20', use_bias=False)(x) x = BatchNormalization(name='norm_20')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 21 skip_connection = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_21', use_bias=False)(skip_connection) skip_connection = 
BatchNormalization(name='norm_21')(skip_connection) skip_connection = LeakyReLU(alpha=0.1)(skip_connection) skip_connection = Lambda(space_to_depth_x2)(skip_connection) x = concatenate([skip_connection, x]) # Layer 22 x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_22', use_bias=False)(x) x = BatchNormalization(name='norm_22')(x) x = LeakyReLU(alpha=0.1)(x) # Layer 23 x = Conv2D(BOX * (4 + 1 + CLASS), (1,1), strides=(1,1), padding='same', name='conv_23')(x) output = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(x) # small hack to allow true_boxes to be registered when Keras build the model # for more information: https://github.com/fchollet/keras/issues/2790 output = Lambda(lambda args: args[0])([output, true_boxes]) model = Model([input_image, true_boxes], output) model.summary() ``` # Load pretrained weights **Load the weights originally provided by YOLO** ``` weight_reader = WeightReader(wt_path) weight_reader.reset() nb_conv = 23 for i in range(1, nb_conv+1): conv_layer = model.get_layer('conv_' + str(i)) if i < nb_conv: norm_layer = model.get_layer('norm_' + str(i)) size = np.prod(norm_layer.get_weights()[0].shape) beta = weight_reader.read_bytes(size) gamma = weight_reader.read_bytes(size) mean = weight_reader.read_bytes(size) var = weight_reader.read_bytes(size) weights = norm_layer.set_weights([gamma, beta, mean, var]) if len(conv_layer.get_weights()) > 1: bias = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[1].shape)) kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape)) kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape))) kernel = kernel.transpose([2,3,1,0]) conv_layer.set_weights([kernel, bias]) else: kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape)) kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape))) kernel = kernel.transpose([2,3,1,0]) conv_layer.set_weights([kernel]) ``` **Randomize weights of the last layer** ``` layer = 
model.layers[-4] # the last convolutional layer weights = layer.get_weights() new_kernel = np.random.normal(size=weights[0].shape)/(GRID_H*GRID_W) new_bias = np.random.normal(size=weights[1].shape)/(GRID_H*GRID_W) layer.set_weights([new_kernel, new_bias]) ``` # Perform training **Loss function** $$\begin{multline} \lambda_\textbf{coord} \sum_{i = 0}^{S^2} \sum_{j = 0}^{B} L_{ij}^{\text{obj}} \left[ \left( x_i - \hat{x}_i \right)^2 + \left( y_i - \hat{y}_i \right)^2 \right] \\ + \lambda_\textbf{coord} \sum_{i = 0}^{S^2} \sum_{j = 0}^{B} L_{ij}^{\text{obj}} \left[ \left( \sqrt{w_i} - \sqrt{\hat{w}_i} \right)^2 + \left( \sqrt{h_i} - \sqrt{\hat{h}_i} \right)^2 \right] \\ + \sum_{i = 0}^{S^2} \sum_{j = 0}^{B} L_{ij}^{\text{obj}} \left( C_i - \hat{C}_i \right)^2 \\ + \lambda_\textrm{noobj} \sum_{i = 0}^{S^2} \sum_{j = 0}^{B} L_{ij}^{\text{noobj}} \left( C_i - \hat{C}_i \right)^2 \\ + \sum_{i = 0}^{S^2} L_i^{\text{obj}} \sum_{c \in \textrm{classes}} \left( p_i(c) - \hat{p}_i(c) \right)^2 \end{multline}$$ ``` def custom_loss(y_true, y_pred): mask_shape = tf.shape(y_true)[:4] cell_x = tf.to_float(tf.reshape(tf.tile(tf.range(GRID_W), [GRID_H]), (1, GRID_H, GRID_W, 1, 1))) cell_y = tf.transpose(cell_x, (0,2,1,3,4)) cell_grid = tf.tile(tf.concat([cell_x,cell_y], -1), [BATCH_SIZE, 1, 1, 5, 1]) coord_mask = tf.zeros(mask_shape) conf_mask = tf.zeros(mask_shape) class_mask = tf.zeros(mask_shape) seen = tf.Variable(0.) total_recall = tf.Variable(0.) 
""" Adjust prediction """ ### adjust x and y pred_box_xy = tf.sigmoid(y_pred[..., :2]) + cell_grid ### adjust w and h pred_box_wh = tf.exp(y_pred[..., 2:4]) * np.reshape(ANCHORS, [1,1,1,BOX,2]) ### adjust confidence pred_box_conf = tf.sigmoid(y_pred[..., 4]) ### adjust class probabilities pred_box_class = y_pred[..., 5:] """ Adjust ground truth """ ### adjust x and y true_box_xy = y_true[..., 0:2] # relative position to the containing cell ### adjust w and h true_box_wh = y_true[..., 2:4] # number of cells accross, horizontally and vertically ### adjust confidence true_wh_half = true_box_wh / 2. true_mins = true_box_xy - true_wh_half true_maxes = true_box_xy + true_wh_half pred_wh_half = pred_box_wh / 2. pred_mins = pred_box_xy - pred_wh_half pred_maxes = pred_box_xy + pred_wh_half intersect_mins = tf.maximum(pred_mins, true_mins) intersect_maxes = tf.minimum(pred_maxes, true_maxes) intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.) intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1] true_areas = true_box_wh[..., 0] * true_box_wh[..., 1] pred_areas = pred_box_wh[..., 0] * pred_box_wh[..., 1] union_areas = pred_areas + true_areas - intersect_areas iou_scores = tf.truediv(intersect_areas, union_areas) true_box_conf = iou_scores * y_true[..., 4] ### adjust class probabilities true_box_class = tf.argmax(y_true[..., 5:], -1) """ Determine the masks """ ### coordinate mask: simply the position of the ground truth boxes (the predictors) coord_mask = tf.expand_dims(y_true[..., 4], axis=-1) * COORD_SCALE ### confidence mask: penelize predictors + penalize boxes with low IOU # penalize the confidence of the boxes, which have IOU with some ground truth box < 0.6 true_xy = true_boxes[..., 0:2] true_wh = true_boxes[..., 2:4] true_wh_half = true_wh / 2. true_mins = true_xy - true_wh_half true_maxes = true_xy + true_wh_half pred_xy = tf.expand_dims(pred_box_xy, 4) pred_wh = tf.expand_dims(pred_box_wh, 4) pred_wh_half = pred_wh / 2. 
pred_mins = pred_xy - pred_wh_half pred_maxes = pred_xy + pred_wh_half intersect_mins = tf.maximum(pred_mins, true_mins) intersect_maxes = tf.minimum(pred_maxes, true_maxes) intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.) intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1] true_areas = true_wh[..., 0] * true_wh[..., 1] pred_areas = pred_wh[..., 0] * pred_wh[..., 1] union_areas = pred_areas + true_areas - intersect_areas iou_scores = tf.truediv(intersect_areas, union_areas) best_ious = tf.reduce_max(iou_scores, axis=4) conf_mask = conf_mask + tf.to_float(best_ious < 0.6) * (1 - y_true[..., 4]) * NO_OBJECT_SCALE # penalize the confidence of the boxes, which are reponsible for corresponding ground truth box conf_mask = conf_mask + y_true[..., 4] * OBJECT_SCALE ### class mask: simply the position of the ground truth boxes (the predictors) class_mask = y_true[..., 4] * tf.gather(CLASS_WEIGHTS, true_box_class) * CLASS_SCALE """ Warm-up training """ no_boxes_mask = tf.to_float(coord_mask < COORD_SCALE/2.) seen = tf.assign_add(seen, 1.) true_box_xy, true_box_wh, coord_mask = tf.cond(tf.less(seen, WARM_UP_BATCHES), lambda: [true_box_xy + (0.5 + cell_grid) * no_boxes_mask, true_box_wh + tf.ones_like(true_box_wh) * np.reshape(ANCHORS, [1,1,1,BOX,2]) * no_boxes_mask, tf.ones_like(coord_mask)], lambda: [true_box_xy, true_box_wh, coord_mask]) """ Finalize the loss """ nb_coord_box = tf.reduce_sum(tf.to_float(coord_mask > 0.0)) nb_conf_box = tf.reduce_sum(tf.to_float(conf_mask > 0.0)) nb_class_box = tf.reduce_sum(tf.to_float(class_mask > 0.0)) loss_xy = tf.reduce_sum(tf.square(true_box_xy-pred_box_xy) * coord_mask) / (nb_coord_box + 1e-6) / 2. loss_wh = tf.reduce_sum(tf.square(true_box_wh-pred_box_wh) * coord_mask) / (nb_coord_box + 1e-6) / 2. loss_conf = tf.reduce_sum(tf.square(true_box_conf-pred_box_conf) * conf_mask) / (nb_conf_box + 1e-6) / 2. 
loss_class = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=true_box_class, logits=pred_box_class) loss_class = tf.reduce_sum(loss_class * class_mask) / (nb_class_box + 1e-6) loss = loss_xy + loss_wh + loss_conf + loss_class nb_true_box = tf.reduce_sum(y_true[..., 4]) nb_pred_box = tf.reduce_sum(tf.to_float(true_box_conf > 0.5) * tf.to_float(pred_box_conf > 0.3)) """ Debugging code """ current_recall = nb_pred_box/(nb_true_box + 1e-6) total_recall = tf.assign_add(total_recall, current_recall) #loss = tf.Print(loss, [tf.zeros((1))], message='Dummy Line \t', summarize=1000) #loss = tf.Print(loss, [loss_xy], message='Loss XY \t', summarize=1000) #loss = tf.Print(loss, [loss_wh], message='Loss WH \t', summarize=1000) #loss = tf.Print(loss, [loss_conf], message='Loss Conf \t', summarize=1000) #loss = tf.Print(loss, [loss_class], message='Loss Class \t', summarize=1000) #loss = tf.Print(loss, [loss], message='Total Loss \t', summarize=1000) #loss = tf.Print(loss, [current_recall], message='Current Recall \t', summarize=1000) #loss = tf.Print(loss, [total_recall/seen], message='Average Recall \t', summarize=1000) loss = tf.Print(loss, [tf.zeros((1))], message='Dummy Line \t') loss = tf.Print(loss, [loss_xy], message='Loss XY \t') loss = tf.Print(loss, [loss_wh], message='Loss WH \t') loss = tf.Print(loss, [loss_conf], message='Loss Conf \t') loss = tf.Print(loss, [loss_class], message='Loss Class \t') loss = tf.Print(loss, [loss], message='Total Loss \t') loss = tf.Print(loss, [current_recall], message='Current Recall \t') loss = tf.Print(loss, [total_recall/seen], message='Average Recall \t') return loss ``` **Parse the annotations to construct train generator and validation generator** ``` generator_config = { 'IMAGE_H' : IMAGE_H, 'IMAGE_W' : IMAGE_W, 'GRID_H' : GRID_H, 'GRID_W' : GRID_W, 'BOX' : BOX, 'LABELS' : LABELS, 'CLASS' : len(LABELS), 'ANCHORS' : ANCHORS, 'BATCH_SIZE' : BATCH_SIZE, 'TRUE_BOX_BUFFER' : 50, } def normalize(image): return image / 255. 
print(train_annot_folder)
```

**Setup a few callbacks and start the training**

```
early_stop = EarlyStopping(monitor='val_loss',
                           min_delta=0.001,
                           patience=3,
                           mode='min',
                           verbose=1)

checkpoint = ModelCheckpoint('botellas.h5',
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min',
                             period=1)

#tb_counter = len([log for log in os.listdir(os.path.expanduser('~/logs/')) if 'coco_' in log]) + 1
#tensorboard = TensorBoard(log_dir=os.path.expanduser('~/logs/') + 'coco_' + '_' + str(tb_counter),
#                          histogram_freq=0,
#                          write_graph=True,
#                          write_images=False)

optimizer = Adam(lr=0.5e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
#optimizer = SGD(lr=1e-4, decay=0.0005, momentum=0.9)
#optimizer = RMSprop(lr=1e-4, rho=0.9, epsilon=1e-08, decay=0.0)

model.compile(loss=custom_loss, optimizer=optimizer, metrics=['accuracy'])
#'loss_xy','loss_wh','loss_conf','loss_classloss','current_recall','total_recall/seen'

stad = model.fit_generator(generator = train_batch,
                           steps_per_epoch = len(train_batch),
                           epochs = 3,
                           verbose = 1,
                           validation_data = valid_batch,
                           validation_steps = len(valid_batch),
                           callbacks = [early_stop, checkpoint],
                           max_queue_size = 3)

#model.fit_generator(generator = train_batch,
#                    steps_per_epoch = len(train_batch),
#                    epochs = 100,
#                    verbose = 1,
#                    validation_data = valid_batch,
#                    validation_steps = len(valid_batch),
#                    callbacks = [early_stop, checkpoint, tensorboard],
#                    max_queue_size = 3)

image = batches[0][0][0][0]
# BUGFIX: the two statements below were fused on one line
# ("plt.imshow(...)plt.figure(0)"), which is a Python syntax error.
plt.imshow(image.astype('uint8'))

plt.figure(0)
plt.plot(stad.history['acc'], 'r')
plt.plot(stad.history['val_acc'], 'g')
plt.xlabel("Num of Epochs")
plt.ylabel("Accuracy")
plt.title("Training Accuracy vs Validation Accuracy")
plt.legend(['train', 'validation'])
plt.savefig("Grafica_1.jpg", bbox_inches = 'tight')

plt.figure(1)
plt.plot(stad.history['loss'], 'r')
plt.plot(stad.history['val_loss'], 'g')
plt.xlabel("Num of Epochs")
plt.ylabel("Loss")
plt.title("Training Loss vs Validation Loss")
plt.legend(['train', 'validation'])
plt.savefig("Grafica_2.jpg", bbox_inches = 'tight')
plt.show()
```

# Perform detection on image

```
model.load_weights("botellas.h5")

import cv2
import matplotlib.pyplot as plt

plt.figure()

input_image = cv2.imread("/content/drive/My Drive/dataset/images/1.png")
input_image = cv2.resize(input_image, (416, 416))
# BUGFIX: the original called draw_boxes(imagen, ...) with 'imagen' never assigned
# (NameError). Keep an unnormalized BGR copy of the resized frame to draw on.
imagen = input_image.copy()

dummy_array = np.zeros((1,1,1,1,TRUE_BOX_BUFFER,4))

input_image = input_image / 255.
input_image = input_image[:,:,::-1]
input_image = np.expand_dims(input_image, 0)

netout = model.predict([input_image, dummy_array])

boxes = decode_netout(netout[0],
                      obj_threshold=OBJ_THRESHOLD,
                      nms_threshold=NMS_THRESHOLD,
                      anchors=ANCHORS,
                      nb_class=CLASS)

imagen = draw_boxes(imagen, boxes, labels=LABELS)
imagen = cv2.resize(imagen, (640, 380))
plt.imshow(imagen[:,:,::-1]); plt.show()
```

# Perform detection on video

```
#model.load_weights("weights_coco.h5")

#dummy_array = np.zeros((1,1,1,1,TRUE_BOX_BUFFER,4))

#video_inp = '../basic-yolo-keras/images/phnom_penh.mp4'
#video_out = '../basic-yolo-keras/images/phnom_penh_bbox.mp4'

#video_reader = cv2.VideoCapture(video_inp)

#nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
#frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
#frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

#video_writer = cv2.VideoWriter(video_out,
#                               cv2.VideoWriter_fourcc(*'XVID'),
#                               50.0,
#                               (frame_w, frame_h))

#for i in tqdm(range(nb_frames)):
#    ret, image = video_reader.read()
#
#    input_image = cv2.resize(image, (416, 416))
#    input_image = input_image / 255.
#    input_image = input_image[:,:,::-1]
#    input_image = np.expand_dims(input_image, 0)
#
#    netout = model.predict([input_image, dummy_array])
#
#    boxes = decode_netout(netout[0],
#                          obj_threshold=0.3,
#                          nms_threshold=NMS_THRESHOLD,
#                          anchors=ANCHORS,
#                          nb_class=CLASS)
#    image = draw_boxes(image, boxes, labels=LABELS)
#
#    video_writer.write(np.uint8(image))
#
#video_reader.release()
#video_writer.release()
```
github_jupyter
<!-- HTML file automatically generated from DocOnce source (https://github.com/doconce/doconce/) doconce format html solutionhw4.do.txt --> <!-- dom:TITLE: PHY321: Classical Mechanics 1 --> # PHY321: Classical Mechanics 1 **Homework 4, due Monday February 15** Date: **Feb 14, 2022** ### Practicalities about homeworks and projects 1. You can work in groups (optimal groups are often 2-3 people) or by yourself. If you work as a group you can hand in one answer only if you wish. **Remember to write your name(s)**! 2. Homeworks are available Wednesday/Thursday the week before the deadline. The deadline is at the Friday lecture. 3. How do I(we) hand in? You can hand in the paper and pencil exercises as a hand-written document. For this homework this applies to exercises 1-5. Alternatively, you can hand in everything (if you are ok with typing mathematical formulae using say Latex) as a jupyter notebook at D2L. The numerical exercise(s) (exercise 6 here) should always be handed in as a jupyter notebook by the deadline at D2L. ### Introduction to homework 4 This week's sets of classical pen and paper and computational exercises deal with simple motion problems and conservation laws; energy, momentum and angular momentum. These conservation laws are central in Physics and understanding them properly lays the foundation for understanding and analyzing more complicated physics problems. The relevant reading background is 1. chapters 3, 4.1, 4.2 and 4.3 of Taylor (there are many good examples there) and 2. chapters 10-13 of Malthe-Sørenssen. In both textbooks there are many nice worked out examples. Malthe-Sørenssen's text contains also several coding examples you may find useful. The numerical homework focuses on another motion problem where you can use the code you developed in homework 3, almost entirely. Please take a look at the posted solution (jupyter-notebook) for homework 3. You need only to change the forces at play. 
### Exercise 1 (10 pt), Conservation laws, Energy and momentum * 1a (2pt) How do we define a conservative force? A conservative force is a force whose property is that the total work done in moving an object between two points is independent of the taken path. This means that the work on an object under the influence of a conservative force is independent of the path of the object. It depends only on the spatial degrees of freedom and it is possible to assign a numerical value for the potential at any point. It leads to conservation of energy. The gravitational force is an example of a conservative force. If you wish to read more about conservative forces, Feynman's lectures from 1963 are quite interesting. He states for example that **All fundamental forces in nature appear to be conservative**. This statement was made while developing his argument that *there are no nonconservative forces*. You may enjoy the link to [Feynman's lecture](http://www.feynmanlectures.caltech.edu/I_14.html). An important condition for the final work to be independent of the path is that the **curl** of the force is zero, that is $$ \boldsymbol{\nabla} \times \boldsymbol{F}=0 $$ * 1b (4pt) Use the work-energy theorem to show that energy is conserved with a conservative force. The work-energy theorem states that the work done $W$ by a force $\boldsymbol{F}$ that moves an object from a position $\boldsymbol{r}_0$ to a new position $\boldsymbol{r}_1$ $$ W=\int_{\boldsymbol{r}_0}^{\boldsymbol{r}_1}\boldsymbol{F}\boldsymbol{dr}=\frac{1}{2}mv_1^2-\frac{1}{2}mv_0^2, $$ where $v_1^2$ is the velocity squared at a time $t_1$ and $v_0^2$ the corresponding quantity at a time $t_0$. The work done is thus the difference in kinetic energies. 
We can rewrite the above equation as $$ \frac{1}{2}mv_1^2=\int_{\boldsymbol{r}_0}^{\boldsymbol{r}_1}\boldsymbol{F}\boldsymbol{dr}+\frac{1}{2}mv_0^2, $$ that is the final kinetic energy is equal to the initial kinetic energy plus the work done by the force over a given path from a position $\boldsymbol{r}_0$ at time $t_0$ to a final position $\boldsymbol{r}_1$ at a later time $t_1$. * 1c (4pt) Assume that you have only internal two-body forces acting on $N$ objects in an isolated system. The force from object $i$ on object $j$ is $\boldsymbol{f}_{ij}$. Show that the linear momentum is conserved. Here we use Newton's third law and assume that our system is only affected by so-called internal forces. This means that the force $\boldsymbol{f}_{ij}$ from object $i$ acting on object $j$ is equal to the force acting on object $j$ from object $i$ but with opposite sign, that is $\boldsymbol{f}_{ij}=-\boldsymbol{f}_{ji}$. The total linear momentum is defined as $$ \boldsymbol{P}=\sum_{i=1}^N\boldsymbol{p}_i=\sum_{i=1}^Nm_i\boldsymbol{v}_i, $$ where $i$ runs over all objects, $m_i$ is the mass of object $i$ and $\boldsymbol{v}_i$ its corresponding velocity. The force acting on object $i$ from all the other objects is (lower case letters for individual objects and upper case letters for total quantities) $$ \boldsymbol{f}_i=\sum_{j=1}^N\boldsymbol{f}_{ji}. $$ Summing over all objects the net force is $$ \sum_{i=1}^N\boldsymbol{f}_i=\sum_{i=1}^N\sum_{j=1;j\ne i}^N\boldsymbol{f}_{ji}. $$ We are summing freely over all objects with the constraint that $i\ne j$ (no self-interactions). We can now manipulate the double sum as $$ \sum_{i=1}^N\sum_{j=1;j\ne i}^N\boldsymbol{f}_{ji}=\sum_{i=1}^N\sum_{j>i}^N(\boldsymbol{f}_{ji}+\boldsymbol{f}_{ij}). $$ Convince yourself about this by setting $N=2$ and $N=3$. 
Newton's third law says $\boldsymbol{f}_{ij}=-\boldsymbol{f}_{ji}$, which means we have $$ \sum_{i=1}^N\sum_{j=1;j\ne i}^N\boldsymbol{f}_{ji}=\sum_{i=1}^N\sum_{j>i}^N(\boldsymbol{f}_{ji}-\boldsymbol{f}_{ji})=0. $$ The total force due to internal degrees of freedom only is thus $0$. If we then use the definition that $$ \sum_{i=1}^N\boldsymbol{f}_i=\sum_{i=1}^Nm_i\frac{d\boldsymbol{v}_i}{dt}=\sum_{i=1}^N\frac{d\boldsymbol{p}_i}{dt}=\frac{d \boldsymbol{P}}{dt}=0, $$ where we assumed that $m_i$ is independent of time, we see that the time derivative of the total momentum is zero. We say then that the linear momentum is a constant of the motion. It is conserved. ### Exercise 2 (10 pt), Conservation of angular momentum * 2a (2pt) Define angular momentum and the torque for a single object with external forces only. The angular momentum $\boldsymbol{l}_i$ for a given object $i$ is defined as $$ \boldsymbol{l}_i = \boldsymbol{r}_i \times \boldsymbol{p}_i, $$ where $\boldsymbol{p}_i=m_i\boldsymbol{v}_i$. With external forces only defining the acceleration and the mass being time independent, the momentum is the integral over the external force as function of time, that is $$ \boldsymbol{p}_i(t)=\boldsymbol{p}_i(t_0)+\int_{t_0}^t \boldsymbol{f}_i^{\mathrm{ext}}(t')dt'. $$ The torque for one object is $$ \boldsymbol{\tau}_i=\frac{d\boldsymbol{l}_i}{dt} = \frac{d(\boldsymbol{r}_i \times \boldsymbol{p}_i)}{dt}=\boldsymbol{r}_i \times \frac{d\boldsymbol{p}_i}{dt}=\boldsymbol{r}_i \times \boldsymbol{f}_i. $$ * 2b (4pt) Define angular momentum and the torque for a system with $N$ objects/particles with external and internal forces. The force from object $i$ on object $j$ is $\boldsymbol{F}_{ij}$. The total angular momentum $\boldsymbol{L}$ is defined as $$ \boldsymbol{L}=\sum_{i=1}^N\boldsymbol{l}_i = \sum_{i=1}^N\boldsymbol{r}_i \times \boldsymbol{p}_i. 
$$ and the total torque is (using the expression for one object from 2a) $$ \boldsymbol{\tau}=\sum_{i=1}^N\frac{d\boldsymbol{l}_i}{dt} = \sum_{i=1}^N\boldsymbol{r}_i \times \boldsymbol{f}_i. $$ The force acting on one object is $\boldsymbol{f}_i=\boldsymbol{f}_i^{\mathrm{ext}}+\sum_{j=1}^N\boldsymbol{f}_{ji}$. * 2c (4pt) With internal forces only, what is the mathematical form of the forces that allows for angular momentum to be conserved? Using the results from 1c, in the absence of external forces we can rewrite our torque as $$ \boldsymbol{\tau}=\sum_{i=1}^N\frac{d\boldsymbol{l}_i}{dt} = \sum_{i=1}^N\boldsymbol{r}_i \times \boldsymbol{f}_i=\sum_{i=1}^N(\boldsymbol{r}_i \times \sum_{j=1}^N\boldsymbol{f}_{ji}), $$ which gives $$ \boldsymbol{\tau}=\sum_{i=1}^N\sum_{j=1;j\ne i}^N(\boldsymbol{r}_i \times \boldsymbol{f}_{ji}). $$ We can rewrite this as (convince yourself again about this) $$ \boldsymbol{\tau}=\sum_{i=1}^N\sum_{j>i}^N(\boldsymbol{r}_i \times \boldsymbol{f}_{ji}+\boldsymbol{r}_j \times \boldsymbol{f}_{ij}), $$ and using Newton's third law we have $$ \boldsymbol{\tau}=\sum_{i=1}^N\sum_{j>i}^N(\boldsymbol{r}_i -\boldsymbol{r}_j) \times \boldsymbol{f}_{ji}. $$ If the force is proportional to $\boldsymbol{r}_i -\boldsymbol{r}_j$ then angular momentum is conserved since the cross-product of a vector with itself is zero. We say thus that angular momentum is a constant of the motion. ### Exercise 3 (10pt), Example of potential Consider a particle of mass $m$ moving according to the potential $$ V(x,y,z)=A\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}. $$ * 3a (2pt) Is energy conserved? If so, why? In this exercise $A$ and $a$ are constants. The force is given by the derivative of $V$ with respect to the spatial degrees of freedom and since the potential depends only on position, the force is conservative and energy is conserved. Furthermore, the curl of the force is zero. To see this we need first to compute the derivatives of the potential with respect to $x$, $y$ and $z$. 
We have that $$ F_x = -\frac{\partial V}{\partial x}=-\frac{xA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}, $$ and $$ F_y = 0, $$ and $$ F_z = -\frac{\partial V}{\partial z}=-\frac{zA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}. $$ The components of the **curl** of $\boldsymbol{F}$ are $$ (\boldsymbol{\nabla}\times\boldsymbol{F})_x = \frac{\partial F_y}{\partial z}-\frac{\partial F_z}{\partial y}=0, $$ and $$ (\boldsymbol{\nabla}\times\boldsymbol{F})_y = \frac{\partial F_x}{\partial z}-\frac{\partial F_z}{\partial x}=\frac{xzA}{a^4}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}-\frac{xzA}{a^4}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}=0, $$ and $$ (\boldsymbol{\nabla}\times\boldsymbol{F})_z = \frac{\partial F_y}{\partial x}-\frac{\partial F_x}{\partial y}=0. $$ The force is a conservative one. * 3b (4pt) Which of the quantities, $p_x,p_y,p_z$ are conserved? Taking the derivatives with respect to time shows that only $p_y$ is conserved We see this directly from the above expressions for the force, since the derivative with respect to time of the momentum is simply the force. Thus, only the $y$-component of the momentum is conserved, see the expressions above for the forces, For the next exercise (3c), we need also the following derivatives $$ \frac{\partial F_x}{\partial x} = \frac{x^2A}{a^4}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}-\frac{A}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}, $$ and $$ \frac{\partial F_y}{\partial y} = 0, $$ and $$ \frac{\partial F_z}{\partial z} = \frac{z^2A}{a^4}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}-\frac{A}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}, $$ * 3c (4pt) Which of the quantities, $L_x,L_y,L_z$ are conserved? Using that $\boldsymbol{L}=\boldsymbol{r}\times\boldsymbol{p}$ and that $$ \frac{d\boldsymbol{L}}{dt}=\boldsymbol{r}\times\boldsymbol{F}, $$ we have that the different components are $$ (\boldsymbol{r}\times\boldsymbol{F})_x = zF_y-yF_z=\frac{yzA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}. 
$$ and $$ (\boldsymbol{r}\times\boldsymbol{F})_y = xF_z-zF_x=-\frac{xzA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}+\frac{xzA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}=0, $$ and $$ (\boldsymbol{r}\times\boldsymbol{F})_z = xF_y-yF_x=\frac{yxA}{a^2}\exp\left\{-\frac{x^2+z^2}{2a^2}\right\}. $$ Only $L_y$ is conserved. ### Exercise 4 (10pt), Angular momentum case At $t=0$ we have a single object with position $\boldsymbol{r}_0=x_0\boldsymbol{e}_x+y_0\boldsymbol{e}_y$. We add also a force in the $x$-direction at $t=0$. We assume that the object is at rest at $t=0$. $$ \boldsymbol{F} = F\boldsymbol{e}_x. $$ * 4a (3pt) Find the velocity and momentum at a given time $t$ by integrating over time with the above initial conditions. There is no velocity in the $x$- and $y$-directions at $t=0$, thus $\boldsymbol{v}_0=0$. The force is constant and acting only in the $x$-direction. We have then (dropping vector symbols and setting $t_0=0$) $$ v_x(t) = \int_0^t a(t')dt'=\int_0^t\frac{F}{m}dt'=\frac{F}{m}t. $$ * 4b (3pt) Find also the position at a time $t$. In the $x$-direction we have then $$ x(t) = \int_0^t v_x(t')dt'=x_0+\frac{F}{2m}t^2, $$ resulting in $$ \boldsymbol{r}(t)=(x_0+\frac{F}{2m}t^2)\boldsymbol{e}_x+y_0\boldsymbol{e}_y. $$ * 4c (4pt) Use the position and the momentum to find the angular momentum and the torque. Is angular momentum conserved? Velocity and position are defined in the $xy$-plane only which means that only angular momentum in the $z$-direction is non-zero. The angular momentum is $$ \boldsymbol{l} = (x(t)v_y(t)-y(t)v_x(t))\boldsymbol{e}_z=-y_0\frac{F}{m}t\boldsymbol{e}_z, $$ which results in a torque $\boldsymbol{\tau}=-y_0\frac{F}{m}\boldsymbol{e}_z$, which is not zero. Thus, angular momentum is not conserved. ### Exercise 5 (10pt), forces and potentials A particle of mass $m$ has velocity $v=\alpha/x$, where $x$ is its displacement. * 5a (3pt) Find the force $F(x)$ responsible for the motion. 
Here, since the force is assumed to be conservative (only dependence on $x$), we can use energy conservation. Assuming that the total energy at $t=0$ is $E_0$, we have $$ E_0=V(x)+\frac{1}{2}mv^2=V(x)+\frac{1}{2}m\frac{\alpha^2}{x^2}. $$ Taking the derivative wrt $x$ we have $$ \frac{dV}{dx}-m\frac{\alpha^2}{x^3}=0, $$ and since $F(x)=-dV/dx$ we have $$ F(x)=-m\frac{\alpha^2}{x^3}. $$ A particle is thereafter under the influence of a force $F=-kx+kx^3/\alpha^2$, where $k$ and $\alpha$ are constants and $k$ is positive. * 5b (3pt) Determine $V(x)$ and discuss the motion. It can be convenient here to make a sketch/plot of the potential as function of $x$. We assume that the potential is zero at say $x=0$. Integrating the force from zero to $x$ gives $$ V(x) = -\int_0^x F(x')dx'=\frac{kx^2}{2}-\frac{kx^4}{4\alpha^2}. $$ The following code plots the potential. We have chosen values of $\alpha=k=1.0$. Feel free to experiment with other values. We plot $V(x)$ for a domain of $x\in [-2,2]$. ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import math x0= -2.0 xn = 2.1 Deltax = 0.1 alpha = 1.0 k = 1.0 #set up arrays x = np.arange(x0,xn,Deltax) n = np.size(x) V = np.zeros(n) V = 0.5*k*x*x-0.25*k*(x**4)/(alpha*alpha) plt.plot(x, V) plt.xlabel("x") plt.ylabel("V") plt.show() ``` From the plot here (with the chosen parameters) 1. we see that with a given initial velocity we can overcome the potential energy barrier and leave the potential well for good. 1. If the initial velocity is smaller (see next exercise) than a certain value, it will remain trapped in the potential well and oscillate back and forth around $x=0$. This is where the potential has its minimum value. 2. If the kinetic energy at $x=0$ equals the maximum potential energy, the object will oscillate back and forth between the minimum potential energy at $x=0$ and the turning points where the kinetic energy turns zero. These are the so-called non-equilibrium points. 
* 5c (4pt) What happens when the energy of the particle is $E=(1/4)k\alpha^2$? Hint: what is the maximum value of the potential energy? From the figure we see that the potential has a minimum at $x=0$ then rises until $x=\alpha$ before falling off again. The maximum potential is $V(x=\pm \alpha) = k\alpha^2/4$. If the energy is higher, the particle cannot be contained in the well. The turning points are thus defined by $x=\pm \alpha$. And from the previous plot you can easily see that this is the case ($\alpha=1$ in the abovementioned Python code). ### Exercise 6 (40pt), Numerical elements, adding the bouncing from the floor to the code from hw 3, exercise 6 **This exercise should be handed in as a jupyter-notebook** at D2L. Remember to write your name(s). Till now we have only introduced gravity and air resistance and studied their effects via a constant acceleration due to gravity and the force arising from air resistance. But what happens when the ball hits the floor? What if we would like to simulate the normal force from the floor acting on the ball? We need then to include a force model for the normal force from the floor on the ball. The simplest approach to such a system is to introduce a contact force model represented by a spring model. We model the interaction between the floor and the ball as a single spring. But the normal force is zero when there is no contact. Here we define a simple model that allows us to include such effects in our models. The normal force from the floor on the ball is represented by a spring force. This is a strong simplification of the actual deformation process occurring at the contact between the ball and the floor due to the deformation of both the ball and the floor. The deformed region corresponds roughly to the region of **overlap** between the ball and the floor. The depth of this region is $\Delta y = R - y(t)$, where $R$ is the radius of the ball. 
This is supposed to represent the compression of the spring. Our model for the normal force acting on the ball is then $$ \boldsymbol{N} = k (R - y(t)) \boldsymbol{e}_y. $$ The normal force must act upward when $y < R$, hence the sign must be positive: for $y < R$ the compression $R - y(t)$ is positive, so $\boldsymbol{N}$ points along $\boldsymbol{e}_y$ (this matches the sign used in the code below). However, we must also ensure that the normal force only acts when the ball is in contact with the floor, otherwise the normal force is zero. The full formulation of the normal force is therefore $$ \boldsymbol{N} = k (R - y(t)) \boldsymbol{e}_y, $$ when $y(t) < R$ and zero when $y(t) \ge R$. In the numerical calculations you can choose $R=0.1$ m and the spring constant $k=1000$ N/m. * 6a (10pt) Identify the forces acting on the ball and set up a diagram with the forces acting on the ball. Find the acceleration of the falling ball now with the normal force as well. * 6b (30pt) Choose a large enough final time so you can study the ball bouncing up and down several times. Add the normal force and compute the height of the ball as function of time with and without air resistance. Comment your results. For 6a, see Malthe-Sørenssen chapter 7.5.1, in particular figure 7.10. The forces are in equation (7.10). The following code shows how to set up the problem with gravitation, a drag force and a normal force from the ground. The normal force makes the ball bounce up again. The code here includes all forces. Commenting out the air resistance will result in a ball which bounces up and down to the same height. Furthermore, you will note that for larger values of $\Delta t$ the results will not be physically meaningful. Can you figure out why? Try also different values for the step size in order to see whether the final results agree with what you expect. 
``` # Exercise 6, hw4, smarter way with declaration of vx, vy, x and y # Here we have added a normal force from the ground # Common imports import numpy as np import pandas as pd from math import * import matplotlib.pyplot as plt import os # Where to save the figures and data files PROJECT_ROOT_DIR = "Results" FIGURE_ID = "Results/FigureFiles" DATA_ID = "DataFiles/" if not os.path.exists(PROJECT_ROOT_DIR): os.mkdir(PROJECT_ROOT_DIR) if not os.path.exists(FIGURE_ID): os.makedirs(FIGURE_ID) if not os.path.exists(DATA_ID): os.makedirs(DATA_ID) def image_path(fig_id): return os.path.join(FIGURE_ID, fig_id) def data_path(dat_id): return os.path.join(DATA_ID, dat_id) def save_fig(fig_id): plt.savefig(image_path(fig_id) + ".png", format='png') from pylab import plt, mpl plt.style.use('seaborn') mpl.rcParams['font.family'] = 'serif' # Define constants g = 9.80655 #in m/s^2 D = 0.0245 # in mass/length, kg/m m = 0.2 # in kg R = 0.1 # in meters k = 1000.0 # in mass/time^2 # Define Gravitational force as a vector in x and y, zero x component G = -m*g*np.array([0.0,1]) DeltaT = 0.001 #set up arrays tfinal = 15.0 n = ceil(tfinal/DeltaT) # set up arrays for t, v, and r, the latter contain the x and y comps t = np.zeros(n) v = np.zeros((n,2)) r = np.zeros((n,2)) # Initial conditions r0 = np.array([0.0,2.0]) v0 = np.array([1.0,10.0]) r[0] = r0 v[0] = v0 # Start integrating using Euler's method for i in range(n-1): # Set up forces, air resistance FD if ( r[i,1] < R): N = k*(R-r[i,1])*np.array([0,1]) else: N = np.array([0,0]) vabs = sqrt(sum(v[i]*v[i])) FD = 0.0# -D*v[i]*vabs Fnet = FD+G+N a = Fnet/m # update velocity, time and position v[i+1] = v[i] + DeltaT*a r[i+1] = r[i] + DeltaT*v[i] t[i+1] = t[i] + DeltaT fig, ax = plt.subplots() ax.set_xlim(0, tfinal) ax.set_ylabel('y[m]') ax.set_xlabel('x[m]') ax.plot(r[:,0], r[:,1]) fig.tight_layout() save_fig("BouncingBallEuler") plt.show() ```
github_jupyter
``` import numpy as np import librosa import os import random import tflearn import tensorflow as tf lr = 0.001 iterations_train = 30 bsize = 64 audio_features = 20 utterance_length = 35 ndigits = 10 def get_mfcc_features(fpath): raw_w,sampling_rate = librosa.load(fpath,mono=True) mfcc_features = librosa.feature.mfcc(raw_w,sampling_rate) if(mfcc_features.shape[1]>utterance_length): mfcc_features = mfcc_features[:,0:utterance_length] else: mfcc_features=np.pad(mfcc_features,((0,0),(0,utterance_length-mfcc_features.shape[1])), mode='constant', constant_values=0) return mfcc_features import matplotlib.pyplot as plt import librosa.display %matplotlib inline mfcc_features = get_mfcc_features('../../speech_dset/recordings/train/5_theo_45.wav') plt.figure(figsize=(10, 6)) plt.subplot(2, 1, 1) librosa.display.specshow(mfcc_features, x_axis='time') print("Feature shape: ", mfcc_features.shape) print("Features: ", mfcc_features[:,0]) def get_batch_mfcc(fpath,batch_size=256): ft_batch = [] labels_batch = [] files = os.listdir(fpath) while True: print("Total %d files" % len(files)) random.shuffle(files) for fname in files: if not fname.endswith(".wav"): continue mfcc_features = get_mfcc_features(fpath+fname) label = np.eye(10)[int(fname[0])] labels_batch.append(label) ft_batch.append(mfcc_features) if len(ft_batch) >= batch_size: yield ft_batch, labels_batch ft_batch = [] labels_batch = [] train_batch = get_batch_mfcc('../../speech_dset/recordings/train/') sp_network = tflearn.input_data([None, audio_features, utterance_length]) sp_network = tflearn.lstm(sp_network, 128*4, dropout=0.5) sp_network = tflearn.fully_connected(sp_network, ndigits, activation='softmax') sp_network = tflearn.regression(sp_network, optimizer='adam', learning_rate=lr, loss='categorical_crossentropy') sp_model = tflearn.DNN(sp_network, tensorboard_verbose=0) while iterations_train > 0: X_tr, y_tr = next(train_batch) X_test, y_test = next(train_batch) sp_model.fit(X_tr, y_tr, n_epoch=10, 
validation_set=(X_test, y_test), show_metric=True, batch_size=bsize) iterations_train-=1 sp_model.save("/tmp/speech_recognition.lstm") sp_model.load('/tmp/speech_recognition.lstm') mfcc_features = get_mfcc_features('../../speech_dset/recordings/test/4_jackson_40.wav') mfcc_features = mfcc_features.reshape((1,mfcc_features.shape[0],mfcc_features.shape[1])) prediction_digit = sp_model.predict(mfcc_features) print(prediction_digit) print("Digit predicted: ", np.argmax(prediction_digit)) ```
github_jupyter
# Deploying Machine Learning Models using ksonnet and Ambassador ## Prerequistes You will need - [Git clone of Seldon Core](https://github.com/SeldonIO/seldon-core) - [Minikube](https://github.com/kubernetes/minikube) version v0.24.0 or greater - [python grpc tools](https://grpc.io/docs/quickstart/python.html) - [ksonnet client](https://ksonnet.io/) Start minikube and ensure custom resource validation is activated and there is 5G of memory. **2018-06-13** : At present we find the most stable version of minikube across platforms is 0.25.2 as there are issues with 0.26 and 0.27 on some systems. We also find the default VirtualBox driver can be problematic on some systems so we suggest using the [KVM2 driver](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#kvm2-driver). Your start command would then look like: ``` minikube start --vm-driver kvm2 --memory 4096 --feature-gates=CustomResourceValidation=true --extra-config=apiserver.Authorization.Mode=RBAC ``` ## Setup When you have a running minikube cluster run: ``` !kubectl create clusterrolebinding kube-system-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default !kubectl create namespace seldon ``` ## Install Ambassador See the Abassador [getting started](https://www.getambassador.io/user-guide/getting-started) docs. Eventually, this would also be done via ksonnet. ``` !kubectl apply -f resources/ambassador-rbac.yaml -n seldon ``` ## Install Seldon Core Create a ksonnet app and install the prototypes from our registry. 
``` !ks init my-ml-deployment --api-spec=version:v1.8.0 !cd my-ml-deployment && \ ks registry add seldon-core github.com/SeldonIO/seldon-core/tree/master/seldon-core && \ ks pkg install seldon-core/seldon-core@master && \ ks generate seldon-core seldon-core --withApife=false --namespace=seldon --withRbac=true !cd my-ml-deployment && \ ks apply default ``` ## Set up REST and gRPC methods **Ensure you port forward ambassador**: ``` kubectl port-forward $(kubectl get pods -n seldon -l service=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8002:80 ``` Install gRPC modules for the prediction protos. ``` !cp ../proto/prediction.proto ./proto !python -m grpc.tools.protoc -I. --python_out=. --grpc_python_out=. ./proto/prediction.proto ``` Illustration of both REST and gRPC requests. ``` import requests from requests.auth import HTTPBasicAuth from proto import prediction_pb2 from proto import prediction_pb2_grpc import grpc AMBASSADOR_API="localhost:8002" def rest_request(deploymentName): payload = {"data":{"names":["a","b"],"tensor":{"shape":[2,2],"values":[0,0,1,1]}}} response = requests.post( "http://"+AMBASSADOR_API+"/seldon/"+deploymentName+"/api/v0.1/predictions", json=payload) print(response.status_code) print(response.text) def rest_request_auth(deploymentName,username,password): payload = {"data":{"names":["a","b"],"tensor":{"shape":[2,2],"values":[0,0,1,1]}}} response = requests.post( "http://"+AMBASSADOR_API+"/seldon/"+deploymentName+"/api/v0.1/predictions", json=payload, auth=HTTPBasicAuth(username, password)) print(response.status_code) print(response.text) def grpc_request(deploymentName): datadef = prediction_pb2.DefaultData( names = ["a","b"], tensor = prediction_pb2.Tensor( shape = [3,2], values = [1.0,1.0,2.0,3.0,4.0,5.0] ) ) request = prediction_pb2.SeldonMessage(data = datadef) channel = grpc.insecure_channel(AMBASSADOR_API) stub = prediction_pb2_grpc.SeldonStub(channel) metadata = [('seldon',deploymentName)] response = 
stub.Predict(request=request,metadata=metadata) print(response) ``` ## Create Seldon Deployment **Check everything is running before continuing** ``` !kubectl get pods -n seldon !kubectl apply -f resources/model.json -n seldon ``` Check status of deployment before continuing. **ReplicasAvailable must be equal to 1** First time might take some time to download images. ``` !kubectl get seldondeployments seldon-deployment-example -o jsonpath='{.status}' -n seldon ``` ## Get predictions #### REST Request ``` rest_request("seldon-deployment-example") ``` #### gRPC Request ``` grpc_request("seldon-deployment-example") ``` ## Adding Authentication We will add the example authentication from the Ambassador tutorial. ``` !kubectl apply -f resources/ambassador-auth-service-setup.yaml -n seldon ``` ** Need to wait until running before adding Ambassador config ** ``` !kubectl get pods -n seldon !kubectl apply -f resources/ambassador-auth-service-config.yaml -n seldon ``` Show failed request when auth is running ``` rest_request("seldon-deployment-example") ``` Show successful request with auth ``` rest_request_auth("seldon-deployment-example","username","password") ``` # Tear down ``` !kubectl delete -f resources/ambassador-auth-service-setup.yaml -n seldon !kubectl delete -f resources/ambassador-rbac.yaml -n seldon !kubectl delete -f resources/model.json !cd my-ml-deployment && ks delete default !rm -rf my-ml-deployment ```
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Text Classification In this notebook we will classify movie reviews as being either `positive` or `negative`. We'll use the [IMDB dataset](https://www.tensorflow.org/datasets/catalog/imdb_reviews) that contains the text of 50,000 movie reviews from the [Internet Movie Database](https://www.imdb.com/). These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are *balanced*, meaning they contain an equal number of positive and negative reviews. 
<table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/lmoroney/dlaicourse/blob/master/TensorFlow%20Deployment/Course%204%20-%20TensorFlow%20Serving/Week%202/Examples/text_classification.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/lmoroney/dlaicourse/blob/master/TensorFlow%20Deployment/Course%204%20-%20TensorFlow%20Serving/Week%202/Examples/text_classification.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a> </td> </table> # Setup ``` try: %tensorflow_version 2.x except: pass import tensorflow as tf import tensorflow_hub as hub import tensorflow_datasets as tfds tfds.disable_progress_bar() print("\u2022 Using TensorFlow Version:", tf.__version__) ``` ## Download the IMDB Dataset We will download the [IMDB dataset](https://www.tensorflow.org/datasets/catalog/imdb_reviews) using TensorFlow Datasets. We will use a training set, a validation set, and a test set. Since the IMDB dataset doesn't have a validation split, we will use the first 60\% of the training set for training, and the last 40\% of the training set for validation. ``` splits = ['train[:60%]', 'train[-40%:]', 'test'] splits, info = tfds.load(name="imdb_reviews", with_info=True, split=splits, as_supervised=True) train_data, validation_data, test_data = splits ``` ## Explore the Data Let's take a moment to look at the data. 
``` num_train_examples = info.splits['train'].num_examples num_test_examples = info.splits['test'].num_examples num_classes = info.features['label'].num_classes print('The Dataset has a total of:') print('\u2022 {:,} classes'.format(num_classes)) print('\u2022 {:,} movie reviews for training'.format(num_train_examples)) print('\u2022 {:,} movie reviews for testing'.format(num_test_examples)) ``` The labels are either 0 or 1, where 0 is a negative review, and 1 is a positive review. We will create a list with the corresponding class names, so that we can map labels to class names later on. ``` class_names = ['negative', 'positive'] ``` Each example consists of a sentence representing the movie review and a corresponding label. The sentence is not preprocessed in any way. Let's take a look at the first example of the training set. ``` for review, label in train_data.take(1): review = review.numpy() label = label.numpy() print('\nMovie Review:\n\n', review) print('\nLabel:', class_names[label]) ``` ## Load Word Embeddings In this example, the input data consists of sentences. The labels to predict are either 0 or 1. One way to represent the text is to convert sentences into word embeddings. Word embeddings are an efficient way to represent words using dense vectors, where semantically similar words have similar vectors. We can use a pre-trained text embedding as the first layer of our model, which will have two advantages: * We don't have to worry about text preprocessing. * We can benefit from transfer learning. For this example we will use a model from [TensorFlow Hub](https://tfhub.dev/) called [google/tf2-preview/gnews-swivel-20dim/1](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1). We'll create a `hub.KerasLayer` that uses the TensorFlow Hub model to embed the sentences. We can choose to fine-tune the TF hub module weights during training by setting the `trainable` parameter to `True`. 
``` embedding = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1" hub_layer = hub.KerasLayer(embedding, input_shape=[], dtype=tf.string, trainable=True) ``` ## Build Pipeline ``` batch_size = 512 train_batches = train_data.shuffle(num_train_examples // 4).batch(batch_size).prefetch(1) validation_batches = validation_data.batch(batch_size).prefetch(1) test_batches = test_data.batch(batch_size) ``` ## Build the Model In the code below we will build a Keras `Sequential` model with the following layers: 1. The first layer is a TensorFlow Hub layer. This layer uses a pre-trained SavedModel to map a sentence into its embedding vector. The model that we are using ([google/tf2-preview/gnews-swivel-20dim/1](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1)) splits the sentence into tokens, embeds each token and then combines the embedding. The resulting dimensions are: `(num_examples, embedding_dimension)`. 2. This fixed-length output vector is piped through a fully-connected (`Dense`) layer with 16 hidden units. 3. The last layer is densely connected with a single output node. Using the `sigmoid` activation function, this value is a float between 0 and 1, representing a probability, or confidence level. ``` model = tf.keras.Sequential([ hub_layer, tf.keras.layers.Dense(16, activation='relu'), tf.keras.layers.Dense(1, activation='sigmoid')]) ``` ## Train the Model Since this is a binary classification problem and the model outputs a probability (a single-unit layer with a sigmoid activation), we'll use the `binary_crossentropy` loss function. ``` model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) history = model.fit(train_batches, epochs=20, validation_data=validation_batches) ``` ## Evaluate the Model We will now see how well our model performs on the testing set. ``` eval_results = model.evaluate(test_batches, verbose=0) for metric, value in zip(model.metrics_names, eval_results): print(metric + ': {:.3}'.format(value)) ```
github_jupyter
<a href="https://colab.research.google.com/github/tjido/ControlCharts/blob/master/Control_Chart_Implementation_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Control Chart_Python Implementation 1 This script takes a date input and creates a Control Chart. This is done in a python cloud environment using a Jupiter notebook in Google Colab. Contact: Shingai Manjengwa, Technical Education Specialist, Vector Institute (Twitter: @Tjido) This work is part of a research paper by Shingai Manjengwa: 'Visualizing a Disruption, using Control Charts to understand the impact of Covid19.' ``` #Import relevant packages import pandas as pd import plotly.graph_objects as go import datetime #Load the dataset url = 'https://raw.githubusercontent.com/tjido/ControlCharts/master/US_CIVPART_01042020.csv' df_data = pd.read_csv(url) df_data.head() #Manage the date format while True: date_entry = input('Enter the disruption date in the format YYYY-MM-DD: ') year, month, day = map(int, date_entry.split('-')) date1 = datetime.date(year, month, day) value_found = False for a in range(len(df_data)): if df_data['Date'][a] ==str(date1): print(a) date_value = a value_found=True break if value_found==True: print('Thank you! 
') break break else: print('Kindly Enter the date again, As you entered an invalid date') #Set the standard deviation standard_deviation_level = 1 #Compute the Upper Control Limit, Lower Control Limit and the Mean UCL_a = df_data['CIVPART'][:int(date_value)].mean()+(standard_deviation_level*df_data['CIVPART'][:int(date_value)].std()) #a UCL LCL_a = df_data['CIVPART'][:int(date_value)].mean()-(standard_deviation_level*df_data['CIVPART'][:int(date_value)].std()) #a LCL MEAN_a = df_data['CIVPART'][:int(date_value)].mean() #Overall Mean UCL_data_y = [UCL_a]*len(df_data['Date'] ) #make list equal to number of data with UCL LCL_data_y = [LCL_a]*len(df_data['Date']) #make list equal to number of data with LCL MEAN_data_y = [MEAN_a]*len(df_data['Date']) #make list equal to number of data with MEAN UCL_afterline = df_data['CIVPART'][int(date_value):].mean()+(standard_deviation_level*df_data['CIVPART'][int(date_value):].std()) #UCL after line LCL_afterline = df_data['CIVPART'][int(date_value):].mean()-(standard_deviation_level*df_data['CIVPART'][int(date_value):].std()) #LCL after line Mean_afterline = df_data['CIVPART'][int(date_value):].mean() #Mean after line UCL_data_y[int(date_value):] = [UCL_afterline]*len(UCL_data_y[int(date_value):]) #Replace UCL after line data with new values LCL_data_y[int(date_value):] = [LCL_afterline]*len(LCL_data_y[int(date_value):]) #Replace LCL after line data with new values MEAN_data_y[int(date_value):] = [Mean_afterline]*len(MEAN_data_y[int(date_value):]) #Replace MEAN after line data with new values df_data['UCL'] = UCL_data_y df_data['LCL'] = LCL_data_y df_data['MEAN'] = MEAN_data_y #Set formatting of point outside the standard deviation df_color = [] for a in range(len(df_data)): if df_data['CIVPART'][a]>df_data['UCL'][a]: df_color.append('Red') elif df_data['CIVPART'][a]<df_data['LCL'][a]: df_color.append('Red') else: df_color.append('Blue') #Set formatting, axis labels and heading df_data['color'] = df_color fig = go.Figure() 
fig.add_trace(go.Scatter(x=df_data['Date'], y=df_data['CIVPART'],marker_color=df_color, mode='lines+markers', name='Labor_Force_Participation',)) #Display CIVPART data fig.add_trace(go.Scatter(x=df_data['Date'] ,y=UCL_data_y, mode='lines', name='UCL',line=dict(color='red', width=2))) #UCL Line fig.add_trace(go.Scatter(x=df_data['Date'] ,y=LCL_data_y, mode='lines', name='LCL',line=dict(color='red', width=2))) #LCL Line fig.add_trace(go.Scatter(x=df_data['Date'] ,y=MEAN_data_y, mode='lines', name='Mean',line=dict(color='green', width=4))) #MEAN Line #Insert custom vertical line at point of disruption fig.update_layout( shapes=[ dict( type= 'line', yref= 'paper', y0= 0, y1= 1, xref= 'x', x0=df_data['Date'][int(date_value)-1],x1=df_data['Date'][int(date_value)-1], ##Set the line at user specified date line=dict( color="black", width=3, dash="dot", ) ) ], autosize=True, height=600, title="Labor Force Participation, April 2020", xaxis_title="Date", yaxis_title="% Participation", margin=dict( l=10, r=50, b=50, t=100, pad=4 ), font=dict( family="Amiri", ), paper_bgcolor="LightSteelBlue", ) #Write output to HTML fig.show() fig.write_html('Control Chart Visualization_1.html') ``` # Thanks for your time! Feedback welcome, stay in touch - Twitter: @Tjido
github_jupyter
# Deep Learning Intro ``` %matplotlib inline import matplotlib.pyplot as plt import pandas as pd import numpy as np ``` ## Shallow and Deep Networks ``` from sklearn.datasets import make_moons X, y = make_moons(n_samples=1000, noise=0.1, random_state=0) plt.plot(X[y==0, 0], X[y==0, 1], 'ob', alpha=0.5) plt.plot(X[y==1, 0], X[y==1, 1], 'xr', alpha=0.5) plt.legend(['0', '1']) X.shape from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) from keras.models import Sequential from keras.layers import Dense from keras.optimizers import SGD, Adam ``` ### Shallow Model ``` model = Sequential() model.add(Dense(1, input_shape=(2,), activation='sigmoid')) model.compile(Adam(lr=0.05), 'binary_crossentropy', metrics=['accuracy']) model.fit(X_train, y_train, epochs=200, verbose=0) results = model.evaluate(X_test, y_test) results print("The Accuracy score on the Train set is:\t{:0.3f}".format(results[1])) def plot_decision_boundary(model, X, y): amin, bmin = X.min(axis=0) - 0.1 amax, bmax = X.max(axis=0) + 0.1 hticks = np.linspace(amin, amax, 101) vticks = np.linspace(bmin, bmax, 101) aa, bb = np.meshgrid(hticks, vticks) ab = np.c_[aa.ravel(), bb.ravel()] c = model.predict(ab) cc = c.reshape(aa.shape) plt.figure(figsize=(12, 8)) plt.contourf(aa, bb, cc, cmap='bwr', alpha=0.2) plt.plot(X[y==0, 0], X[y==0, 1], 'ob', alpha=0.5) plt.plot(X[y==1, 0], X[y==1, 1], 'xr', alpha=0.5) plt.legend(['0', '1']) plot_decision_boundary(model, X, y) ``` ### Deep model ``` model = Sequential() model.add(Dense(4, input_shape=(2,), activation='tanh')) model.add(Dense(2, activation='tanh')) model.add(Dense(1, activation='sigmoid')) model.compile(Adam(lr=0.05), 'binary_crossentropy', metrics=['accuracy']) model.fit(X_train, y_train, epochs=100, verbose=0) model.evaluate(X_test, y_test) from sklearn.metrics import accuracy_score, confusion_matrix y_train_pred = model.predict_classes(X_train) y_test_pred = 
model.predict_classes(X_test) print("The Accuracy score on the Train set is:\t{:0.3f}".format(accuracy_score(y_train, y_train_pred))) print("The Accuracy score on the Test set is:\t{:0.3f}".format(accuracy_score(y_test, y_test_pred))) plot_decision_boundary(model, X, y) ``` ## Multiclass classification ### The Iris dataset ``` df = pd.read_csv('../data/iris.csv') import seaborn as sns sns.pairplot(df, hue="species") df.head() X = df.drop('species', axis=1) X.head() target_names = df['species'].unique() target_names target_dict = {n:i for i, n in enumerate(target_names)} target_dict y= df['species'].map(target_dict) y.head() from keras.utils.np_utils import to_categorical y_cat = to_categorical(y) y_cat[:10] X_train, X_test, y_train, y_test = train_test_split(X.values, y_cat, test_size=0.2) model = Sequential() model.add(Dense(3, input_shape=(4,), activation='softmax')) model.compile(Adam(lr=0.1), loss='categorical_crossentropy', metrics=['accuracy']) model.fit(X_train, y_train, epochs=20, validation_split=0.1) y_pred = model.predict(X_test) y_pred[:5] y_test_class = np.argmax(y_test, axis=1) y_pred_class = np.argmax(y_pred, axis=1) from sklearn.metrics import classification_report print(classification_report(y_test_class, y_pred_class)) confusion_matrix(y_test_class, y_pred_class) ``` ## Exercise 1 The [Pima Indians dataset](https://archive.ics.uci.edu/ml/datasets/Pima+Indians+Diabetes) is a very famous dataset distributed by UCI and originally collected from the National Institute of Diabetes and Digestive and Kidney Diseases. It contains data from clinical exams for women age 21 and above of Pima indian origins. The objective is to predict based on diagnostic measurements whether a patient has diabetes. 
It has the following features: - Pregnancies: Number of times pregnant - Glucose: Plasma glucose concentration a 2 hours in an oral glucose tolerance test - BloodPressure: Diastolic blood pressure (mm Hg) - SkinThickness: Triceps skin fold thickness (mm) - Insulin: 2-Hour serum insulin (mu U/ml) - BMI: Body mass index (weight in kg/(height in m)^2) - DiabetesPedigreeFunction: Diabetes pedigree function - Age: Age (years) The last colum is the outcome, and it is a binary variable. In this first exercise we will explore it through the following steps: 1. Load the ..data/diabetes.csv dataset, use pandas to explore the range of each feature - For each feature draw a histogram. Bonus points if you draw all the histograms in the same figure. - Explore correlations of features with the outcome column. You can do this in several ways, for example using the `sns.pairplot` we used above or drawing a heatmap of the correlations. - Do features need standardization? If so what stardardization technique will you use? MinMax? Standard? - Prepare your final `X` and `y` variables to be used by a ML model. Make sure you define your target variable well. Will you need dummy columns? ## Exercise 2 Build a fully connected NN model that predicts diabetes. Follow these steps: 1. Split your data in a train/test with a test size of 20% and a `random_state = 22` - define a sequential model with at least one inner layer. You will have to make choices for the following things: - what is the size of the input? - how many nodes will you use in each layer? - what is the size of the output? - what activation functions will you use in the inner layers? - what activation function will you use at output? - what loss function will you use? - what optimizer will you use? 
- fit your model on the training set, using a validation_split of 0.1 - test your trained model on the test data from the train/test split - check the accuracy score, the confusion matrix and the classification report ## Exercise 3 Compare your work with the results presented in [this notebook](https://www.kaggle.com/futurist/d/uciml/pima-indians-diabetes-database/pima-data-visualisation-and-machine-learning). Are your Neural Network results better or worse than the results obtained by traditional Machine Learning techniques? - Try training a Support Vector Machine or a Random Forest model on the exact same train/test split. Is the performance better or worse? - Try restricting your features to only 4 features like in the suggested notebook. How does model performance change? ## Exercise 4 [Tensorflow playground](http://playground.tensorflow.org/) is a web based neural network demo. It is really useful to develop an intuition about what happens when you change architecture, activation function or other parameters. Try playing with it for a few minutes. You don't need do understand the meaning of every knob and button in the page, just get a sense for what happens if you change something. In the next chapter we'll explore these things in more detail.
github_jupyter
``` import sys sys.path.append('../../../GraphGallery/') sys.path.append('../../../GraphAdv/') import tensorflow as tf import numpy as np import networkx as nx import scipy.sparse as sp from graphgallery.nn.models import GCN from graphgallery.nn.functions import softmax from graphadv.attack.targeted import IGA import matplotlib.pyplot as plt plt.style.use(['no-latex', 'ieee']) from graphgallery.data import NPZDataset data = NPZDataset('citeseer', root="~/GraphData/datasets/", verbose=False, standardize=True) adj, x, labels = data.graph.unpack() idx_train, idx_val, idx_test = data.split(random_state=15) n_classes = labels.max() + 1 target = 0 assert target in idx_test print(f"Attack target {target} with class label {labels[target]}") attacker = IGA(adj, x, labels, idx_train, seed=None, surrogate_args={'idx_val':idx_val}) attacker.reset() attacker.attack(target, direct_attack=True, structure_attack=True, feature_attack=False) # show logs attacker.show_edge_flips(detail=True) ``` # Before Attack ``` model = GCN(adj, x, labels, device='GPU', norm_x=None, seed=42) model.build() his = model.train(idx_train, idx_val, verbose=1, epochs=100) softmax(model.predict(target).ravel()) ``` # After Attack ``` model = GCN(attacker.A, x, labels, device='GPU', norm_x=None, seed=42) model.build() his = model.train(idx_train, idx_val, verbose=1, epochs=100) softmax(model.predict(target).ravel()) ``` # Visulation ``` def evaluate(adj, x, retrain_iters=5): classification_margins = [] class_distrs = [] for _ in range(retrain_iters): print(f"... 
{_+1}/{retrain_iters} ") model = GCN(adj, x, labels, device='GPU', norm_x=None, seed=None) model.build() his = model.train(idx_train, idx_val, verbose=0, epochs=100) logit = softmax(model.predict(target).ravel()) class_distrs.append(logit) best_second_class_before = (logit - labels[target]).argmax() margin = logit[labels[target]] - logit[best_second_class_before] classification_margins.append(margin) model.close del model class_distrs = np.array(class_distrs) return class_distrs retrain_iters = 5 print("Before Attack") class_distrs_clean = evaluate(adj, x, retrain_iters=retrain_iters) print(f"After {attacker.name} Attack") class_distrs_retrain = evaluate(attacker.A, x, retrain_iters=retrain_iters) def make_xlabel(ix, correct): if ix == correct: return "Class {}\n(correct)".format(ix) return "Class {}".format(ix) figure = plt.figure(figsize=(12, 4)) plt.subplot(1, 2, 1) center_ixs_clean = [] for ix, block in enumerate(class_distrs_clean.T): x_ixs = np.arange(len(block)) + ix*(len(block)+2) center_ixs_clean.append(np.mean(x_ixs)) color = '#555555' if ix == labels[target]: color = 'darkgreen' plt.bar(x_ixs, block, color=color) ax = plt.gca() plt.ylim((-.05, 1.05)) plt.ylabel("Predicted probability") ax.set_xticks(center_ixs_clean) ax.set_xticklabels([make_xlabel(k, labels[target]) for k in range(n_classes)]) ax.set_title(f"Predicted class probabilities for node {target} on clean data\n({retrain_iters} re-trainings)") fig = plt.subplot(1, 2, 2) center_ixs_retrain = [] for ix, block in enumerate(class_distrs_retrain.T): x_ixs = np.arange(len(block)) + ix*(len(block)+2) center_ixs_retrain.append(np.mean(x_ixs)) color = '#555555' if ix == labels[target]: color = 'darkgreen' plt.bar(x_ixs, block, color=color) ax = plt.gca() plt.ylim((-.05, 1.05)) ax.set_xticks(center_ixs_retrain) ax.set_xticklabels([make_xlabel(k, labels[target]) for k in range(n_classes)]) ax.set_title(f"Predicted class probabilities for node {target} after {attacker.n_perturbations} 
perturbations\n({retrain_iters} re-trainings)") plt.tight_layout() plt.show() ```
github_jupyter
``` import numpy as np import pandas as pd from sklearn.datasets import load_wine from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, roc_curve from sklearn.utils import shuffle import matplotlib.pyplot as plt wine = load_wine() # store the feature matrix (X) and response vector (y) x = wine.data y = wine.target # splitting X and y into training and testing sets x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state = 42,shuffle = True, stratify = y) x_train y_train def classwise_distribution (arr): unique_elements, counts_elements = np.unique(arr, return_counts=True) plt.bar(unique_elements, counts_elements, 1) plt.title('Class Frequency') plt.xlabel('Class') plt.ylabel('Frequency') plt.show() def fit(X, Y): X = pd.DataFrame(X, columns=wine.feature_names) Y = pd.DataFrame(Y).loc[:,0] mean = X.groupby(by=Y).mean() var = X.groupby(by=Y).var() mean = np.array(mean) var = np.array(var) return mean, var def cond_prob_xC (v, mean, var): prob = (1/np.sqrt(2*np.pi*var))*np.exp(-1*np.square(v - mean)/(2*var)) return prob def pri_prob (y_train): val, count = np.unique(y_train, return_counts=True) pri_prob = (count/len(y_train)).reshape((len(val), 1)) return pri_prob def po_prob (prob, prior_prob): prob = np.prod(prob, axis = 1).reshape((3,1)) post_prob = prob * prior_prob return post_prob def predict(x_train, y_train, x_test, y_test ): print('class-wise distribution of Training Set') classwise_distribution (y_train) print('=============================') print('class-wise distribution of Test Set') classwise_distribution (y_test) print('=============================') print('Initiating Prediction') print('=============================') mean, var = fit(x_train, y_train) print("Mean of the training data is") print(mean) print('Varince of the training data is') print(var) print('=============================') print('Calculating Prior Probability') prior_prob = 
pri_prob(y_train) print("Prior Probability of training data is") print(prior_prob) print('=============================') y_score = np.zeros((len(y_test), (len(np.unique(y_test))))) pred = np.zeros(shape=(1, len(x_test))) for i in range (len(x_test)): cond_prob = cond_prob_xC (x_test[i], mean, var) post_prob = po_prob (cond_prob, prior_prob) y_score[i] = post_prob.reshape(3,) result, _ = np.where(post_prob == np.amax(post_prob)) pred[0][i] = result print("Prediction on the training data is as follows") print(pred) print('=============================') print("Calculating Accuracy") truth = pred == y_test count = np.count_nonzero(truth) acc = (count/len(y_test)) * 100 print("Accuracy achieved is:", acc) print("=============================") print("Printing confusion matrix") print(confusion_matrix(y_test, pred.reshape(len(y_test),))) return pred.reshape(len(y_test),) pred = predict(x_train, y_train, x_test, y_test ) def predict_1(x_train, y_train, x_test, y_test ): print('class-wise distribution of Training Set') classwise_distribution (y_train) print('=============================') print('class-wise distribution of Test Set') classwise_distribution (y_test) print('=============================') print('Initiating Prediction') print('=============================') mean, var = fit(x_train, y_train) print("Mean of the training data is") print(mean) print('Varince of the training data is') print(var) print('=============================') print('Calculating Prior Probability') prior_prob = np.array([[40], [40], [20]]) print("Prior Probability of training data is") print(prior_prob) print('=============================') y_score = np.zeros((len(y_test), (len(np.unique(y_test))))) pred = np.zeros(shape=(1, len(x_test))) for i in range (len(x_test)): cond_prob = cond_prob_xC (x_test[i], mean, var) post_prob = po_prob (cond_prob, prior_prob) y_score[i] = post_prob.reshape(3,) result, _ = np.where(post_prob == np.amax(post_prob)) pred[0][i] = result print("Prediction on 
the training data is as follows") print(pred) print('=============================') print("Calculating Accuracy") truth = pred == y_test count = np.count_nonzero(truth) acc = (count/len(y_test)) * 100 print("Accuracy achieved is:", acc) print("=============================") print("Printing confusion matrix") print(confusion_matrix(y_test, pred.reshape(len(y_test),))) return pred.reshape(len(y_test),) pred = predict_1(x_train, y_train, x_test, y_test ) def predict_2(x_train, y_train, x_test, y_test ): print('class-wise distribution of Training Set') classwise_distribution (y_train) print('=============================') print('class-wise distribution of Test Set') classwise_distribution (y_test) print('=============================') print('Initiating Prediction') print('=============================') mean, var = fit(x_train, y_train) print("Mean of the training data is") print(mean) print('Varince of the training data is') print(var) print('=============================') print('Calculating Prior Probability') prior_prob = np.array([[80], [100], [100]]) print("Prior Probability of training data is") print(prior_prob) print('=============================') y_score = np.zeros((len(y_test), (len(np.unique(y_test))))) pred = np.zeros(shape=(1, len(x_test))) for i in range (len(x_test)): cond_prob = cond_prob_xC (x_test[i], mean, var) post_prob = po_prob (cond_prob, prior_prob) y_score[i] = post_prob.reshape(3,) result, _ = np.where(post_prob == np.amax(post_prob)) pred[0][i] = result print("Prediction on the training data is as follows") print(pred) print('=============================') print("Calculating Accuracy") truth = pred == y_test count = np.count_nonzero(truth) acc = (count/len(y_test)) * 100 print("Accuracy achieved is:", acc) print("=============================") print("Printing confusion matrix") print(confusion_matrix(y_test, pred.reshape(len(y_test),))) return pred.reshape(len(y_test),) pred = predict_2(x_train, y_train, x_test, y_test ) ```
github_jupyter
<a href="https://colab.research.google.com/github/julianovale/project_trains/blob/master/Genetico_Job_Shop.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` ! pip install chart_studio import os if not os.path.isfile('/content/JSP_dataset.xlsx'): ! wget https://github.com/julianovale/project_trains/raw/master/dados/JSP_dataset.xlsx ! ls ''' Solving job shop scheduling problem by gentic algorithm ''' # importing required modules import pandas as pd import numpy as np import matplotlib.pyplot as plt import pandas as pd import chart_studio.plotly as py import plotly.figure_factory as ff import datetime import time import copy import openpyxl as xl import json def data_excel_json(excel_sheet): """ convert excel into json """ data_excel = xl.load_workbook(excel_sheet) data = {} sheet_name = data_excel.sheetnames for sheet in sheet_name: wb_sheet = data_excel[sheet] cell_values = wb_sheet.values df = pd.DataFrame(cell_values, columns=next(cell_values)) df.iloc[:, 0] = df.iloc[:, 0].apply(lambda x : x.strip()) df.index = df.iloc[:, 0] df.drop(columns = df.columns[0], inplace=True) data[sheet] = df.T.to_dict() return data def json_to_df(json_data): """ convert json into excel """ dict_data = {} for key in json_data.keys(): dict_data[key] = pd.DataFrame(json_data.get(key)).T return dict_data def generate_initial_population(population_size, num_gene): """ generate initial population for Genetic Algorithm """ best_list, best_obj = [], [] population_list = [] makespan_record = [] for i in range(population_size): nxm_random_num = list(np.random.permutation(num_gene)) # generate a random permutation of 0 to num_job*num_mc-1 population_list.append(nxm_random_num) # add to the population_list for j in range(num_gene): population_list[i][j] = population_list[i][j] % num_job # convert to job number format, every job appears m times return population_list def job_schedule(data_dict, population_size = 30, 
crossover_rate = 0.8, mutation_rate = 0.2, mutation_selection_rate = 0.2, num_iteration = 2000): """ initialize genetic algorithm parameters and read data """ data_json = json_to_df(data_dict) machine_sequence_tmp = data_json['Machines Sequence'] process_time_tmp = data_json['Processing Time'] df_shape = process_time_tmp.shape num_machines = df_shape[1] # number of machines num_job = df_shape[0] # number of jobs num_gene = num_machines * num_job # number of genes in a chromosome num_mutation_jobs = round(num_gene * mutation_selection_rate) process_time = [list(map(int, process_time_tmp.iloc[i])) for i in range(num_job)] machine_sequence = [list(map(int, machine_sequence_tmp.iloc[i])) for i in range(num_job)] #start_time = time.time() Tbest = 999999999999999 best_list, best_obj = [], [] population_list = [] makespan_record = [] for i in range(population_size): nxm_random_num = list(np.random.permutation(num_gene)) # generate a random permutation of 0 to num_job*num_mc-1 population_list.append(nxm_random_num) # add to the population_list for j in range(num_gene): population_list[i][j] = population_list[i][j] % num_job # convert to job number format, every job appears m times #population_list = generate_initial_population(population_size=population_size, num_gene=num_gene) for iteration in range(num_iteration): Tbest_now = 99999999999 """ Two Point Cross-Over """ parent_list = copy.deepcopy(population_list) offspring_list = copy.deepcopy(population_list) # generate a random sequence to select the parent chromosome to crossover pop_random_size = list(np.random.permutation(population_size)) for size in range(int(population_size/2)): crossover_prob = np.random.rand() if crossover_rate >= crossover_prob: parent_1 = population_list[pop_random_size[2*size]][:] parent_2 = population_list[pop_random_size[2*size+1]][:] child_1 = parent_1[:] child_2 = parent_2[:] cutpoint = list(np.random.choice(num_gene, 2, replace=False)) cutpoint.sort() child_1[cutpoint[0]:cutpoint[1]] = 
parent_2[cutpoint[0]:cutpoint[1]] child_2[cutpoint[0]:cutpoint[1]] = parent_1[cutpoint[0]:cutpoint[1]] offspring_list[pop_random_size[2*size]] = child_1[:] offspring_list[pop_random_size[2*size+1]] = child_2[:] for pop in range(population_size): """ Repairment """ job_count = {} larger, less = [], [] # 'larger' record jobs appear in the chromosome more than pop times, and 'less' records less than pop times. for job in range(num_job): if job in offspring_list[pop]: count = offspring_list[pop].count(job) pos = offspring_list[pop].index(job) job_count[job] = [count, pos] # store the above two values to the job_count dictionary else: count = 0 job_count[job] = [count, 0] if count > num_machines: larger.append(job) elif count < num_machines: less.append(job) for large in range(len(larger)): change_job = larger[large] while job_count[change_job][0] > num_machines: for les in range(len(less)): if job_count[less[les]][0] < num_machines: offspring_list[pop][job_count[change_job][1]] = less[les] job_count[change_job][1] = offspring_list[pop].index(change_job) job_count[change_job][0] = job_count[change_job][0]-1 job_count[less[les]][0] = job_count[less[les]][0]+1 if job_count[change_job][0] == num_machines: break for off_spring in range(len(offspring_list)): """ Mutations """ mutation_prob = np.random.rand() if mutation_rate >= mutation_prob: m_change = list(np.random.choice(num_gene, num_mutation_jobs, replace=False)) # chooses the position to mutation t_value_last = offspring_list[off_spring][m_change[0]] # save the value which is on the first mutation position for i in range(num_mutation_jobs-1): offspring_list[off_spring][m_change[i]] = offspring_list[off_spring][m_change[i+1]] # displacement # move the value of the first mutation position to the last mutation position offspring_list[off_spring][m_change[num_mutation_jobs-1]] = t_value_last """ fitness value (calculate makespan) """ total_chromosome = copy.deepcopy(parent_list) + copy.deepcopy(offspring_list) # parent 
and offspring chromosomes combination chrom_fitness, chrom_fit = [], [] total_fitness = 0 for pop_size in range(population_size*2): j_keys = [j for j in range(num_job)] key_count = {key:0 for key in j_keys} j_count = {key:0 for key in j_keys} m_keys = [j+1 for j in range(num_machines)] m_count = {key:0 for key in m_keys} for i in total_chromosome[pop_size]: gen_t = int(process_time[i][key_count[i]]) gen_m = int(machine_sequence[i][key_count[i]]) j_count[i] = j_count[i] + gen_t m_count[gen_m] = m_count[gen_m] + gen_t if m_count[gen_m] < j_count[i]: m_count[gen_m] = j_count[i] elif m_count[gen_m] > j_count[i]: j_count[i] = m_count[gen_m] key_count[i] = key_count[i] + 1 makespan = max(j_count.values()) chrom_fitness.append(1/makespan) chrom_fit.append(makespan) total_fitness = total_fitness + chrom_fitness[pop_size] """ Selection (roulette wheel approach) """ pk, qk = [], [] for size in range(population_size * 2): pk.append(chrom_fitness[size] / total_fitness) for size in range(population_size * 2): cumulative = 0 for j in range(0, size+1): cumulative = cumulative + pk[j] qk.append(cumulative) selection_rand = [np.random.rand() for i in range(population_size)] for pop_size in range(population_size): if selection_rand[pop_size] <= qk[0]: population_list[pop_size] = copy.deepcopy(total_chromosome[0]) else: for j in range(0, population_size * 2-1): if selection_rand[pop_size] > qk[j] and selection_rand[pop_size] <= qk[j+1]: population_list[pop_size] = copy.deepcopy(total_chromosome[j+1]) break """ comparison """ for pop_size in range(population_size * 2): if chrom_fit[pop_size] < Tbest_now: Tbest_now = chrom_fit[pop_size] sequence_now = copy.deepcopy(total_chromosome[pop_size]) if Tbest_now <= Tbest: Tbest = Tbest_now sequence_best = copy.deepcopy(sequence_now) makespan_record.append(Tbest) """ Results - Makespan """ print("optimal sequence", sequence_best) print("optimal value:%f"%Tbest) print("\n") #print('the elapsed time:%s'% (time.time() - start_time)) #%matplotlib 
inline plt.plot([i for i in range(len(makespan_record))],makespan_record,'b') plt.ylabel('makespan', fontsize=15) plt.xlabel('generation', fontsize=15) plt.show() """ plot gantt chart """ m_keys = [j+1 for j in range(num_machines)] j_keys = [j for j in range(num_job)] key_count = {key:0 for key in j_keys} j_count = {key:0 for key in j_keys} m_count = {key:0 for key in m_keys} j_record = {} for i in sequence_best: gen_t = int(process_time[i][key_count[i]]) gen_m = int(machine_sequence[i][key_count[i]]) j_count[i] = j_count[i] + gen_t m_count[gen_m] = m_count[gen_m] + gen_t if m_count[gen_m] < j_count[i]: m_count[gen_m] = j_count[i] elif m_count[gen_m] > j_count[i]: j_count[i] = m_count[gen_m] start_time = str(datetime.timedelta(seconds = j_count[i] - process_time[i][key_count[i]])) # convert seconds to hours, minutes and seconds end_time = str(datetime.timedelta(seconds = j_count[i])) j_record[(i, gen_m)] = [start_time, end_time] key_count[i] = key_count[i] + 1 df = [] for m in m_keys: for j in j_keys: df.append(dict(Task='Machine %s'%(m), Start='2020-02-01 %s'%(str(j_record[(j,m)][0])), \ Finish='2020-02-01 %s'%(str(j_record[(j,m)][1])),Resource='Job %s'%(j+1))) df_ = pd.DataFrame(df) df_.Start = pd.to_datetime(df_['Start']) df_.Finish = pd.to_datetime(df_['Finish']) start = df_.Start.min() end = df_.Finish.max() df_.Start = df_.Start.apply(lambda x: x.strftime('%Y-%m-%dT%H:%M:%S')) df_.Finish = df_.Finish.apply(lambda x: x.strftime('%Y-%m-%dT%H:%M:%S')) data = df_.to_dict('record') final_data ={ 'start':start.strftime('%Y-%m-%dT%H:%M:%S'), 'end':end.strftime('%Y-%m-%dT%H:%M:%S'), 'data':data} fig = ff.create_gantt(df, index_col='Resource', show_colorbar=True, group_tasks=True, showgrid_x=True, title='Job shop Schedule') fig.show() #iplot(fig, filename='GA_job_shop_scheduling') return final_data, df %%time """ Job_Shop_Schedule """ data = data_excel_json('JSP_dataset.xlsx') schedule = job_schedule(data_dict=data) # JSON to draw gantt chart schedule[0] import 
chart_studio.plotly as py import plotly.figure_factory as ff df = schedule[1] fig = ff.create_gantt(df, index_col='Resource', show_colorbar=True, group_tasks=True, showgrid_x=True, title='Job shop Schedule') fig.show() ```
github_jupyter
# HAND SIGN DATASET ## Introduction The dataset format is patterned to match closely with the classic MNIST. Each training and test case represents a label (0-25) as a one-to-one map for each alphabet letter A-Z (and no cases for 9=J or 25=Z because of gesture motions). The training data (27,455 cases) and test data (7172 cases) are approximately half the size of the standard MNIST but otherwise similar with a header row of label, pixel1,pixel2....pixel784 which represent a single 28x28 pixel image with grayscale values between 0-255. The original hand gesture image data represented multiple users repeating the gesture against different backgrounds. ## Tools 1.Python 2.scikit-learn / sklearn 3.Pandas 4.NumPy 5.matplotlib 6.Jupyter ## Objectives (A.)View the data as an image (B.)Train different classifiers (C.)Compare performance for different classifiers using various metrics ``` #importing libraries import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns ``` ## Data Exploration ``` #reading csv file df=pd.read_csv('sign_mnist_train.csv') #shape of data df.shape df.describe() #finding if any null values df.isnull().values.any() #defining correlation using heat maop corr_m = df.corr() sns.heatmap(corr_m) df.head() #plotting the total number of each type of label in data sns.countplot(df['label']) plt.show() X = df.iloc[:,1:] Y = df.iloc[:,0] print(Y) #forming pictures from pixels first = X.iloc[1,:] second = X.iloc[2,:] third = X.iloc[3,:] fourth = X.iloc[4,:] first = np.array(first , dtype='float') second = np.array(second , dtype='float') pixel = first.reshape((28,28)) pixel2 = second.reshape((28,28)) third = np.array(third , dtype='float') pixel3 = third.reshape((28,28)) fourth = np.array(fourth , dtype='float') pixel4 = fourth.reshape((28,28)) plt.imshow(pixel) plt.show() plt.imshow(pixel2) plt.show() plt.imshow(pixel3) plt.show() plt.imshow(pixel4) plt.show() print(Y.iloc[1]) ``` ### Splitting The Data ``` from 
sklearn.model_selection import train_test_split X_train, X_test, Y_train, Y_test = train_test_split(X,Y,test_size = 0.2,random_state = 0) ``` ## KNN In pattern recognition, the k-nearest neighbors algorithm (k-NN) is a non-parametric method used for classification and regression. In both cases, the input consists of the k closest training examples in the feature space. The output depends on whether k-NN is used for classification or regression: In k-NN classification, the output is a class membership. An object is classified by a plurality vote of its neighbors, with the object being assigned to the class most common among its k nearest neighbors (k is a positive integer, typically small). If k = 1, then the object is simply assigned to the class of that single nearest neighbor. In k-NN regression, the output is the property value for the object. This value is the average of the values of k nearest neighbors. ``` from sklearn.neighbors import KNeighborsClassifier # instantiate classifier = KNeighborsClassifier() # fitting the data classifier.fit(X_train,Y_train) # predict Y_pred=classifier.predict(X_test) from sklearn.metrics import confusion_matrix cm = confusion_matrix(Y_test,Y_pred) sns.heatmap(cm) from sklearn.metrics import accuracy_score #accuracy score ascore=accuracy_score(Y_test , Y_pred , normalize=True) print(ascore) from sklearn.metrics import f1_score #f1_score score=f1_score(Y_pred, Y_test,average='weighted') print(score) ``` ## RandomForest Random forests or random decision forests are an ensemble learning method for classification, regression and other tasks that operates by constructing a multitude of decision trees at training time and outputting the class that is the mode of the classes (classification) or mean prediction (regression) of the individual trees. 
Random decision forests correct for decision trees' habit of overfitting to their training set ``` from sklearn.ensemble import RandomForestClassifier # instantiate rc = RandomForestClassifier() # fitting the data rc.fit(X_train , Y_train) # predict rc_pred = rc.predict(X_test) ascore2=accuracy_score(Y_test , rc_pred) print(ascore2) score2=f1_score(Y_test, rc_pred,average='weighted') print(score2) ``` ## SVM In machine learning, support-vector machines (SVMs, also support-vector networks) are supervised learning models with associated learning algorithms that analyze data used for classification and regression analysis. Given a set of training examples, each marked as belonging to one or the other of two categories, an SVM training algorithm builds a model that assigns new examples to one category or the other, making it a non-probabilistic binary linear classifier (although methods such as Platt scaling exist to use SVM in a probabilistic classification setting). A SVM model is a representation of the examples as points in space, mapped so that the examples of the separate categories are divided by a clear gap that is as wide as possible. New examples are then mapped into that same space and predicted to belong to a category based on which side of the gap they fall. In addition to performing linear classification, SVMs can efficiently perform a non-linear classification using what is called the kernel trick, implicitly mapping their inputs into high-dimensional feature spaces. ``` from sklearn.svm import SVC # instantiate svc = SVC() # fitting the data svc.fit(X_train , Y_train) # predict sv_pred = svc.predict(X_test) ascore3=accuracy_score(Y_test , sv_pred) print(ascore3) score3=f1_score(Y_test, sv_pred,average='weighted') print(score3) ``` ## Naive Bayes In machine learning, naive Bayes classifiers are a family of simple "probabilistic classifiers" based on applying Bayes' theorem with strong (naive) independence assumptions between the features. 
Naive Bayes is a simple technique for constructing classifiers: models that assign class labels to problem instances, represented as vectors of feature values, where the class labels are drawn from some finite set. There is not a single algorithm for training such classifiers, but a family of algorithms based on a common principle: all naive Bayes classifiers assume that the value of a particular feature is independent of the value of any other feature, given the class variable. For example, a fruit may be considered to be an apple if it is red, round, and about 10 cm in diameter. A naive Bayes classifier considers each of these features to contribute independently to the probability that this fruit is an apple, regardless of any possible correlations between the color, roundness, and diameter features. ``` from sklearn.naive_bayes import GaussianNB # instantiate obj = GaussianNB() #fitting the data obj.fit(X_train,Y_train) # predict Y_pred = obj.predict(X_test) ascore4=accuracy_score(Y_test,Y_pred) print(ascore4) score4=f1_score(Y_test, Y_pred,average='weighted') print(score4) ``` ## MultinomialNB ``` from sklearn.naive_bayes import MultinomialNB # instantiate ob = MultinomialNB() # fitting the data ob.fit(X_train,Y_train) # predict Y_pred = ob.predict(X_test) ascore5=accuracy_score(Y_test,Y_pred) print(ascore5) score5=f1_score(Y_test, Y_pred,average='weighted') print(score5) ``` ## Decision Tree Classifier Decision tree learning is a method commonly used in data mining.[1] The goal is to create a model that predicts the value of a target variable based on several input variables. An example is shown in the diagram at right. Each interior node corresponds to one of the input variables; there are edges to children for each of the possible values of that input variable. Each leaf represents a value of the target variable given the values of the input variables represented by the path from the root to the leaf. 
``` from sklearn.tree import DecisionTreeClassifier # instantiate dtc = DecisionTreeClassifier() # fitting the data dtc.fit(X_train, Y_train) # predict Y_pred = dtc.predict(X_test) #accuracy ascore6=accuracy_score(Y_test,Y_pred) print(ascore6) # f1 score score6 = f1_score(Y_pred, Y_test,average='weighted') print(score6) ``` ## Conclusion ``` Accuracy = [ascore,ascore2,ascore3,ascore4,ascore5,ascore6] data1 = { 'Accuracy':Accuracy, 'Algorithm': ['KNN','Random Forest Classifier','SVM linear',"Naive Baye's","MultinominalNB",'Decision Tree']} df1 = pd.DataFrame(data1) F1_score = [score,score2,score3,score4,score5,score6] data2 = { 'F1_score':F1_score, 'Algorithm': ['KNN','Random Forest Classifier','SVM linear',"Naive Baye's","MultinominalNB",'Decision Tree']} df2 = pd.DataFrame(data2) sns.barplot(x = df1.Accuracy, y = df1.Algorithm) sns.barplot(x = df2.F1_score, y = df2.Algorithm) ```
github_jupyter
... ***CURRENTLY UNDER DEVELOPMENT*** ... ## Synthetic simulation of historical TCs parameters using Gaussian copulas (Rueda et al. 2016) and subsequent selection of representative cases using Maximum Dissimilarity (MaxDiss) algorithm (Camus et al. 2011) inputs required: * Historical TC parameters that affect the site (output of *notebook 05*) * number of synthetic simulations to run * number of representative cases to be selected using MaxDiss in this notebook: * synthetic generation of TCs tracks based on gaussian copulas of the TC parameters * MDA selection of representative number of events ``` #!/usr/bin/env python # -*- coding: utf-8 -*- # common import os import os.path as op # pip import xarray as xr import numpy as np # DEV: override installed teslakit import sys sys.path.insert(0, op.join(os.path.abspath(''), '..', '..', '..')) # teslakit from teslakit.database import Database from teslakit.statistical import CopulaSimulation from teslakit.mda import MaxDiss_Simplified_NoThreshold from teslakit.plotting.storms import Plot_TCs_Params_MDAvsSIM, \ Plot_TCs_Params_HISTvsSIM, Plot_TCs_Params_HISTvsSIM_histogram ``` ## Database and Site parameters ``` # -------------------------------------- # Teslakit database p_data = r'/Users/nico/Projects/TESLA-kit/TeslaKit/data' db = Database(p_data) # set site db.SetSite('ROI') # -------------------------------------- # load data and set parameters _, TCs_r2_params = db.Load_TCs_r2_hist() # TCs parameters inside radius 2 # TCs random generation and MDA parameters num_sim_rnd = 100000 num_sel_mda = 1000 ``` ## Historical TCs - Probabilistic Simulation ``` # -------------------------------------- # Probabilistic simulation Historical TCs # aux functions def adjust_to_pareto(var): 'Fix data. 
It needs to start at 0 for Pareto adjustment ' var = var.astype(float) var_pareto = np.amax(var) - var + 0.00001 return var_pareto def adjust_from_pareto(var_base, var_pareto): 'Returns data from pareto adjustment' var = np.amax(var_base) - var_pareto + 0.00001 return var # use small radius parameters (4º) pmean = TCs_r2_params.pressure_mean.values[:] pmin = TCs_r2_params.pressure_min.values[:] gamma = TCs_r2_params.gamma.values[:] delta = TCs_r2_params.delta.values[:] vmean = TCs_r2_params.velocity_mean.values[:] # fix pressure for p pmean_p = adjust_to_pareto(pmean) pmin_p = adjust_to_pareto(pmin) # join storm parameters for copula simulation storm_params = np.column_stack( (pmean_p, pmin_p, gamma, delta, vmean) ) # statistical simulate PCs using copulas kernels = ['GPareto', 'GPareto', 'ECDF', 'ECDF', 'ECDF'] storm_params_sim = CopulaSimulation(storm_params, kernels, num_sim_rnd) # adjust back pressures from pareto pmean_sim = adjust_from_pareto(pmean, storm_params_sim[:,0]) pmin_sim = adjust_from_pareto(pmin, storm_params_sim[:,1]) # store simulated storms - parameters TCs_r2_sim_params = xr.Dataset( { 'pressure_mean':(('storm'), pmean_sim), 'pressure_min':(('storm'), pmin_sim), 'gamma':(('storm'), storm_params_sim[:,2]), 'delta':(('storm'), storm_params_sim[:,3]), 'velocity_mean':(('storm'), storm_params_sim[:,4]), }, coords = { 'storm':(('storm'), np.arange(num_sim_rnd)) }, ) print(TCs_r2_sim_params) db.Save_TCs_r2_sim_params(TCs_r2_sim_params) # Historical vs Simulated: scatter plot parameters Plot_TCs_Params_HISTvsSIM(TCs_r2_params, TCs_r2_sim_params); # Historical vs Simulated: histogram parameters Plot_TCs_Params_HISTvsSIM_histogram(TCs_r2_params, TCs_r2_sim_params); ``` ## Simulated TCs - MaxDiss classification ``` # -------------------------------------- # MaxDiss classification # get simulated parameters pmean_s = TCs_r2_sim_params.pressure_mean.values[:] pmin_s = TCs_r2_sim_params.pressure_min.values[:] gamma_s = TCs_r2_sim_params.gamma.values[:] 
delta_s = TCs_r2_sim_params.delta.values[:] vmean_s = TCs_r2_sim_params.velocity_mean.values[:] # subset, scalar and directional indexes data_mda = np.column_stack((pmean_s, pmin_s, vmean_s, delta_s, gamma_s)) ix_scalar = [0,1,2] ix_directional = [3,4] centroids = MaxDiss_Simplified_NoThreshold( data_mda, num_sel_mda, ix_scalar, ix_directional ) # store MDA storms - parameters TCs_r2_MDA_params = xr.Dataset( { 'pressure_mean':(('storm'), centroids[:,0]), 'pressure_min':(('storm'), centroids[:,1]), 'velocity_mean':(('storm'), centroids[:,2]), 'delta':(('storm'), centroids[:,3]), 'gamma':(('storm'), centroids[:,4]), }, coords = { 'storm':(('storm'), np.arange(num_sel_mda)) }, ) print(TCs_r2_MDA_params) #db.Save_TCs_r2_mda_params(TCs_r2_MDA_params) # Historical vs Simulated: scatter plot parameters Plot_TCs_Params_MDAvsSIM(TCs_r2_MDA_params, TCs_r2_sim_params); ``` ## Historical TCs (MDA centroids) Waves Simulation Waves data is generated by numerically simulating selected storms. This methodology is not included inside teslakit python library. This step needs to be done before continuing with notebook 07
github_jupyter
Candidate Site Identification for Classification === Identify of candidate sites for the purposes of broader classification. Using these sampling restrictions: - Health condition is "cancer" - Site's "isDeleted" != 1 - Age of site is "adult" or "teen" - Site's createdAt > 2009-01-01 - Site's last journal post is < 2016-04-01 - Last journal createdAt - first journal createdAt > 1 month - Num journals in the site >= 5 - Created by the patient * We use the proportion of posts that are patient-authored according to the authorship classifier. * We require that 95% of the posts are patient-authored. * We are currently assessing if this threshold should be lowered or increased in complexity in some way. Previously, we had also required the following: - If authorship annotations are present for the site, we require the ratio of P to CG/PCG authored posts to be > 50%. - The site's "isForSelf" key was set. ``` %matplotlib inline import os import numpy as np import pandas as pd import itertools import matplotlib.pyplot as plt import matplotlib.dates as md import matplotlib import pylab as pl import datetime as dt import time from collections import Counter import json import os import re import random import itertools import multiprocessing as mp from IPython.core.display import display, HTML import datetime as dt import sqlite3 from nltk import word_tokenize from html.parser import HTMLParser from tqdm import tqdm # set filepath for writing working_dir = "/home/srivbane/shared/caringbridge/data/projects/qual-health-journeys/identify_candidate_sites" valid_classification_sites_filename = os.path.join(working_dir, "valid_classification_sites.txt") valid_sites_filtered_filename = os.path.join(working_dir, "valid_classification_sites_filtered.txt") # we also save some figures.... 
image_dir = "/home/srivbane/levon003/repos/qual-health-journeys/chi2019_tex/figures" !wc -l {working_dir}/* | sort -nr # read the previosuly identified sites (if the file exists already) if os.path.exists(valid_sites_filtered_filename): with open(valid_sites_filtered_filename, 'r') as infile: valid_sites = [int(line.strip()) for line in infile.readlines() if line.strip() != ""] else: valid_sites = [] len(valid_sites) ``` ## Load Site Data and Get Access to Journal Database ``` # load the site dataframe working_dir = "/home/srivbane/shared/caringbridge/data/projects/qual-health-journeys/extract_site_features" feathered_site_df_filename = os.path.join(working_dir, "site_scrubbed.df") df = pd.read_feather(feathered_site_df_filename) len(df) def get_db(): journal_wd="/home/srivbane/shared/caringbridge/data/derived/sqlite" db_filename = os.path.join(journal_wd, "journal.db") db = sqlite3.connect( db_filename, detect_types=sqlite3.PARSE_DECLTYPES ) db.row_factory = sqlite3.Row return db def get_journal_text(site_id, journal_oid): try: db = get_db() cursor = db.execute("""SELECT body FROM journal WHERE site_id = ? AND journal_oid = ?""", (site_id, journal_oid)) body = cursor.fetchone() assert body is not None body_text = body['body'] return body_text finally: db.close() def get_site_journal_oids(site_id): try: db = get_db() cursor = db.execute("""SELECT journal_oid FROM journal WHERE site_id = ?""", (site_id,)) result = cursor.fetchall() journal_oids = [r['journal_oid'] for r in result] return journal_oids finally: db.close() def get_site_journals(site_id, columns=["*"]): # directly returns the journal rows associated with the given site_id try: db = get_db() columns_string = ",".join(columns) cursor = db.execute("""SELECT {columns} FROM journal WHERE site_id = ?""".format(columns=columns_string), (site_id,)) journals = cursor.fetchall() return journals finally: db.close() ``` ## Identify Candidate Sites Filter down to only sites of interest according to criteria. 
``` # include only cancer sites c_df = df[df["healthCondition_category"] == "Cancer"] len(c_df) # include only non-deleted sites c_df = c_df[c_df["isDeleted"] != 1] len(c_df) # include only adult- and teen-aged sites valid_ages = ['adult', 'teen'] c_df = c_df[c_df["age"].isin(valid_ages)] len(c_df) # include only sites since 2009 earliest_valid_date = dt.datetime(year=2009,month=1,day=1) earliest_valid_date_timestamp = earliest_valid_date.replace(tzinfo=dt.timezone.utc).timestamp() * 1000 earliest_valid_date_timestamp c_df = c_df[c_df["createdAt"] > earliest_valid_date_timestamp] len(c_df) # include only sites created by the patient # note that this cuts out a huge number of sites; the best way to increase our data size would be the addition of an authorship classifier #c_df = c_df[c_df["isForSelf"] == 1.0] #len(c_df) # now, inspect journal-level features to identify valid sites skip = True if not skip: valid_sites = [] journal_counts = [] latest_valid_date = dt.datetime(year=2016,month=4,day=1) latest_valid_date_timestamp = latest_valid_date.replace(tzinfo=dt.timezone.utc).timestamp() * 1000 min_site_time = 1000 * 60 * 60 * 24 * 30 # 30 days, in milliseconds for site_id in tqdm(c_df["_id"]): journals = get_site_journals(site_id, columns=["createdAt"]) # include only sites with at least 5 journals if len(journals) < 5: continue # include only sites with last journal post created before 2016/04/01 if journals[-1]['createdAt'] > latest_valid_date_timestamp: continue # include only sites that lasted at least 30 days diff = journals[-1]['createdAt'] - journals[0]['createdAt'] if diff <= min_site_time: continue valid_sites.append(site_id) journal_counts.append(len(journals)) len(valid_sites) # an alternative computation on the journal-level features using a pre-saved dataframe site_survival_working_dir = "/home/srivbane/shared/caringbridge/data/projects/classify_health_condition/vw_experiments" site_survival_filename = os.path.join(site_survival_working_dir, 
"site_survival_time.csv") col_names = ("siteId", "siteCreatedAt", "siteUpdatedAt", "firstJournalCreatedAt", "lastJournalCreatedAt", "numJournals", "numJournalsFound") survival_df = pd.read_csv(site_survival_filename, header=None, names=col_names) assert len(survival_df) == len(df) valid_sites = [] journal_counts_dict = {} latest_valid_date = dt.datetime(year=2016,month=4,day=1) latest_valid_date_timestamp = latest_valid_date.replace(tzinfo=dt.timezone.utc).timestamp() * 1000 earliest_valid_date = dt.datetime(year=2009,month=1,day=1) earliest_valid_date_timestamp = earliest_valid_date.replace(tzinfo=dt.timezone.utc).timestamp() * 1000 min_site_time = 1000 * 60 * 60 * 24 * 30 # 30 days, in milliseconds max_site_time = 1000 * 60 * 60 * 24 * 365 * 8 # 8 years, in milliseconds # the maximum site time is really just included to remove likely-erroneous outliers with unexpected timing info for site_id in tqdm(c_df["_id"]): tmp_df = survival_df[survival_df["siteId"] == site_id] assert len(tmp_df) == 1 row = tmp_df.iloc[0] # include only sites with at least 5 journals if row['numJournalsFound'] < 5: continue # include only sites with last journal post created before 2016/04/01 if row['lastJournalCreatedAt'] * 1000 > latest_valid_date_timestamp: continue # include only sites with first journal post created after 2009/01/01 if row['firstJournalCreatedAt'] * 1000 < earliest_valid_date_timestamp: continue # include only sites that lasted at least 30 days diff = row['lastJournalCreatedAt'] * 1000 - row['firstJournalCreatedAt'] * 1000 if diff <= min_site_time: continue if diff >= max_site_time: continue valid_sites.append(site_id) journal_counts_dict[site_id] = row['numJournalsFound'] len(valid_sites) ``` #### Save the valid sites before author type filtering ``` # save the valid sites to an intermediate file before final filtering by author type with open(valid_classification_sites_filename, 'w') as outfile: outfile.write("\n".join([str(sid) for sid in valid_sites])) 
print("Finished.") ``` #### Bring in the author type classifier's results ``` site_proportions_filepath = "/home/srivbane/shared/caringbridge/data/projects/qual-health-journeys/author_classification/site_proportions.csv" author_df = pd.read_csv(site_proportions_filepath) author_df.head(n=5) def save_threshold_list(sites, threshold, output_filepath): filtered_sites = [] for site_id in tqdm(valid_sites): row = author_df['proportion_patient_authored'][author_df['site_id'] == site_id] if len(row) == 1: proportion_patient_authored = row.iloc[0] elif len(row) == 0: # This site isn't in the list, so treat it as 0% patient-authored #print(site_id, row) proportion_patient_authored = 0 else: raise ValueError("Multiple matches for siteId.") if proportion_patient_authored >= threshold: filtered_sites.append(site_id) with open(output_filepath, 'w') as outfile: outfile.write("\n".join([str(sid) for sid in filtered_sites])) print(f"Wrote sites clearing threshold {threshold} to file '{output_filepath}'.") thresholds = [0.5, 0.75, 0.9] working_dir = "/home/srivbane/shared/caringbridge/data/projects/qual-health-journeys/identify_candidate_sites" for threshold in thresholds: filename = "valid_sites_with_%.0f_pct_patient_journals.txt" % (threshold * 100) output_filepath = os.path.join(working_dir, filename) save_threshold_list(valid_sites, threshold, output_filepath) patient_authorship_threshold = 0.95 to_trim = [] for site_id in tqdm(valid_sites): row = author_df['proportion_patient_authored'][author_df['site_id'] == site_id] if len(row) == 1: proportion_patient_authored = row.iloc[0] elif len(row) == 0: # This site isn't in the list, so treat it as 0% patient-authored #print(site_id, row) proportion_patient_authored = 0 else: raise ValueError("Multiple matches for siteId.") if proportion_patient_authored < patient_authorship_threshold: to_trim.append(site_id) print("Will trim %d sites leaving %d candidate sites." 
% (len(to_trim), len(valid_sites) - len(to_trim))) for site_id in to_trim: valid_sites.remove(site_id) len(valid_sites) ``` #### Bring in author type annotations as a "trump card" (?) ``` # maybe we should just rely on the classifier and not do any manual exclusions based on tags? # Decision: Yes. skip_manual_annotation_exclusions = True annotation_web_client_database = "/home/srivbane/shared/caringbridge/data/projects/qual-health-journeys/instance/cbAnnotator.sqlite" def get_annotation_db(): db = sqlite3.connect( annotation_web_client_database, detect_types=sqlite3.PARSE_DECLTYPES ) db.row_factory = sqlite3.Row return db def get_author_annotations(site_id): try: db = get_annotation_db() cursor = db.execute( """SELECT data FROM journalAnnotation WHERE annotation_type = "journal_author_type" AND site_id = ? GROUP BY journal_oid ORDER BY id DESC""", (site_id,) ) journal_author_annotations = cursor.fetchall() annotation_strings = [a['data'] for a in journal_author_annotations] return annotation_strings finally: db.close() def get_site_patient_authorship_ratio(site_id): # returns None if the given site has no author annotations author_annotations = get_author_annotations(site_id) counts = Counter(author_annotations) p_count = counts['p'] if 'p' in counts else 0 cg_count = counts['cg'] if 'cg' in counts else 0 pcg_count = counts['pcg'] if 'pcg' in counts else 0 total_valid_annotations = p_count + cg_count + pcg_count if total_valid_annotations == 0: return None ratio = p_count / total_valid_annotations return ratio def is_site_authorship_valid(site_id, patient_proportion_required=0.5): # returns true if the proportion of patient-authored posts is greater than 0.5, and false otherwise ratio = get_site_patient_authorship_ratio(site_id) return ratio is None or ratio >= 0.5 # Test site get_site_patient_authorship_ratio(877534) # Trim valid sites based on authorship annotations if not skip_manual_annotation_exclusions: to_trim = [] for site_id in tqdm(valid_sites): if not 
is_site_authorship_valid(site_id): to_trim.append(site_id) for site_id in to_trim: valid_sites.remove(site_id) print(len(valid_sites)) ``` #### Save the valid sites ``` # save the valid sites to a file with open(valid_sites_filtered_filename, 'w') as outfile: outfile.write("\n".join([str(sid) for sid in valid_sites])) print("Finished.") ``` ## Visualize Candidate Site Features Graphs and summary stats describing the selected sites. ``` import sys sys.path.append("../annotation_data") from utils import * valid_sites = get_valid_sites_filtered() len(valid_sites) # do some quick summary statistics on the journal counts in the valid sites journal_counts = [journal_counts_dict[site_id] for site_id in valid_sites] from scipy import stats stats.describe(journal_counts) # median, mean, and standard deviation of the number of journals in the selected sites np.median(journal_counts), np.mean(journal_counts), np.std(journal_counts) # percentiles of the number of journal counts np.percentile(journal_counts, [5, 10, 25, 50, 75, 90, 95]) np.sum(journal_counts) title = "Candidate Site Journal Counts" fig, ax = pl.subplots(num=title, figsize=(8,8)) x = [min(journal_counts_dict[site_id], 101) for site_id in valid_sites] patches = plt.hist(x, range=(0, 104), bins=26) ax.set_title(title) ax.set_xlabel("Journal count") ax.set_ylabel("Number of sites") ax.set_xticks([i for i in range(0, 108, 4)]) ax.set_xticklabels([str(i) if i != 104 else "+" for i in range(0, 108, 4)]) #ax.set_yticks([i for i in range(0, 430, 10)]) ax.grid(axis="y", alpha=0.5) plt.show() title = "journal counts" fig, ax = pl.subplots(1, num=title, figsize=(1,0.4), squeeze=True) x = [min(journal_counts_dict[site_id], 101) for site_id in valid_sites] patches = plt.hist(x, range=(0, 100), bins=26, align="left", color="black") #plt.ylim(0, y_limit) # plot the mean as a black line #m = np.mean(x) #std = np.std(x) #summary_text = "M=%.2f\\\\ SD=%.2f" % (m, std) #print(summary_text) ax.set_xticks([]) ax.set_yticks([]) 
plt.axis('off') plt.tight_layout(pad=0) plt.subplots_adjust(top = 0.4, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0) plt.margins(0,0) plt.gca().xaxis.set_major_locator(plt.NullLocator()) plt.gca().yaxis.set_major_locator(plt.NullLocator()) bbox = matplotlib.transforms.Bbox.from_bounds(0,0,1,0.2) image_shortfilename = "journal_updates_summary_hist_short.pdf" image_filename = os.path.join(image_dir, image_shortfilename) plt.savefig(image_filename, format='pdf', dpi=200, pad_inches=0, bbox_inches=bbox) #, transparent=True) plt.show() # get the subset of the site df that contains only the valid sites df_subset = df[df["_id"].isin(valid_sites)] len(df_subset) # get the subset of the survival df that contains only the valid sites survival_df_subset = survival_df[survival_df["siteId"].isin(valid_sites)] len(survival_df_subset) # Add urls to the annotation client, for convenience def get_url(site_id, port=5000): url = "http://127.0.0.1:%d/siteId/%d" % (port, site_id) return '<a href="{}">{}</a>'.format(url, url) df_subset['annotation_url'] = [get_url(site_id) for site_id in df_subset["_id"]] title = "Candidate Site Visits" fig, ax = pl.subplots(num=title, figsize=(8,8)) x = [min(visits, 50001) for visits in df_subset["visits"]] patches = plt.hist(x, log=True, bins=26, range=(0,52000))#, range=(0, 104), bins=26) ax.set_title(title) ax.set_xlabel("Visits") ax.set_ylabel("Number of sites") ax.set_xticks([i for i in range(0, 52000, 5000)]) ax.grid(axis="y", which="minor", alpha=0.5) plt.show() title = "journal counts" fig, ax = pl.subplots(1, num=title, figsize=(1,0.4), squeeze=True) x = [min(visits, 50001) for visits in df_subset["visits"]] patches = plt.hist(x, log=False, bins=26, range=(0,10000), align="left", color="black") #plt.ylim(0, y_limit) # plot the mean as a black line #m = np.mean(x) #std = np.std(x) #summary_text = "M=%.2f\\\\ SD=%.2f" % (m, std) #print(summary_text) ax.set_xticks([]) ax.set_yticks([]) plt.axis('off') plt.tight_layout(pad=0) 
plt.subplots_adjust(top = 0.4, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0) plt.margins(0,0) plt.gca().xaxis.set_major_locator(plt.NullLocator()) plt.gca().yaxis.set_major_locator(plt.NullLocator()) bbox = matplotlib.transforms.Bbox.from_bounds(0,0,1,0.2) image_shortfilename = "site_visits_summary_hist_short.pdf" image_filename = os.path.join(image_dir, image_shortfilename) plt.savefig(image_filename, format='pdf', dpi=200, pad_inches=0, bbox_inches=bbox) #, transparent=True) plt.show() stats.describe(df_subset["visits"]), np.median(df_subset["visits"]), np.std(df_subset["visits"]) ``` #### Site Survival Time ``` def get_month(survival_df, site_id): single_entry_df = survival_df[survival_df["siteId"] == site_id] if len(single_entry_df) != 1: raise ValueError("Invalid site id %d." % site_id) site_survival = single_entry_df.iloc[0] end_date = site_survival['lastJournalCreatedAt'] start_date = site_survival['firstJournalCreatedAt'] if end_date < start_date: raise ValueError("Invalid site timing info.") survival_time = end_date - start_date one_month = 60 * 60 * 24 * 30 approx_months_survived = survival_time / one_month return approx_months_survived site_survival_times = [get_month(survival_df_subset, site_id) for site_id in valid_sites] site_survival_times[:10] stats.describe(site_survival_times) np.median(site_survival_times), np.mean(site_survival_times), np.std(site_survival_times) title = "Candidate Site Survival Time" fig, ax = pl.subplots(num=title, figsize=(8,8)) x = site_survival_times #[min(site_survival_time, 8 * 12) for site_survival_time in site_survival_times] month_range = 7 * 12 + 4 #2009-01 to 2016-04 is the max number of months patches = plt.hist(x, log=True, bins=26, range=(0,7*12+4))#, range=(0, 104), bins=26) ax.set_title(title) ax.set_xlabel("Site survival time (months)") ax.set_ylabel("Number of sites") ax.set_xticks([i for i in range(0, 7*12 + 4, 6)]) ax.grid(axis="y", which="minor", alpha=0.5) plt.show() title = "site survival" fig, 
ax = pl.subplots(1, num=title, figsize=(1,0.4), squeeze=True) x = site_survival_times #[min(site_survival_time, 8 * 12) for site_survival_time in site_survival_times] month_range = 7 * 12 + 4 #2009-01 to 2016-04 is the max number of months patches = plt.hist(x, log=False, bins=26, range=(0,5*12), align="left", color="black") #patches = plt.hist(x, log=False, bins=26, range=(0,10000), align="left", color="black") #plt.ylim(0, y_limit) # plot the mean as a black line #m = np.mean(x) #std = np.std(x) #summary_text = "M=%.2f\\\\ SD=%.2f" % (m, std) #print(summary_text) ax.set_xticks([]) ax.set_yticks([]) plt.axis('off') plt.tight_layout(pad=0) plt.subplots_adjust(top = 0.4, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0) plt.margins(0,0) plt.gca().xaxis.set_major_locator(plt.NullLocator()) plt.gca().yaxis.set_major_locator(plt.NullLocator()) bbox = matplotlib.transforms.Bbox.from_bounds(0,0,1,0.2) image_shortfilename = "survival_time_summary_hist_short.pdf" image_filename = os.path.join(image_dir, image_shortfilename) plt.savefig(image_filename, format='pdf', dpi=200, pad_inches=0, bbox_inches=bbox) #, transparent=True) plt.show() # longest site? 
i = np.argmax(site_survival_times) site_id = valid_sites[i] site_survival = survival_df_subset[survival_df_subset['siteId'] == site_id].iloc[0] (dt.datetime.utcfromtimestamp(site_survival['firstJournalCreatedAt']), dt.datetime.utcfromtimestamp(site_survival['lastJournalCreatedAt']), site_survival['numJournalsFound']) ``` #### Health condition ``` health_condition_counter = Counter(df_subset['healthCondition_name']) health_condition_counter.most_common() assert sum([t[1] for t in health_condition_counter.most_common()]) == len(valid_sites) health_condition_dict = {condition: count for condition, count in health_condition_counter.most_common()} not_specified_list = ['Cancer', 'not provided'] other_threshold = 65 # include a health condition in the count for 'Other' if it fails to clear this count # the other_threshold was set from an inspection of the output of the counter above for cond_name in list(health_condition_dict.keys()): if cond_name != 'Other' and cond_name not in not_specified_list and health_condition_dict[cond_name] < other_threshold: health_condition_dict['Other'] += health_condition_dict[cond_name] del health_condition_dict[cond_name] health_condition_dict['Not Specified'] = 0 for not_spec_name in not_specified_list: health_condition_dict['Not Specified'] += health_condition_dict[not_spec_name] del health_condition_dict[not_spec_name] sorted_filtered_conditions = sorted([(condition, count) for condition, count in health_condition_dict.items()], key=lambda tup: tup[1], reverse=True) sorted_filtered_conditions print("\\begin{tabular}[c]{@{}rlrl@{}}") halfway = len(sorted_filtered_conditions) // 2 inds = zip(range(0, halfway), range(halfway, len(sorted_filtered_conditions))) for i, j in inds: cond1, count1 = sorted_filtered_conditions[i] pct1 = count1 / len(valid_sites) * 100 cond2, count2 = sorted_filtered_conditions[j] pct2 = count2 / len(valid_sites) * 100 print("%s & %d (%.1f\\%%) & %s & %d (%.1f\\%%) \\\\" % (cond1, count1, pct1, cond2, count2, 
pct2)) print("\\end{tabular}") cond_set = set(df_subset['healthCondition_name']) for cond in ['Cancer', 'Other', 'not provided']: cond_set.remove(cond) cond_list = list(cond_set) cond_list.sort(key=lambda cond: health_condition_counter[cond], reverse=True) ", ".join(cond_list) Counter(df_subset['age']).most_common() # it's possible we need to be omitting the non-adult sites from consideration? # we produce a view of the non-adult sites including their annotation urls # the original result of this analysis was manually annotating authorship on these sites to eliminate non-patient-authored sites nonadult = df_subset[df_subset['age'] != 'adult'][['_id', 'age', 'visits', 'annotation_url']] nonadult = nonadult.sort_values(by=['age', 'visits'], ascending=False) pd.set_option('display.max_colwidth', -1) # allow the entirety of the url to show by removing column width limits HTML(nonadult.to_html(escape=False, max_rows=20)) Counter(df_subset['isDeleted'].fillna("nan")).most_common() Counter(df_subset['isSpam'].fillna("nan")).most_common() Counter(df_subset['privacy']).most_common() title = "Candidate Site Tributes" fig, ax = pl.subplots(num=title, figsize=(8,8)) x = df_subset['numTributes'].fillna(0) patches = plt.hist(x, log=True, range=(0, 20), bins=20) ax.set_title(title) ax.set_xlabel("Tributes") ax.set_ylabel("Number of sites") ax.set_xticks(range(0,21,1)) ax.grid(axis="y", alpha=0.5) plt.show() title = "Candidate Site Visitor Invites" fig, ax = pl.subplots(num=title, figsize=(8,8)) x = df_subset['numVisitorInvites'].fillna(0) patches = plt.hist(x, log=True, range=(0,20), bins=20) ax.set_title(title) ax.set_xlabel("Visitor invites") ax.set_ylabel("Number of sites") ax.set_xticks(range(0,21,1)) ax.grid(axis="y", alpha=0.5) plt.show() ``` ## IRR set identification ``` random.seed(1000) random.shuffle(valid_sites) len(valid_sites) irr_set_size = 10 irr_set = valid_sites[:irr_set_size] irr_set.sort() irr_set irr_df = df_subset[df_subset['_id'].isin(irr_set)] assert 
len(irr_df) == irr_set_size irr_df = irr_df.sort_values(by=['_id']) irr_df = irr_df[['_id', 'title', 'visits', 'numJournals', 'annotation_url']] pd.set_option('display.max_colwidth', -1) # allow the entirety of the url to show by removing column width limits HTML(irr_df.to_html(escape=False)) # write the irr_set as an assignment to the appropriate users irr_assigned_users = ['levon003', 'rubya001'] base_dir = "/home/srivbane/shared/caringbridge/data/projects/qual-health-journeys/instance/annotation_data/assignments" header = "3: Fourth IRR set (tag for both phases and responsibilities)" for user in irr_assigned_users: irr_filename = "irr_set_4_20180901.txt" irr_filepath = os.path.join(base_dir, user, irr_filename) with open(irr_filepath, 'w') as outfile: outfile.write(header + "\n") outfile.write("\n".join([str(sid) for sid in irr_set])) print("Finished.") # what health conditions are included in this set? irr_df = df_subset[df_subset['_id'].isin(irr_set)] Counter(irr_df['healthCondition_name']).most_common() raise ValueError("This cell is a protection against accidentally running all the cells in this notebook.") ``` ## Phase set for Wenqi This section added to generate a random set of sites for phase annotation. 
``` valid_sites = [int(line.strip()) for line in open(valid_sites_filtered_filename, 'r').readlines() if line.strip() != ""] len(valid_sites) selected_sites = np.random.choice(valid_sites, size=50) selected_sites[:10] irr_assigned_users = ['levon003', 'luoxx498'] base_dir = "/home/srivbane/shared/caringbridge/data/projects/qual-health-journeys/instance/annotation_data/assignments" header = "Phases 5: Sites for phase annotation" for user in irr_assigned_users: filename = "phases_set_5_20180904.txt" filepath = os.path.join(base_dir, user, filename) with open(filepath, 'w') as outfile: outfile.write(header + "\n") outfile.write("\n".join([str(sid) for sid in selected_sites])) print("Finished.") ``` ## Phase set for active learning ``` valid_sites = [int(line.strip()) for line in open(valid_sites_filtered_filename, 'r').readlines() if line.strip() != ""] len(valid_sites) n = 1000 selected_sites = np.random.choice(valid_sites, size=n) selected_sites[:10] active_learning_set_filepath = os.path.join(working_dir, 'active_learning_set_%d.txt' % n) print(active_learning_set_filepath) with open(active_learning_set_filepath, 'w') as outfile: outfile.write("\n".join([str(sid) for sid in selected_sites])) print("Finished.") ``` ## Multiset 2 for Sabirat This section added to generate a random set of sites for phase and responsibility annotation. 
``` valid_sites = [int(line.strip()) for line in open(valid_sites_filtered_filename, 'r').readlines() if line.strip() != ""] len(valid_sites) selected_sites = np.random.choice(valid_sites, size=20) selected_sites irr_assigned_users = ['rubya001'] base_dir = "/home/srivbane/shared/caringbridge/data/projects/qual-health-journeys/instance/annotation_data/assignments" header = "Multiset 2: Sites for phase and responsibility annotation" for user in irr_assigned_users: filename = "multiset_2_20181009.txt" filepath = os.path.join(base_dir, user, filename) with open(filepath, 'w') as outfile: outfile.write(header + "\n") outfile.write("\n".join([str(sid) for sid in selected_sites])) print("Finished.") ``` ## Multiset 3 for Wenqi and Drew ``` valid_sites = [int(line.strip()) for line in open(valid_sites_filtered_filename, 'r').readlines() if line.strip() != ""] len(valid_sites) selected_sites = np.random.choice(valid_sites, size=20) selected_sites irr_assigned_users = ['eriks074', 'luoxx498'] base_dir = "/home/srivbane/shared/caringbridge/data/projects/qual-health-journeys/instance/annotation_data/assignments" header = "Multiset 3: Sites for phase and responsibility annotation" for user in irr_assigned_users: filename = "multiset_3_20181019.txt" filepath = os.path.join(base_dir, user, filename) with open(filepath, 'w') as outfile: outfile.write(header + "\n") outfile.write("\n".join([str(sid) for sid in selected_sites])) print("Finished.") ``` ## Multiset 4 for Wenqi and Drew ``` valid_sites = [int(line.strip()) for line in open(valid_sites_filtered_filename, 'r').readlines() if line.strip() != ""] len(valid_sites) selected_sites = np.random.choice(valid_sites, size=20) selected_sites irr_assigned_users = ['eriks074', 'luoxx498'] base_dir = "/home/srivbane/shared/caringbridge/data/projects/qual-health-journeys/instance/annotation_data/assignments" header = "Multiset 4: Sites for responsibility annotation" for user in irr_assigned_users: filename = "multiset_4_20181206.txt" 
filepath = os.path.join(base_dir, user, filename) with open(filepath, 'w') as outfile: outfile.write(header + "\n") outfile.write("\n".join([str(sid) for sid in selected_sites])) print("Finished.") ```
github_jupyter
<a href="https://colab.research.google.com/github/rpinheiro83/1.WebDevelopment/blob/main/Python%20Basic.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> #Aula 03 - Tipos de Variáveis ``` print("Hello world") # Linhas iniciadas com # são comentários. # Comentários são ignorados pelo Python e servem para explicar o código. ''' O símbolo # é um comentário de apenas 1 linha. Usando 3 aspas simples consecutivas é possível abrir um bloco de comentário de múltiplas linhas. O bloco se encerra com outras 3 aspas simples. ''' ``` #Aula 04 - Operadores ##Podemos fazer operações aritméticas simples ``` a = 2 + 3 # Soma b = 2 - 3 # Subtração c = 2 * 3 # Multiplicação d = 2 / 3 # Divisão e = 2 // 3 # Divisão inteira f = 2 ** 3 # Potência g = 2 % 3 # Resto de divisão print(a, b, c, d , e, f ,g) ``` ##Podemos fazer operações dentro do print ``` print(a+1, b+1) ``` ##Podemos fazer operações com variáveis não inteiras ``` nome = input('Digite seu primeiro nome:') nome = nome + ' Pinheiro' print(nome) ``` ##Operadores relacionais ``` comparacao = 5 > 3 comparacao2 = 5 < 3 print(comparacao) print(comparacao2) ``` ###O Python possui 6 operadores relacionais: #### * Maior que: > * Maior ou igual: >= * Menor que: < * Menor ou igual: <= * Igual: == * Diferente: != * Igual: == ##Operadores lógicos ``` comparacao1 = 5 > 3 and 6 > 3 comparacao2 = 5 < 3 and 6 > 3 print(comparacao1, comparacao2) ``` #Aula 05 - Estruturas Sequenciais ##Inputs ``` nome = input('Digite o seu nome: ') print(nome) ``` ### Tudo que é lido por input() é considerado uma string (str). 
### Para tratar como outros tipos de dados é necessário realizar a conversão: ``` peso = float(input('Digite o seu peso:')) idade = int(input('Digite a sua idade:')) print("Seu peso é ", peso) print("Sua idade é ", idade) ``` ##Outputs ``` y = 3.14 # uma variável do tipo real (float) escola = "Let's Code" # uma variável literal (string) # Podemos exibir textos na tela e/ou valores de variáveis com a função print(). print('eu estudo na ', escola) print('pi vale', y) # Podemos fazer operações dentro do print: print (y+1, y**2) ``` #Aula 06 - Estruturas Condicionais ##Else ###Em alguns casos, queremos que o programa escolha entre 2 casos mutuamente exclusivos. Para isso utilizamos o else. O else não possui condição para verificar. O else sempre vem imediatamente após um if e é executado se o if for ignorado. ``` idade = int(input("Digite sua idade:")) altura = float(input("Digite sua altura em metros:")) if idade >= 12 and altura >= 1.60: print("You can get on the roller coaster.") else: print("You can't get on the roller coaster") print("Thank you very much") ``` ###É possível "aninhar" diversos if's e else's. O programa abaixo só deixa a pessoa entrar no brinquedo se tiver idade e altura mínimas: ``` idade = int(input("Enter your age: ")) if idade >= 12: responda = input("Would you like get on the roller coaster?") if (responda == "yes"): print("Please come in.") else: print("Okay then") else: print("You haven't old enough to get into this toy.") ``` ###Podemos testar diversos casos mutuamente exclusivos utilizando o 'elif'. O comando elif é a contração de "else if" - ou seja, caso um if não seja executado, você pode propor uma nova condição para ser testada. 
``` exercises = int(input("How many Python exercises have you already done?")) if exercises > 30: print("It's already getting professional!") elif exercises > 20: print("It's going well, let's do some more!") elif exercises > 10: print("Shall we catch up?") else: print("xiiiiii...........") ``` #Aula 07 - Estruturas de Repetição - While ##While ###O while é bastante parecido com um 'if': ele possui uma expressão, e é executado caso ela seja verdadeira. Mas o if é executado apenas uma vez, e depois o código segue adiante. O while não: ao final de sua execução, ele torna a testar a expressão, e caso ela seja verdadeira, ele repete sua execução. ``` horario = int(input('Qual horario é agora? ')) while 0 < horario < 6: print('Você está no horario da madrugada') horario = horario + 1 else: print('Você nao está no horario da madrugada') ``` ##Validação de entrada ###Uma utilidade interessante do while é obrigar o usuário a digitar apenas entradas válidas. ``` salario = float(input('Digite seu salario: ')) while salario < 998.0: salario = float(input('Entre com um salario MAIOR DO QUE 998.0: ')) else: print('O salario que você entrou foi: ', salario) ``` ##Contador ###Todo tipo de código que deve se repetir várias vezes pode ser feito com o while, como somar vários valores, gerar uma sequência etc. Nestes casos, é normal utilizar um contador: ``` # Declaramos um contador como 0: contador = 0 # Definimos o número de repetições: numero = int(input('Digite um numero: ')) # Rodamos o while até o contador se igualar ao número de repetições: while contador < numero: print(contador) contador = contador + 1 ``` ##Break ###Um jeito de forçar um loop a ser interrompido é utilizando o comando 'break'. O loop abaixo em tese seria infinito, mas se a condição do if for verificada, o break é executado e conseguimos escapar do loop: ``` while True: resposta = input('Digite OK: ') if resposta == 'OK': break ```
github_jupyter
# Hello World! Here's an example notebook with some documentation on how to access CMIP data. ``` %matplotlib inline import xarray as xr import intake # util.py is in the local directory # it contains code that is common across project notebooks # or routines that are too extensive and might otherwise clutter # the notebook design import util print('hello world!') ``` ## Demonstrate how to use `intake-esm` [Intake-esm](https://intake-esm.readthedocs.io) is a data cataloging utility that facilitates access to CMIP data. It's pretty awesome. An `intake-esm` collection object establishes a link to a database that contains file locations and associated metadata (i.e., which experiement, model, the come from). ### Opening a collection First step is to open the collection by pointing the collection definition file, which is a JSON file that conforms to the [ESM Collection Specification](https://github.com/NCAR/esm-collection-spec). The collection JSON files are stored locally in this repository for purposes of reproducibility---and because Cheyenne compute nodes don't have Internet access. The primary source for these files is the [intake-esm-datastore](https://github.com/NCAR/intake-esm-datastore) repository. Any changes made to these files should be pulled from that repo. For instance, the Pangeo cloud collection is available [here](https://raw.githubusercontent.com/NCAR/intake-esm-datastore/master/catalogs/pangeo-cmip6.json). ``` if util.is_ncar_host(): col = intake.open_esm_datastore("../catalogs/glade-cmip6.json") else: col = intake.open_esm_datastore("../catalogs/pangeo-cmip6.json") col ``` `intake-esm` is build on top of [pandas](https://pandas.pydata.org/pandas-docs/stable). It is possible to view the `pandas.DataFrame` as follows. ``` col.df.head() ``` It is possible to interact with the `DataFrame`; for instance, we can see what the "attributes" of the datasets are by printing the columns. 
``` col.df.columns ``` ### Search and discovery #### Finding unique entries Let's query the data to see what models ("source_id"), experiments ("experiment_id") and temporal frequencies ("table_id") are available. ``` import pprint uni_dict = col.unique(['source_id', 'experiment_id', 'table_id']) pprint.pprint(uni_dict, compact=True) ``` #### Searching for specific datasets Let's find all the dissolved oxygen data at annual frequency from the ocean for the `historical` and `ssp585` experiments. ``` cat = col.search(experiment_id=['historical', 'ssp585'], table_id='Oyr', variable_id='o2', grid_label='gn') cat.df ``` It might be desirable to get more specific. For instance, we may want to select only the models that have *both* `historical` and `ssp585` data. We coud do this as follows. ``` models = set(uni_dict['source_id']['values']) # all the models for experiment_id in ['historical', 'ssp585']: query = dict(experiment_id=experiment_id, table_id='Oyr', variable_id='o2', grid_label='gn') cat = col.search(**query) models = models.intersection({model for model in cat.df.source_id.unique().tolist()}) # ensure the CESM2 models are not included (oxygen was erroneously submitted to the archive) models = models - {'CESM2-WACCM', 'CESM2'} models = list(models) models cat = col.search(experiment_id=['historical', 'ssp585'], table_id='Oyr', variable_id='o2', grid_label='gn', source_id=models) cat.df ``` ### Loading data `intake-esm` enables loading data directly into an [xarray.Dataset](http://xarray.pydata.org/en/stable/api.html#dataset). Note that data on the cloud are in [zarr](https://zarr.readthedocs.io/en/stable/) and data on [glade](https://www2.cisl.ucar.edu/resources/storage-and-file-systems/glade-file-spaces) are stored as [netCDF](https://www.unidata.ucar.edu/software/netcdf/) files. This is opaque to the user. `intake-esm` has rules for aggegating datasets; these rules are defined in the collection-specification file. 
``` dset_dict = cat.to_dataset_dict(zarr_kwargs={'consolidated': True, 'decode_times': False}, cdf_kwargs={'chunks': {}, 'decode_times': False}) ``` `dset_dict` is a dictionary of `xarray.Dataset`'s; its keys are constructed to refer to compatible groups. ``` dset_dict.keys() ``` We can access a particular dataset as follows. ``` dset_dict['CMIP.CCCma.CanESM5.historical.Oyr.gn'] ``` ### More advanced queries As motivation for diving into more advanced manipulations with `intake-esm`, let's consider the task of getting access to grid information in the `Ofx` table_id. ``` cat_fx = col.search(experiment_id=['historical', 'ssp585'], source_id=models, table_id='Ofx', grid_label='gn') cat_fx.df ``` This, however, comes with lots of redundant information. Additionally, it may be necessary to do more targeted manipulations of the search. For instance, we've found a handful of corrupted files on `glade` and might need to work around loading these. As an illustration of this, in the code below, we specify a list of to queries (in this case one) to eliminate. ``` import numpy as np # specify a list of queries to eliminate corrupt_data = [dict(variable_id='areacello', source_id='IPSL-CM6A-LR', experiment_id='historical', member_id='r2i1p1f1') ] # copy the dataframe df = cat_fx.df.copy() # eliminate data for elim in corrupt_data: condition = np.ones(len(df), dtype=bool) for key, val in elim.items(): condition = condition & (df[key] == val) df = df.loc[~condition] df ``` We then drop duplicates. ``` df.drop_duplicates(subset=['source_id', 'variable_id'], inplace=True) ``` Now, since we've only retained one ensemble member, we need to eliminate that column. If we omit this step, `intake-esm` will throw an error, complaining that different variables are present for each ensemble member. Setting the `member_id` column to NaN precludes attempts to join along the ensemble dimension. 
After this final manipulation, we copy the `DataFrame` back to the collection object and procede with loading the data. ``` df['member_id'] = np.nan cat_fx.df = df fx_dsets = cat_fx.to_dataset_dict(zarr_kwargs={'consolidated': True}, cdf_kwargs={'chunks': {}}) fx_dsets.keys() for key, ds in fx_dsets.items(): print(ds.data_vars) ``` ## Demonstrate how spin-up a dask cluster If you expect to require Big Data capabilities, here's how you spin up a [dask](https://dask.org) cluster using [dask-jobqueue](https://dask-jobqueue.readthedocs.io/en/latest/). The syntax is different if on an NCAR machine versus the cloud. ``` if util.is_ncar_host(): from ncar_jobqueue import NCARCluster cluster = NCARCluster(project='UCGD0006') cluster.adapt(minimum_jobs=1, maximum_jobs=10) else: from dask_kubernetes import KubeCluster cluster = KubeCluster() cluster.adapt(minimum=1, maximum=10) cluster from dask.distributed import Client client = Client(cluster) # Connect this local process to remote workers client ```
github_jupyter
# Preprocessing Part ## Author: Xiaochi (George) Li Input: "data.xlsx" provided by the professor Output: "processed_data.pickle" with target variable "Salary" as the last column. And all the missing value should be imputed or dropped. ### Summary In this part, we read the data from the file, did some exploratory data analysis on the data and processed the data for further analysis and synthesis. #### Exploratory Data Analysis * Correlation analysis * Missing value analysis * Unique percentage analysis #### Process * Removed 1. Need NLP: "MOU", "MOU Title", "Title", "Department", 2. No meaning:"Record Number", 3. \>50% missing: "POBP" * Imputed 1. p_dep: mean 2. p_grade: add new category 3. Lump Sum Pay:0 4. benefit: add new category 5. Rate:mean 6. o_pay:median ``` import numpy as np import pandas as pd import sklearn import seaborn as sns import matplotlib.pyplot as plt np.random.seed(42) df = pd.read_excel("data.xlsx",thousands=",") #seperations in thousands df.info() """Correlation analysis""" corr = df.corr() mask = np.zeros_like(corr, dtype=np.bool) mask[np.triu_indices_from(mask)] = True f, ax = plt.subplots(figsize=(11, 9)) cmap = sns.diverging_palette(220, 10, as_cmap=True) sns.heatmap(corr, mask=mask, cmap=cmap, vmax=1, vmin=-1, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5}) corr """Missing rate for each feature""" null_rate = df.isnull().sum(axis = 0).sort_values(ascending = False)/float((len(df))) null_rate """Unique Rate for each feature""" unique_rate = df.apply(lambda x: len(pd.unique(x)),axis = 0).sort_values(ascending = False) #unique rate and sort print(unique_rate) def column_analyse(x,df = df): #print count for columns that only has few uniques print(df[x].value_counts(),"\n",df[x].value_counts().sum() ,"\n",df[x].value_counts()/len(df[x]), "\n-----------------------") column_analyse("e_type") column_analyse("benefit") column_analyse("Time") column_analyse("p_grade") """Feature selection""" categotical_features = ["e_type", 
"benefit", "Time", "p_grade"] not_include_features = ["MOU", "MOU Title", "Title", "Department", "Record Number", "POBP"] selected_features = [i for i in df.columns if i not in not_include_features] X_selected = df.loc[:,selected_features] X_selected["p_dep"].hist(bins=50) X_selected["p_dep"].describe() X_selected["Lump Sum Pay"].hist(bins=50) X_selected["Lump Sum Pay"].describe() X_selected["Rate"].hist(bins=50) X_selected["Rate"].describe() X_selected["o_pay"].hist(bins=50) X_selected["o_pay"].describe() ``` |Feature Name|Missing Rate|Imputation Method| |----|----|----| |p_dep|0.189287|Mean| |p_grade|0.189287|add new category| |Lump Sum Pay|0.185537|0| |benefit|0.178262|add new category| |Rate|0.058162|mean| |o_pay|0.003750|median| ``` """imputation""" X_selected["p_dep"] = X_selected["p_dep"].fillna(X_selected["p_dep"].mean()) X_selected["Lump Sum Pay"] = X_selected["Lump Sum Pay"].fillna(0) X_selected["Rate"] = X_selected["Rate"].fillna(X_selected["Rate"].mean()) X_selected["o_pay"] = X_selected["o_pay"].fillna(X_selected["o_pay"].median()) X_selected["p_grade"] = X_selected["p_grade"].fillna(-1) X_selected["benefit"] = X_selected["benefit"].fillna(-1) X_selected.head() X_selected.to_pickle("processed_data.pickle") ```
github_jupyter
# MPIJob and Horovod Runtime ## Running distributed workloads Training a Deep Neural Network is a hard task. With growing datasets, wider and deeper networks, training our Neural Network can require a lot of resources (CPUs / GPUs / Mem and Time). There are two main reasons why we would like to distribute our Deep Learning workloads: 1. **Model Parallelism** &mdash; The **Model** is too big to fit a single GPU. In this case the model contains too many parameters to hold within a single GPU. To negate this we can use strategies like **Parameter Server** or slicing the model into slices of consecutive layers which we can fit in a single GPU. Both strategies require **Synchronization** between the layers held on different GPUs / Parameter Server shards. 2. **Data Parallelism** &mdash; The **Dataset** is too big to fit a single GPU. Using methods like **Stochastic Gradient Descent** we can send batches of data to our models for gradient estimation. This comes at the cost of longer time to converge since the estimated gradient may not fully represent the actual gradient. To increase the likelihood of estimating the actual gradient we could use bigger batches, by sending small batches to different GPUs running the same Neural Network, calculating the batch gradient and then running a **Synchronization Step** to calculate the average gradient over the batches and update the Neural Networks running on the different GPUs. > It is important to understand that the act of distribution adds extra **Synchronization Costs** which may vary according to your cluster's configuration. > <br> > As the gradients and NN needs to be propagated to each GPU in the cluster every epoch (or a number of steps), Networking can become a bottleneck and sometimes different configurations need to be used for optimal performance. 
> <br> > **Scaling Efficiency** is the metric used to show by how much each additional GPU should benefit the training process with Horovod showing up to 90% (When running with a well written code and good parameters). ![Horovod scaling](https://user-images.githubusercontent.com/16640218/38965607-bf5c46ca-4332-11e8-895a-b9c137e86013.png) ## How can we distribute our training There are two different cluster configurations (which can be combined) we need to take into account. - **Multi Node** &mdash; GPUs are distributed over multiple nodes in the cluster. - **Multi GPU** &mdash; GPUs are within a single Node. In this demo we show a **Multi Node Multi GPU** &mdash; **Data Parallel** enabled training using Horovod. However, you should always try and use the best distribution strategy for your use case (due to the added costs of the distribution itself, ability to run in an optimized way on specific hardware or other considerations that may arise). ## How Horovod works? Horovod's primary motivation is to make it easy to take a single-GPU training script and successfully scale it to train across many GPUs in parallel. This has two aspects: - How much modification does one have to make to a program to make it distributed, and how easy is it to run it? - How much faster would it run in distributed mode? Horovod Supports TensorFlow, Keras, PyTorch, and Apache MXNet. in MLRun we use Horovod with MPI in order to create cluster resources and allow for optimized networking. **Note:** Horovod and MPI may use [NCCL](https://developer.nvidia.com/nccl) when applicable which may require some specific configuration arguments to run optimally. Horovod uses this MPI and NCCL concepts for distributed computation and messaging to quickly and easily synchronize between the different nodes or GPUs. 
![Ring Allreduce Strategy](https://miro.medium.com/max/700/1*XdMlfmOgPCUG9ZOYLTeP9w.jpeg) Horovod will run your code on all the given nodes (Specific node can be addressed via `hvd.rank()`) while using an `hvd.DistributedOptimizer` wrapper to run the **synchronization cycles** between the copies of your Neural Network running at each node. **Note:** Since all the copies of your Neural Network must be the same, Your workers will adjust themselves to the rate of the slowest worker (simply by waiting for it to finish the epoch and receive its updates). Thus try not to make a specific worker do a lot of additional work on each epoch (Like a lot of saving, extra calculations, etc...) since this can affect the overall training time. ## How do we integrate TF2 with Horovod? As it's one of the main motivations, integration is fairly easy and requires only a few steps: ([You can read the full instructions for all the different frameworks on Horovod's documentation website](https://horovod.readthedocs.io/en/stable/tensorflow.html)). 1. Run `hvd.init()`. 2. Pin each GPU to a single process. With the typical setup of one GPU per process, set this to local rank. The first process on the server will be allocated the first GPU, the second process will be allocated the second GPU, and so forth. ``` gpus = tf.config.experimental.list_physical_devices('GPU') for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) if gpus: tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU') ``` 3. Scale the learning rate by the number of workers. Effective batch size in synchronous distributed training is scaled by the number of workers. An increase in learning rate compensates for the increased batch size. 4. Wrap the optimizer in `hvd.DistributedOptimizer`. The distributed optimizer delegates gradient computation to the original optimizer, averages gradients using allreduce or allgather, and then applies those averaged gradients. 
For TensorFlow v2, when using a `tf.GradientTape`, wrap the tape in `hvd.DistributedGradientTape` instead of wrapping the optimizer. 1. Broadcast the initial variable states from rank 0 to all other processes. This is necessary to ensure consistent initialization of all workers when training is started with random weights or restored from a checkpoint. For TensorFlow v2, use `hvd.broadcast_variables` after models and optimizers have been initialized. 1. Modify your code to save checkpoints only on worker 0 to prevent other workers from corrupting them. For TensorFlow v2, construct a `tf.train.Checkpoint` and only call `checkpoint.save()` when `hvd.rank() == 0`. You can go to [Horovod's Documentation](https://horovod.readthedocs.io/en/stable) to read more about horovod. ## Image classification use case See the end to end [**Image Classification with Distributed Training Demo**](https://github.com/mlrun/demos/tree/0.6.x/image-classification-with-distributed-training)
github_jupyter
``` #importando as bibliotecas import pandas as pd #biblioteca utilizada para o tratamento de dados via dataframes import numpy as np #biblioteca utilizada para o tratamento de valores numéricos (vetores e matrizes) import matplotlib.pyplot as plt #biblioteca utilizada para construir os gráficos from sklearn.metrics import r2_score #método para o cálculo do R2 (coeficiente de determinação) #importa o modelo de regressão linear univariada from sklearn.linear_model import LinearRegression #análise do modelo from sklearn.metrics import r2_score,mean_squared_error,mean_absolute_error,accuracy_score,precision_score,recall_score,f1_score,roc_auc_score from sklearn.model_selection import KFold, cross_val_score, train_test_split from sklearn.svm import SVC from sklearn.cluster import KMeans from sklearn.metrics import silhouette_score, davies_bouldin_score, mutual_info_score from google.colab import drive drive.mount('/content/drive') diab = pd.read_csv('/content/drive/My Drive/Colab Notebooks/IGTI/mod3/trabalho-pratico/diabetes_numeric.csv') blood = pd.read_csv("/content/drive/My Drive/Colab Notebooks/IGTI/mod3/trabalho-pratico/bloodtransf.csv") wine = pd.read_csv("/content/drive/My Drive/Colab Notebooks/IGTI/mod3/trabalho-pratico/wine.csv") ``` ## Questão 1 - número de atributos ``` diab.shape ``` ## Questão 2 - Número de instâncias ``` blood.shape ``` ## Questão 3 - Sobre a base de clusterização, marque a alternativa CORRETA: ``` wine.shape wine['class'].value_counts() ``` ## Questão 4 - Sobre dados faltantes, marque a alternativa CORRETA: ``` wine.isnull().sum().sum() diab.isnull().sum().sum() blood.isnull().sum().sum() ``` ## Questão 5 - Em relação a modelagem utilizando a regressão linear, marque a alternativa CORRETA sobre a métrica r2: ``` diab.head() X = diab.iloc[:,:2] Y = diab.iloc[:,2] # Criando os conjuntos de dados de treino e de teste X_treino, X_teste, Y_treino, Y_teste = train_test_split(X, Y, test_size = 0.25, random_state = 42) #Realiza a construção do 
modelo de regressão reg = LinearRegression() reg.fit (X_treino,Y_treino) # encontra os coeficientes (realiza a regressão) #realiza a previsão previsao = reg.predict(X_teste) R_2 = r2_score(Y_teste, previsao) #realiza o cálculo do R2 print("Coeficiente de Determinação (R2):", R_2) ``` ## Questão 6 - Em relação a modelagem utilizando a regressão linear, marque a alternativa CORRETA sobre a métrica MAE ``` MAE = mean_absolute_error(Y_teste, previsao) #realiza o cálculo do R2 print("MAE:", MAE) ``` ## Questão 7 - Em relação a modelagem utilizando a regressão linear, marque a alternativa CORRETA sobre a métrica MSE. ``` MSE = mean_squared_error(Y_teste, previsao) #realiza o cálculo do R2 print("MAE:", MSE) ``` ## Questão 8 - Em relação a modelagem utilizando o SVM, marque a alternativa CORRETA sobre a métrica acurácia: ``` svm = SVC(kernel='rbf') blood['Class'].value_counts() def conversao(x): if x == 1: return 0 if x == 2: return 1 blood['Class'] = blood['Class'].apply(lambda x: conversao(x)) blood.head() X = blood.iloc[:,:4] Y = blood.iloc[:,4] # Criando os conjuntos de dados de treino e de teste X_treino, X_teste, Y_treino, Y_teste = train_test_split(X, Y, test_size = 0.25, random_state = 42) svm.fit(X_treino,Y_treino) previsao = svm.predict(X_teste) '{:.2f}'.format(accuracy_score(Y_teste, previsao)) ``` ## Questão 9 - Em relação a modelagem utilizando o SVM, marque a alternativa CORRETA sobre as métricas precision e recall Recall ``` recall_score(Y_teste, previsao) ``` Precisão ``` precision_score(Y_teste,previsao) ``` ## Questão 10 - Em relação a modelagem utilizando o SVM, marque a alternativa CORRETA sobre a métrica f1 ``` f1_score(Y_teste, previsao) ``` ## Questão 11 - Em relação a modelagem utilizando o SVM, marque a alternativa CORRETA sobre a métrica AUROC ``` roc_auc_score(Y_teste, previsao) baseline_preds = np.random.choice([0,1], size = len(Y_teste)) print('\nAUCROC com BaseLine', roc_auc_score(Y_teste, baseline_preds)) ``` O valor de AUROUC é similar a de 
um baseline ## Questão 12 - Em relação a modelagem utilizando o Kmeans, marque a alternativa CORRETA sobre o número de clusters: ``` wine['class'].unique() ``` ## Questão 13 - Em relação a modelagem utilizando o Kmeans, marque a alternativa CORRETA sobre a métrica Coeficiente de Silhueta: ``` def conversao2(x): if x == 1: return 0 if x == 2: return 1 if x == 3: return 2 wine['class'] = wine['class'].apply(lambda x: conversao2(x)) y_wine = np.array(wine['class']) x_wine = np.array(wine.drop('class', axis=1)) X_train_wine, X_test_wine, y_train_wine, y_test_wine = train_test_split(x_wine, y_wine, test_size=0.25, random_state=42) clf_km = KMeans(n_clusters=3) clf_km.fit(X_train_wine) y_pred_wine = clf_km.predict(X_test_wine) print('Coeficiente de Silhueta: ',silhouette_score(X_test_wine, y_pred_wine)) print('Davies-Bouldin Score:', davies_bouldin_score(X_test_wine, y_pred_wine)) print('Mutual Information:', mutual_info_score(y_test_wine, y_pred_wine)) ```
github_jupyter
# Training an Encrypted Neural Network In this tutorial, we will walk through an example of how we can train a neural network with CrypTen. This is particularly relevant for the <i>Feature Aggregation</i>, <i>Data Labeling</i> and <i>Data Augmentation</i> use cases. We will focus on the usual two-party setting and show how we can train an accurate neural network for digit classification on the MNIST data. For concreteness, this tutorial will step through the <i>Feature Aggregation</i> use cases: Alice and Bob each have part of the features of the data set, and wish to train a neural network on their combined data, while keeping their data private. ## Setup As usual, we'll begin by importing and initializing the `crypten` and `torch` libraries. We will use the MNIST dataset to demonstrate how Alice and Bob can learn without revealing protected information. For reference, the feature size of each example in the MNIST data is `28 x 28`. Let's assume Alice has the first `28 x 20` features and Bob has last `28 x 8` features. One way to think of this split is that Alice has the (roughly) top 2/3rds of each image, while Bob has the bottom 1/3rd of each image. We'll again use our helper script `mnist_utils.py` that downloads the publicly available MNIST data, and splits the data as required. For simplicity, we will restrict our problem to binary classification: we'll simply learn how to distinguish between 0 and non-zero digits. For speed of execution in the notebook, we will only create a dataset of a 100 examples. ``` import crypten import torch crypten.init() %run ./mnist_utils.py --option features --reduced 100 --binary ``` Next, we'll define the network architecture below, and then describe how to train it on encrypted data in the next section. 
``` import torch.nn as nn import torch.nn.functional as F #Define an example network class ExampleNet(nn.Module): def __init__(self): super(ExampleNet, self).__init__() self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=0) self.fc1 = nn.Linear(16 * 12 * 12, 100) self.fc2 = nn.Linear(100, 2) # For binary classification, final layer needs only 2 outputs def forward(self, x): out = self.conv1(x) out = F.relu(out) out = F.max_pool2d(out, 2) out = out.view(out.size(0), -1) out = self.fc1(out) out = F.relu(out) out = self.fc2(out) return out ``` ## Encrypted Training After all the material we've covered in earlier tutorials, we only need to know a few additional items for encrypted training. We'll first discuss how the training loop in CrypTen differs from PyTorch. Then, we'll go through a complete example to illustrate training on encrypted data from end-to-end. ### How does CrypTen training differ from PyTorch training? There are three main ways implementing a CrypTen training loop differs from a PyTorch training loop. We'll describe these items first, and then illustrate them with small examples below. <i>(1) Use `AutogradCrypTensor`</i>: We need to transform the input data to `AutogradCrypTensors` from `CrypTensors` before calling the forward pass. (`AutogradCrypTensors` allow the CrypTensors to store gradients and thus enable backpropagation.) As we show in the examples below, this is easily done by simply calling the `AutogradCrypTensor` constructor with the previously encrypted `CrypTensor`. <i>(2) Use one-hot encoding</i>: CrypTen training requires all labels to use one-hot encoding. This means that when using standard datasets such as MNIST, we need to modify the labels to use one-hot encoding. <i>(3) Directly update parameters</i>: CrypTen does not use the PyTorch optimizers. Instead, CrypTen implements encrypted SGD by implementing its own `backward` function, followed by directly updating the parameters. 
As we will see below, using SGD in CrypTen is very similar to using the PyTorch optimizers. We now show some small examples to illustrate these differences. As before, we will assume Alice has the rank 0 process and Bob has the rank 1 process. ``` # Define source argument values for Alice and Bob ALICE = 0 BOB = 1 # Example: Transforming input data into AutogradCrypTensors from crypten.autograd_cryptensor import AutogradCrypTensor # Load Alice's data data_alice_enc = crypten.load('/tmp/alice_train.pth', src=ALICE) # Create an AutogradCrypTensor from the CrypTensor data_alice_enc_auto = AutogradCrypTensor(data_alice_enc) # We'll now set up the data for our small example below # For illustration purposes, we will create toy data # and encrypt all of it from source ALICE x_small = torch.rand(100, 1, 28, 28) y_small = torch.randint(1, (100,)) # Transform labels into one-hot encoding label_eye = torch.eye(2) y_one_hot = label_eye[y_small] # Transform all data to AutogradCrypTensors x_train = AutogradCrypTensor(crypten.cryptensor(x_small, src=ALICE)) y_train = AutogradCrypTensor(crypten.cryptensor(y_one_hot)) # Instantiate and encrypt a CrypTen model model_plaintext = ExampleNet() dummy_input = torch.empty(1, 1, 28, 28) model = crypten.nn.from_pytorch(model_plaintext, dummy_input) model.encrypt() # Example: Stochastic Gradient Descent in CrypTen model.train() # Change to training mode loss = crypten.nn.MSELoss() # Choose loss functions # Set parameters: learning rate, num_epochs learning_rate = 0.001 num_epochs = 2 # Train the model: SGD on encrypted data for i in range(num_epochs): # forward pass output = model(x_train) loss_value = loss(output, y_train) # set gradients to zero model.zero_grad() # perform backward pass loss_value.backward() # update parameters model.update_parameters(learning_rate) # examine the loss after each epoch print("Epoch: {0:d} Loss: {1:.4f}".format(i, loss_value.get_plain_text())) ``` ### A Complete Example We now put these pieces together for 
a complete example of training a network in a multi-party setting. As in Tutorial 3, we'll assume Alice has the rank 0 process, and Bob has the rank 1 process; so we'll load and encrypt Alice's data with `src=0`, and load and encrypt Bob's data with `src=1`. We'll then initialize a plaintext model and convert it to an encrypted model, just as we did in Tutorial 4. We'll finally define our loss function, training parameters, and run SGD on the encrypted data. For the purposes of this tutorial we train on 100 samples; training should complete in ~3 minutes per epoch. ``` import crypten.mpc as mpc import crypten.communicator as comm # Convert labels to one-hot encoding # Since labels are public in this use case, we will simply use them from loaded torch tensors labels = torch.load('/tmp/train_labels.pth') labels = labels.long() labels_one_hot = label_eye[labels] @mpc.run_multiprocess(world_size=2) def run_encrypted_training(): # Load data: x_alice_enc = crypten.load('/tmp/alice_train.pth', src=ALICE) x_bob_enc = crypten.load('/tmp/bob_train.pth', src=BOB) # Combine the feature sets: identical to Tutorial 3 x_combined_enc = crypten.cat([x_alice_enc, x_bob_enc], dim=2) # Reshape to match the network architecture x_combined_enc = x_combined_enc.unsqueeze(1) # Initialize a plaintext model and convert to CrypTen model model = crypten.nn.from_pytorch(ExampleNet(), dummy_input) model.encrypt() # Set train mode model.train() # Define a loss function loss = crypten.nn.MSELoss() # Define training parameters learning_rate = 0.001 num_epochs = 2 batch_size = 10 num_batches = x_combined_enc.size(0) // batch_size rank = comm.get().get_rank() for i in range(num_epochs): # Print once for readability if rank == 0: print(f"Epoch {i} in progress:") for batch in range(num_batches): # define the start and end of the training mini-batch start, end = batch * batch_size, (batch + 1) * batch_size # construct AutogradCrypTensors out of training examples / labels x_train = 
AutogradCrypTensor(x_combined_enc[start:end]) y_batch = labels_one_hot[start:end] y_train = AutogradCrypTensor(crypten.cryptensor(y_batch)) # perform forward pass: output = model(x_train) loss_value = loss(output, y_train) # set gradients to "zero" model.zero_grad() # perform backward pass: loss_value.backward() # update parameters model.update_parameters(learning_rate) # Print progress every batch: batch_loss = loss_value.get_plain_text() if rank == 0: print(f"\tBatch {(batch + 1)} of {num_batches} Loss {batch_loss.item():.4f}") run_encrypted_training() ``` We see that the average batch loss decreases across the epochs, as we expect during training. This completes our tutorial. Before exiting this tutorial, please clean up the files generated using the following code. ``` import os filenames = ['/tmp/alice_train.pth', '/tmp/bob_train.pth', '/tmp/alice_test.pth', '/tmp/bob_test.pth', '/tmp/train_labels.pth', '/tmp/test_labels.pth'] for fn in filenames: if os.path.exists(fn): os.remove(fn) ```
github_jupyter
<a href="https://colab.research.google.com/github/jdilger/tensorflow-notebooks/blob/master/AgroForest.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` gpu_info = !nvidia-smi gpu_info = '\n'.join(gpu_info) if gpu_info.find('failed') >= 0: print('Select the Runtime > "Change runtime type" menu to enable a GPU accelerator, ') print('and then re-execute this cell.') else: print(gpu_info) # Use logging to maintain more detailed information for reproducibility import logging def tfLog(): logging.basicConfig(level=logging.DEBUG, filename='myapp.log', format='%(asctime)s %(levelname)s:%(message)s') try: logging.debug('######################################') logging.debug('Config Settings') logging.debug('######################################') logging.debug("Bucket:%s",BUCKET) logging.debug("Folder:%s",FOLDER) logging.debug('Training base:%s',TRAINING_BASE) logging.debug('Eaval base:%s',EVAL_BASE) logging.debug('Band order:%s',BANDS) logging.debug('Response:%s',RESPONSE) logging.debug('Features:%s',FEATURES) logging.debug('Kernal size:%d',KERNEL_SIZE) logging.debug('FEATURES_DICT:%s',FEATURES_DICT) logging.debug('Training size:%d',TRAIN_SIZE) logging.debug('Eval size:%d',EVAL_SIZE) logging.debug('batch size:%d',BATCH_SIZE) logging.debug('Epochs:%d',EPOCHS) logging.debug('Buffer size:%d',BUFFER_SIZE) logging.debug('Optimizer:%s',OPTIMIZER) # logging.debug('Loss:',LOSS) # logging.debug('Other metrics:',METRICS) except Exception as e: print('logging failed') print(e.args) # Cloud authentication. from google.colab import auth auth.authenticate_user() # Import, authenticate and initialize the Earth Engine library. import ee try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() # Tensorflow setup. 
%tensorflow_version 1.x import tensorflow as tf print(tf.__version__) tf.enable_eager_execution() # INSERT YOUR BUCKET HERE: BUCKET = 'tf-agro-forest' from tensorflow.python.keras import backend # dice coeff and dice loss from Biplov def dice_coeff(y_true, y_pred, smooth=1): y_true_f = backend.flatten(y_true) y_pred_f = backend.flatten(y_pred) intersection = backend.sum(y_true_f * y_pred_f) return (2. * intersection + smooth) / (backend.sum(y_true_f) + backend.sum(y_pred_f) + smooth) def dice_loss(y_true, y_pred): loss = 1 - dice_coeff(y_true, y_pred) return loss # soft dice loss function from Kel # based on https://arxiv.org/pdf/1707.03237.pdf def dice_loss_soft(y_true, y_pred, smooth=1): intersection = backend.sum(backend.abs(y_true * y_pred), axis=-1) true_sum = backend.sum(backend.square(y_true),-1) pred_sum = backend.sum(backend.square(y_pred),-1) return 1 - ((2. * intersection + smooth) / (true_sum + pred_sum + smooth)) ``` ## Set other global variables ``` from tensorflow.python.keras import metrics # Specify names locations for outputs in Cloud Storage. FOLDER = 'training_data' TRAINING_BASE = 'training_patches_kernalsize_64' EVAL_BASE = 'training_patches_kernalsize_64' # Specify inputs (Landsat bands) to the model and the response variable. BANDS =['red','green','blue','rmed','rmin','rstd','vv','vh'] RESPONSE = ['cocao','forest','seasonalAg' ,'urban','water','banana','savana','orchard','mine','shrubland','sparseTree','bare' ,'grassland','secondaryForest','nodata'] FEATURES = BANDS + RESPONSE # Specify the size and shape of patches expected by the model. KERNEL_SIZE = 64 KERNEL_SHAPE = [KERNEL_SIZE, KERNEL_SIZE] COLUMNS = [ tf.io.FixedLenFeature(shape=KERNEL_SHAPE, dtype=tf.float32) for k in FEATURES ] FEATURES_DICT = dict(zip(FEATURES, COLUMNS)) # Sizes of the training and evaluation datasets. TRAIN_SIZE = 16000 EVAL_SIZE = 3000 # Specify model training parameters. 
BATCH_SIZE = 1 EPOCHS = 10 BUFFER_SIZE = 7000 OPTIMIZER = 'Adam' LOSS = dice_loss # METRICS = [ # metrics.get('Accuracy'), # dice_coeff,] # og metrics if need to switch back METRICS = [metrics.get('RootMeanSquaredError'), metrics.get('MeanAbsoluteError'), metrics.get('Accuracy'), dice_coeff,] tfLog() !ls !cat myapp.log ``` # Training data Load the data exported from Earth Engine into a `tf.data.Dataset`. The following are helper functions for that. ``` def parse_tfrecord(example_proto): """The parsing function. Read a serialized example into the structure defined by FEATURES_DICT. Args: example_proto: a serialized Example. Returns: A dictionary of tensors, keyed by feature name. """ return tf.io.parse_single_example(example_proto, FEATURES_DICT) def to_tuple(inputs): """Function to convert a dictionary of tensors to a tuple of (inputs, outputs). Turn the tensors returned by parse_tfrecord into a stack in HWC shape. Args: inputs: A dictionary of tensors, keyed by feature name. Returns: A dtuple of (inputs, outputs). """ inputsList = [inputs.get(key) for key in FEATURES] stacked = tf.stack(inputsList, axis=0) # Convert from CHW to HWC stacked = tf.transpose(stacked, [1, 2, 0]) return stacked[:,:,:len(BANDS)], stacked[:,:,len(BANDS):] def get_dataset(pattern,flip=False): """Function to read, parse and format to tuple a set of input tfrecord files. Get all the files matching the pattern, parse and convert to tuple. Args: pattern: A file pattern to match in a Cloud Storage bucket. Returns: A tf.data.Dataset """ glob = tf.gfile.Glob(pattern) dataset = tf.data.TFRecordDataset(glob, compression_type='GZIP') dataset = dataset.map(parse_tfrecord, num_parallel_calls=5) dataset = dataset.map(to_tuple, num_parallel_calls=5) if flip: dataset = dataset.map(transform) return dataset # custom function to randomly augment the data during training # from kels notebooks # adapted with python random rather than tf. Not sure if it works.. 
import random def transform(features,labels): x = random.random() # flip image on horizontal axis if round(x,2) < 0.12: feat = tf.image.flip_left_right(features) labl = tf.image.flip_left_right(labels) # flip image on vertical axis elif round(x,2) >=0.12 and round(x,2) < 0.24: feat = tf.image.flip_up_down(features) labl = tf.image.flip_up_down(labels) # transpose image on bottom left corner elif round(x,2) >=0.24 and round(x,2) < 0.36: feat = tf.image.flip_left_right(tf.image.flip_up_down(features)) labl = tf.image.flip_left_right(tf.image.flip_up_down(labels)) # rotate to the left 90 degrees elif round(x,2) >=0.36 and round(x,2) < 0.48: feat = tf.image.rot90(features,k=1) labl = tf.image.rot90(labels,k=1) # rotate to the left 180 degrees elif round(x,2) >=0.48 and round(x,2) < 0.60: feat = tf.image.rot90(features,k=2) labl = tf.image.rot90(labels,k=2) # rotate to the left 270 degrees elif round(x,2) >=0.60 and round(x,2) < 0.72: feat = tf.image.rot90(features,k=3) labl = tf.image.rot90(labels,k=3) # transpose image on bottom right corner elif round(x,2) >=0.72 and round(x,2) < 0.84: feat = tf.image.flip_left_right(tf.image.rot90(features,k=2)) labl = tf.image.flip_left_right(tf.image.rot90(labels,k=2)) else: feat = features labl = labels print(x,'I WORK') return feat,labl ``` Use the helpers to read in the training dataset. Print the first record to check. ``` def get_training_dataset(): """Get the preprocessed training dataset Returns: A tf.data.Dataset of training data. """ glob = 'gs://' + BUCKET + '/' + FOLDER + '/' + TRAINING_BASE + '*' dataset = get_dataset(glob,flip=False) dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat() return dataset training = get_training_dataset() # print(iter(training.take(1)).next()) ``` # Evaluation data Now do the same thing to get an evaluation dataset. Note that unlike the training dataset, the evaluation dataset has a batch size of 1, is not repeated and is not shuffled. 
``` def get_eval_dataset(): """Get the preprocessed evaluation dataset Returns: A tf.data.Dataset of evaluation data. """ glob = 'gs://' + BUCKET + '/' + FOLDER + '/' + EVAL_BASE + '*' dataset = get_dataset(glob) dataset = dataset.batch(1).repeat() return dataset evaluation = get_eval_dataset() ``` # Model Here we use the Keras implementation of the U-Net model as found [in the TensorFlow examples](https://github.com/tensorflow/models/blob/master/samples/outreach/blogs/segmentation_blogpost/image_segmentation.ipynb). The U-Net model takes 256x256 pixel patches as input and outputs per-pixel class probability, label or a continuous output. We can implement the model essentially unmodified, but will use mean squared error loss on the sigmoidal output since we are treating this as a regression problem, rather than a classification problem. Since impervious surface fraction is constrained to [0,1], with many values close to zero or one, a saturating activation function is suitable here. ``` # re orginize model... 
from tensorflow.python.keras import layers from tensorflow.python.keras import losses from tensorflow.python.keras import models from tensorflow.python.keras import metrics from tensorflow.python.keras import optimizers from tensorflow.keras.utils import plot_model # model = models.Sequential() visible = layers.Input(shape=[None, None, len(BANDS)]) # from stackexchange enocded_imag = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(visible) enocded_imag = layers.MaxPooling2D((2, 2), padding='same')(enocded_imag) enocded_imag = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(visible) enocded_imag = layers.MaxPooling2D((2, 2), padding='same')(enocded_imag) enocded_imag = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(enocded_imag) enocded_imag = layers.MaxPooling2D((2, 2), padding='same')(enocded_imag) enocded_imag = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(enocded_imag) enocded_imag = layers.MaxPooling2D((2, 2), padding='same')(enocded_imag) decoded_imag = layers.Conv2D(64, (2, 2), activation='relu', padding='same')(enocded_imag) decoded_imag = layers.UpSampling2D((2, 2),interpolation='bilinear')(decoded_imag) decoded_imag = layers.BatchNormalization()(decoded_imag) decoded_imag = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(decoded_imag) decoded_imag = layers.UpSampling2D((2, 2),interpolation='bilinear')(decoded_imag) decoded_imag = layers.BatchNormalization()(decoded_imag) decoded_imag = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(decoded_imag) decoded_imag = layers.UpSampling2D((2, 2),interpolation='bilinear')(decoded_imag) decoded_imag = layers.BatchNormalization()(decoded_imag) decoded_imag = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(decoded_imag) # decoded_imag = layers.Dropout(0.2)(decoded_imag) outBranch = layers.Conv2D(128, 3, activation='relu', padding='same',name="out_block_conv1")(decoded_imag) outBranch = 
layers.SpatialDropout2D(rate=0.2,seed=1,name="out_block_spatialdrop")(outBranch) outBranch = layers.BatchNormalization(name="out_block_batchnorm1")(outBranch) outBranch = layers.Conv2D(len(RESPONSE), (1, 1), activation='relu')(outBranch) outputs = layers.Activation("softmax")(outBranch) model = models.Model(inputs=visible, outputs=outputs) # summarize layers print(model.summary()) # plot graph plot_model(model, to_file='convolutional_neural_network.png') # re orginize model...for 8x8 patch from tensorflow.python.keras import layers from tensorflow.python.keras import losses from tensorflow.python.keras import models from tensorflow.python.keras import metrics from tensorflow.python.keras import optimizers from tensorflow.keras.utils import plot_model # model = models.Sequential() visible = layers.Input(shape=[None, None, len(BANDS)]) # from stackexchange enocded_imag = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(visible) enocded_imag = layers.MaxPooling2D((2, 2), padding='same')(enocded_imag) decoded_imag = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(enocded_imag) decoded_imag = layers.UpSampling2D((2, 2),interpolation='bilinear')(decoded_imag) decoded_imag = layers.BatchNormalization()(decoded_imag) decoded_imag = layers.Dropout(0.2)(decoded_imag) # outBranch = layers.BatchNormalization(name="out_block_batchnorm1")(decoded_imag) outBranch = layers.Conv2D(len(RESPONSE), (1, 1), activation='relu')(decoded_imag) outputs = layers.Activation("softmax")(outBranch) model = models.Model(inputs=visible, outputs=outputs) # summarize layers print(model.summary()) # plot graph plot_model(model, to_file='convolutional_neural_network.png') # trying new cnn>dnn not working well... 
from tensorflow.python.keras import layers from tensorflow.python.keras import losses from tensorflow.python.keras import models from tensorflow.python.keras import metrics from tensorflow.python.keras import optimizers from tensorflow.keras.utils import plot_model model = models.Sequential() visible = layers.Input(shape=[None, None, len(BANDS)]) conv1 = layers.Conv2D(32, kernel_size=4, padding='same',activation='relu')(visible) pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1) conv2 = layers.Conv2D(16, kernel_size=4, padding='same',activation='relu')(pool1) pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2) flat = layers.GlobalMaxPooling2D()(pool2) # flat = layers.Flatten()(pool2) hidden1 = layers.Dense(128,activation='relu')(flat) output = layers.Conv1D(len(RESPONSE), activation='softmax')(hidden1) # outBranch = layers.Conv2D(len(RESPONSE), (1, 1), activation='sigmoid')(flat) # outputs = layers.Activation("softmax")(outBranch) model = models.Model(inputs=visible, outputs=output) # summarize layers print(model.summary()) # plot graph plot_model(model, to_file='convolutional_neural_network.png') from tensorflow.python.keras import layers from tensorflow.python.keras import losses from tensorflow.python.keras import models from tensorflow.python.keras import metrics from tensorflow.python.keras import optimizers from tensorflow.keras.utils import plot_model # model = models.Sequential() visible = layers.Input(shape=[None, None, len(BANDS)]) # from stackexchange enocded_imag = layers.Conv2D(64, (7, 7), activation='relu', padding='same')(visible) enocded_imag = layers.MaxPooling2D((2, 2), padding='same')(enocded_imag) enocded_imag = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(visible) enocded_imag = layers.MaxPooling2D((2, 2), padding='same')(enocded_imag) enocded_imag = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(enocded_imag) enocded_imag = layers.MaxPooling2D((2, 2), padding='same')(enocded_imag) enocded_imag = 
layers.Conv2D(8, (3, 3), activation='relu', padding='same')(enocded_imag) enocded_imag = layers.MaxPooling2D((2, 2), padding='same')(enocded_imag) decoded_imag = layers.Conv2D(8, (2, 2), activation='relu', padding='same')(enocded_imag) decoded_imag = layers.UpSampling2D((2, 2),interpolation='bilinear')(decoded_imag) decoded_imag = layers.BatchNormalization()(decoded_imag) decoded_imag = layers.Conv2D(16, (3, 3), activation='relu', padding='same')(decoded_imag) decoded_imag = layers.UpSampling2D((2, 2),interpolation='bilinear')(decoded_imag) decoded_imag = layers.BatchNormalization()(decoded_imag) decoded_imag = layers.Conv2D(32, (3, 3), activation='relu', padding='same')(decoded_imag) decoded_imag = layers.UpSampling2D((2, 2),interpolation='bilinear')(decoded_imag) decoded_imag = layers.BatchNormalization()(decoded_imag) decoded_imag = layers.Conv2D(64, (3, 3), activation='relu', padding='same')(decoded_imag) # decoded_imag = layers.Dropout(0.2)(decoded_imag) outBranch = layers.Conv2D(128, 3, activation='relu', padding='same',name="out_block_conv1")(decoded_imag) outBranch = layers.SpatialDropout2D(rate=0.2,seed=1,name="out_block_spatialdrop")(outBranch) outBranch = layers.BatchNormalization(name="out_block_batchnorm1")(outBranch) outBranch = layers.Conv2D(len(RESPONSE), (1, 1), activation='relu')(outBranch) outputs = layers.Activation("softmax")(outBranch) model = models.Model(inputs=visible, outputs=outputs) # summarize layers print(model.summary()) # plot graph plot_model(model, to_file='convolutional_neural_network.png') # trying new cnn>dnn not working well model.compile( optimizer=optimizers.Adam(lr=0.001), loss=dice_loss_soft,#losses.get('categorical_crossentropy'),#dice_loss_soft,#losses.get('categorical_crossentropy'), #losses.get(LOSS),dice_loss_soft # binaryxentro works in both loss and metics # metrics.CategoricalAccuracy() metrics=[metrics.get('CategoricalAccuracy')])# dice_coef,[metrics.get(metric) for metric in METRICS]) 
#[metrics.get('CategoricalAccuracy')]) m = model MODEL_FOLDER = 'smalldicelosssoft' history = m.fit( x=training, epochs=EPOCHS, steps_per_epoch=int(TRAIN_SIZE / BATCH_SIZE), validation_data=evaluation, validation_steps=EVAL_SIZE) modelDir = 'gs://{}/{}/{}'.format(BUCKET,FOLDER,MODEL_FOLDER) # tf.contrib.saved_model.save_keras_model(m, modelDir) # TODO: add something to move log to model folder # tfLog() modelDir = 'gs://{}/{}/{}'.format(BUCKET,FOLDER,MODEL_FOLDER+'_2') tf.contrib.saved_model.save_keras_model(m, modelDir) # TODO: add something to move log to model folder # tfLog() # plot the results of model training # get numpy and matplotlib.pyplot # from kels notebooks %pylab inline fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(10,5.5)) ax[0].plot(history.history['loss'],color='#1f77b4',label='Training Loss') ax[0].plot(history.history['val_loss'],linestyle=':',marker='o',markersize=3,color='#1f77b4',label='Validation Loss') ax[0].set_ylabel('Loss') ax[0].set_ylim(0.0,0.4) ax[0].legend() ax[1].plot(history.history['categorical_accuracy'],color='#ff7f0e',label='Training Acc.') ax[1].plot(history.history['val_categorical_accuracy'],linestyle=':',marker='o',markersize=3,color='#ff7f0e',label='Validation Acc.') ax[1].set_ylabel('Accuracy') ax[1].set_xlabel('Epoch') ax[1].legend(loc="lower right") ax[1].set_xticks(history.epoch) ax[1].set_xticklabels(range(1,len(history.epoch)+1)) ax[1].set_xlabel('Epoch') ax[1].set_ylim(0.0,1) plt.legend() # plt.savefig("/content/drive/My Drive/landsat_qa_samples/training.png",dpi=300,) plt.show() # og gee tf fcnn from tensorflow.python.keras import layers from tensorflow.python.keras import losses from tensorflow.python.keras import models from tensorflow.python.keras import metrics from tensorflow.python.keras import optimizers def conv_block(input_tensor, num_filters): encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor) encoder = layers.BatchNormalization()(encoder) encoder = 
layers.Activation('relu')(encoder) encoder = layers.Conv2D(num_filters, (3, 3), padding='same')(encoder) encoder = layers.BatchNormalization()(encoder) encoder = layers.Activation('relu')(encoder) return encoder def encoder_block(input_tensor, num_filters): encoder = conv_block(input_tensor, num_filters) encoder_pool = layers.MaxPooling2D((2, 2), strides=(2, 2))(encoder) return encoder_pool, encoder def decoder_block(input_tensor, concat_tensor, num_filters): decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2), padding='same')(input_tensor) decoder = layers.concatenate([concat_tensor, decoder], axis=-1) decoder = layers.BatchNormalization()(decoder) decoder = layers.Activation('relu')(decoder) decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder) decoder = layers.BatchNormalization()(decoder) decoder = layers.Activation('relu')(decoder) decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder) decoder = layers.BatchNormalization()(decoder) decoder = layers.Activation('relu')(decoder) return decoder def get_model(): inputs = layers.Input(shape=[None, None, len(BANDS)]) # 256 encoder0_pool, encoder0 = encoder_block(inputs, 32) # 128 encoder1_pool, encoder1 = encoder_block(encoder0_pool, 64) # 64 encoder2_pool, encoder2 = encoder_block(encoder1_pool, 128) # 32 encoder3_pool, encoder3 = encoder_block(encoder2_pool, 256) # 16 encoder4_pool, encoder4 = encoder_block(encoder3_pool, 512) # 8 center = conv_block(encoder4_pool, 1024) # center decoder4 = decoder_block(center, encoder4, 512) # 16 decoder3 = decoder_block(decoder4, encoder3, 256) # 32 decoder2 = decoder_block(decoder3, encoder2, 128) # 64 decoder1 = decoder_block(decoder2, encoder1, 64) # 128 decoder0 = decoder_block(decoder1, encoder0, 32) # 256 outBranch = layers.Conv2D(len(RESPONSE), (1, 1), activation='relu')(decoder0) outputs = layers.Activation("softmax")(outBranch) model = models.Model(inputs=[inputs], outputs=[outputs]) #og compile model.compile( 
optimizer=optimizers.get(OPTIMIZER), loss=losses.get(LOSS), metrics=[metrics.get('CategoricalAccuracy')])#metrics=[metrics.get(metric) for metric in METRICS]) return model m = get_model() MODEL_FOLDER = 'testsoftmaxmeh' m.fit( x=training, epochs=EPOCHS, steps_per_epoch=int(TRAIN_SIZE / BATCH_SIZE), validation_data=evaluation, validation_steps=EVAL_SIZE) modelDir = 'gs://{}/{}/{}'.format(BUCKET,FOLDER,MODEL_FOLDER) tf.contrib.saved_model.save_keras_model(m, modelDir) # TODO: add something to move log to model folder tfLog() ``` # Training the model You train a Keras model by calling `.fit()` on it. Here we're going to train for 10 epochs, which is suitable for demonstration purposes. For production use, you probably want to optimize this parameter, for example through [hyperparameter tuning](https://cloud.google.com/ml-engine/docs/tensorflow/using-hyperparameter-tuning). Note that the notebook VM is sometimes not heavy-duty enough to get through a whole training job, especially if you have a large buffer size or a large number of epochs. You can still use this notebook for training, but may need to set up an alternative VM ([learn more](https://research.google.com/colaboratory/local-runtimes.html)) for production use. Alternatively, you can package your code for running large training jobs on Google's AI Platform [as described here](https://cloud.google.com/ml-engine/docs/tensorflow/trainer-considerations). The following code loads a pre-trained model, which you can use for predictions right away. ``` from tensorflow.python.tools import saved_model_utils # # modelDir = 'gs://{}/{}/model-dice-256'.format('ee-tf',FOLDER) meta_graph_def = saved_model_utils.get_meta_graph_def(modelDir, 'serve') inputs = meta_graph_def.signature_def['serving_default'].inputs outputs = meta_graph_def.signature_def['serving_default'].outputs # Just get the first thing(s) from the serving signature def. i.e. this # model only has a single input and a single output. 
input_name = None for k,v in inputs.items(): input_name = v.name break output_name = None for k,v in outputs.items(): output_name = v.name break # Make a dictionary that maps Earth Engine outputs and inputs to # AI Platform inputs and outputs, respectively. import json input_dict = "'" + json.dumps({input_name: "array"}) + "'" output_dict = "'" + json.dumps({output_name: "class"}) + "'" # Put the EEified model next to the trained model directory. # TODO: add eeidied dir, project into to log, add output name EEIFIED_DIR = '{}/eeified'.format(modelDir) PROJECT = 'john-ee-282116' print(input_dict,output_dict) # You need to set the project before using the model prepare command. !earthengine set_project {PROJECT} !earthengine model prepare --source_dir {modelDir} --dest_dir {EEIFIED_DIR} --input {input_dict} --output {output_dict} modelDir import time MODEL_NAME = 'tf256_small2large_2ai' VERSION_NAME = 'v' + str(int(time.time())) print('Creating version: ' + VERSION_NAME) !gcloud ai-platform models create {MODEL_NAME} --project {PROJECT} !gcloud ai-platform versions create {VERSION_NAME} \ --project {PROJECT} \ --model {MODEL_NAME} \ --origin {EEIFIED_DIR} \ --runtime-version=1.14 \ --framework "TENSORFLOW" \ --python-version=3.5 # Load a trained model. MODEL_DIR = 'gs://ee-tf/tahoe-ogfw-02292020/model-ogwf-256' m = tf.contrib.saved_model.load_keras_model(MODEL_DIR) help(m.summary()) ```
github_jupyter
#### TFIDF ``` # loading libraries import pandas as pd from nltk.stem.snowball import SnowballStemmer stemmer = SnowballStemmer("english") import nltk import re from sklearn.cluster import SpectralClustering from sklearn.metrics import silhouette_score from sklearn.cluster import KMeans from sklearn.model_selection import GridSearchCV from collections import Counter import ast # importing data ted_main = pd.read_csv('ted_main.csv') ted_main['tags'] = ted_main['tags'].apply(lambda x: ast.literal_eval(x)) transcripts = pd.read_csv('transcripts.csv') ted_merged = pd.merge(left=transcripts, right=ted_main, left_on='url', right_on='url') transcript = ted_merged.transcript def tokenize(text): tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)] filtered_tokens = [] for token in tokens: if re.search('[a-zA-Z]', token): filtered_tokens.append(token) # stems = [stemmer.stem(t) for t in filtered_tokens] return filtered_tokens doc = transcript.tolist() from sklearn.feature_extraction.text import TfidfVectorizer tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.2, stop_words='english', use_idf=True, tokenizer=tokenize, ngram_range=(1,3)) %time tfidf_matrix = tfidf_vectorizer.fit_transform(doc) #fit the vectorizer to synopses print(tfidf_matrix.shape) ``` #### Spectral Clustering ``` n_cluster = range(2,11) best_param = [] list_score = [] for n in n_cluster: model = SpectralClustering(n_clusters=n) model.fit(tfidf_matrix) label = model.labels_ list_score.append(silhouette_score(tfidf_matrix, label)) list_score = np.array(list_score) best_param.append(n_cluster[list_score.argmax()]) print(best_param) model = SpectralClustering(n_clusters=8) model.fit(tfidf_matrix) label = model.labels_ clusters = label.tolist() Counter(clusters) ``` #### KMeans Clustering ``` n_cluster = list(range(2,11)) param_grid = {'n_clusters': n_cluster} kmeans = KMeans() kmeans_cv = GridSearchCV(kmeans, param_grid) 
kmeans_cv.fit(tfidf_matrix) print("Tuned Kmeans Parameter: {}".format(kmeans_cv.best_params_)) km_model = KMeans(n_clusters=8) km_model.fit(tfidf_matrix) km_label = km_model.labels_ km_clusters = km_label.tolist() Counter(km_clusters) import warnings warnings.filterwarnings("ignore") ted_merged['cluster'] = clusters ted_w_cluster = ted_merged[['title','transcript','tags','cluster']] ted_w_cluster[ted_w_cluster['cluster']==7][:50] ted_w_cluster c0_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 0]['tags'].tolist() for item in sub_list] c1_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 1]['tags'].tolist() for item in sub_list] c2_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 2]['tags'].tolist() for item in sub_list] c3_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 3]['tags'].tolist() for item in sub_list] c4_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 4]['tags'].tolist() for item in sub_list] c5_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 5]['tags'].tolist() for item in sub_list] c6_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 6]['tags'].tolist() for item in sub_list] c7_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 7]['tags'].tolist() for item in sub_list] # c8_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 8]['tags'].tolist() for item in sub_list] # c9_tag = [item for sub_list in ted_w_cluster[ted_w_cluster.cluster == 9]['tags'].tolist() for item in sub_list] c0_tag_stat = pd.Series(Counter(c0_tag)) c1_tag_stat = pd.Series(Counter(c1_tag)) c2_tag_stat = pd.Series(Counter(c2_tag)) c3_tag_stat = pd.Series(Counter(c3_tag)) c4_tag_stat = pd.Series(Counter(c4_tag)) c5_tag_stat = pd.Series(Counter(c5_tag)) c6_tag_stat = pd.Series(Counter(c6_tag)) c7_tag_stat = pd.Series(Counter(c7_tag)) # c8_tag_stat = pd.Series(Counter(c8_tag)) # c9_tag_stat = pd.Series(Counter(c9_tag)) 
print(c0_tag_stat.nlargest(10)) print ("") print (c1_tag_stat.nlargest(10)) print ("") print (c2_tag_stat.nlargest(10)) print ("") print (c3_tag_stat.nlargest(10)) print ("") print (c4_tag_stat.nlargest(10)) print ("") print (c5_tag_stat.nlargest(10)) print ("") print(c6_tag_stat.nlargest(10)) print ("") print(c7_tag_stat.nlargest(10)) print ("") # print(c8_tag_stat.nlargest(10)) # print ("") # print(c9_tag_stat.nlargest(10)) # print ("") ```
github_jupyter
# Amazon Augmented AI(A2I) Integrated with AWS Marketplace ML Models Sometimes, for some payloads, machine learning (ML) model predictions are just not confident enough and you want more than a machine. Furthermore, training a model can be complicated, time-consuming, and expensive. This is where [AWS Marketplace](https://aws.amazon.com/marketplace/b/6297422012?page=1&filters=FulfillmentOptionType&FulfillmentOptionType=SageMaker&ref_=sa_campaign_pbrao) and [Amazon Augmented AI](https://aws.amazon.com/augmented-ai/) (Amazon A2I) come in. By combining a pretrained ML model in AWS Marketplace with Amazon Augmented AI, you can quickly reap the benefits of pretrained models with validating and augmenting the model's accuracy with human intelligence. AWS Marketplace contains over 400 pretrained ML models. Some models are general purpose. For example, the [GluonCV SSD Object Detector](https://aws.amazon.com/marketplace/pp/prodview-ggbuxlnrm2lh4?qid=1605041213915&sr=0-5&ref_=sa_campaign_pbrao) can detect objects in an image and place bounding boxes around the objects. AWS Marketplace also offers many purpose-built models such as a [Background Noise Classifier](https://aws.amazon.com/marketplace/pp/prodview-vpd6qdjm4d7u4?applicationId=AWS-Sagemaker-Console&ref_=sa_campaign_pbrao), a [Hard Hat Detector for Worker Safety](https://aws.amazon.com/marketplace/pp/prodview-jd5tj2egpxxum?applicationId=AWS-Sagemaker-Console&ref_=sa_campaign_pbrao), and a [Person in Water](https://aws.amazon.com/marketplace/pp/prodview-wlndemzv5pxhw?applicationId=AWS-Sagemaker-Console&ref_=sa_campaign_pbrao). Amazon A2I provides a human-in-loop workflow to review ML predictions. Its configurable human-review workflow solution and customizable user-review console enable you to focus on ML tasks and increase the accuracy of the predictions with human input. 
## Overview In this notebook, you will use a pre-trained AWS Marketplace machine learning model with Amazon A2I to detect images as well as trigger a human-in-loop workflow to review, update and add additional labeled objects to an individual image. Furthermore, you can specify configurable threshold criteria for triggering the human-in-loop workflow in Amazon A2I. For example, you can trigger a human-in-loop workflow if there are no objects that are detected with an accuracy of 90% or greater. The following diagram shows the AWS services that are used in this notebook and the steps that you will perform. Here are the high level steps in this notebook: 1. Configure the human-in-loop review using Amazon A2I 1. Select, deploy, and invoke an AWS Marketplace ML model 1. Trigger the human review workflow in Amazon A2I. 1. The private workforce that was created in Amazon SageMaker Ground Truth reviews and edits the objects detected in the image. <img style="float: center;" src="./img/a2i_diagram.png" width="700" height="500"> ## Contents * [Prerequisites](#Prerequisites) * [Step 1 Configure Amazon A2I service](#step1) * [Step 1.1 Creating human review Workteam or Workforce](#step1_1) * [Step 1.2 Create Human Task UI](#step1_2) * [Step 1.3 Create the Flow Definition](#step1_3) * [Step 2 Deploy and invoke AWS Marketplace model](#step2) * [Step 2.1 Create an endpoint](#step2_1) * [Step 2.2 Create input payload](#step2_2) * [Step 2.3 Perform real-time inference](#step2_3) * [Step3 Starting Human Loops](#step3) * [Step 3.1 View Task Results](#step3_1) * [Step 4 Next steps](#step4) * [Step 4.1 Additional resources](#step4_1) * [Step 5 Cleanup Resources](#step5) ### Usage instructions You can run this notebook one cell at a time (By using Shift+Enter for running a cell). 
## Prerequisites <a class="anchor" id="prerequisites"></a> This sample notebook requires a subscription to **[GluonCV SSD Object Detector](https://aws.amazon.com/marketplace/pp/prodview-ggbuxlnrm2lh4?ref_=sa_campaign_pbrao)**, a pre-trained machine learning model package from AWS Marketplace. If your AWS account has not been subscribed to this listing, here is the process you can follow: 1. Open the [listing](https://aws.amazon.com/marketplace/pp/prodview-ggbuxlnrm2lh4?ref_=sa_campaign_pbrao) from AWS Marketplace 1. Read the Highlights section and then product overview section of the listing. 1. View usage information and then additional resources. 1. Note the supported instance types. 1. Next, click on **Continue to subscribe.** 1. Review End-user license agreement, support terms, as well as pricing information. 1. The **Accept Offer** button needs to be selected if your organization agrees with EULA, pricing information as well as support terms. If the Continue to configuration button is active, it means your account already has a subscription to this listing. Once you select the **Continue to configuration** button and then choose **region**, you will see that a Product Arn will appear. This is the **model package ARN** that you need to specify in the following cell. ``` model_package_arn = "arn:aws:sagemaker:us-east-1:865070037744:model-package/gluoncv-ssd-resnet501547760463-0f9e6796d2438a1d64bb9b15aac57bc0" # Update as needed ``` 8. This notebook requires the IAM role associated with this notebook to have *AmazonSageMakerFullAccess* IAM permission. 8. Note: If you want to run this notebook on AWS SageMaker Studio - please use Classic Jupyter mode to be able correctly render visualization. Pick instance type **'ml.m4.xlarge'** or larger. Set kernel to **'Data Science'**. <img style="float: left;" src="./img/classicjupyter.png"> ### Installing Dependencies Import the libraries that are needed for this notebook. 
``` # Import necessary libraries import boto3 import json import pandas as pd import pprint import requests import sagemaker import shutil import time import uuid import PIL.Image from IPython.display import Image from IPython.display import Markdown as md from sagemaker import get_execution_role from sagemaker import ModelPackage ``` #### Setup Variables, Bucket and Paths ``` # Setting Role to the default SageMaker Execution Role role = get_execution_role() # Instantiate the SageMaker session and client that will be used throughout the notebook sagemaker_session = sagemaker.Session() sagemaker_client = sagemaker_session.sagemaker_client # Fetch the region region = sagemaker_session.boto_region_name # Create S3 and A2I clients s3 = boto3.client("s3", region) a2i = boto3.client("sagemaker-a2i-runtime", region) # Retrieve the current timestamp timestamp = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime()) # endpoint_name = '<ENDPOINT_NAME>' endpoint_name = "gluoncv-object-detector" # content_type='<CONTENT_TYPE>' content_type = "image/jpeg" # Instance size type to be used for making real-time predictions real_time_inference_instance_type = "ml.m4.xlarge" # Task UI name - this value is unique per account and region. You can also provide your own value here. # task_ui_name = '<TASK_UI_NAME>' task_ui_name = "ui-aws-marketplace-gluon-model-" + timestamp # Flow definition name - this value is unique per account and region. You can also provide your own value here. 
flow_definition_name = "fd-aws-marketplace-gluon-model-" + timestamp # Name of the image file that will be used in object detection image_file_name = "image.jpg" # Create the sub-directory in the default S3 bucket # that will store the results of the human-in-loop A2I review bucket = sagemaker_session.default_bucket() key = "a2i-results" s3.put_object(Bucket=bucket, Key=(key + "/")) output_path = f"s3://{bucket}/a2i-results" print(f"Results of A2I will be stored in {output_path}.") ``` ## Step 1 Configure Amazon A2I service<a class="anchor" id="step1"></a> In this section, you will create 3 resources: 1. Private workforce 2. Human-in-loop Console UI 3. Workflow definition ### Step 1.1 Creating human review Workteam or Workforce <a class="anchor" id="step1_1"></a> If you have already created a private work team, replace <WORKTEAM_ARN> with the ARN of your work team. If you have never created a private work team, use the instructions below to create one. To learn more about using and managing private work teams, see [Use a Private Workforce](https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-private.html)). 1. In the Amazon SageMaker console in the left sidebar under the Ground Truth heading, open the **Labeling Workforces**. 1. Choose **Private**, and then choose **Create private team**. 1. If you are a new user of SageMaker workforces, it is recommended you select **Create a private work team with AWS Cognito**. 1. For team name, enter "MyTeam". 1. To add workers by email, select **Invite new workers by email** and paste or type a list of up to 50 email addresses, separated by commas, into the email addresses box. If you are following this notebook, specify an email account that you have access to. The system sends an invitation email, which allows users to authenticate and set up their profile for performing human-in-loop review. 1. Enter an organization name - this will be used to customize emails sent to your workers. 1. 
For contact email, enter an email address you have access to. 1. Select **Create private team**. This will bring you back to the Private tab under labeling workforces, where you can view and manage your private teams and workers. ### **IMPORTANT: After you have created your workteam, from the Team summary section copy the value of the ARN and uncomment and replace `<WORKTEAM_ARN>` below:** ``` # workteam_arn = '<WORKTEAM_ARN>' ``` ### Step 1.2 Create Human Task UI <a class="anchor" id="step1_2"></a> Create a human task UI resource, giving a UI template in liquid HTML. This template will be rendered to the human workers whenever human loop is required. For additional UI templates, check out this repository: https://github.com/aws-samples/amazon-a2i-sample-task-uis. You will be using a slightly modified version of the [object detection UI](https://github.com/aws-samples/amazon-a2i-sample-task-uis/blob/master/images/bounding-box.liquid.html) that provides support for the `initial-value` and `labels` variables in the template. ``` # Create task UI # Read in the template from a local file template = open("./src/worker-task-template.html").read() human_task_ui_response = sagemaker_client.create_human_task_ui( HumanTaskUiName=task_ui_name, UiTemplate={"Content": template} ) human_task_ui_arn = human_task_ui_response["HumanTaskUiArn"] print(human_task_ui_arn) ``` ### Step 1.3 Create the Flow Definition <a class="anchor" id="step1_3"></a> In this section, you will create a flow definition. Flow Definitions allow you to specify: * The workforce that your tasks will be sent to. * The instructions that your workforce will receive. This is called a worker task template. * The configuration of your worker tasks, including the number of workers that receive a task and time limits to complete tasks. * Where your output data will be stored. For more details and instructions, see: https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-create-flow-definition.html. 
``` create_workflow_definition_response = sagemaker_client.create_flow_definition( FlowDefinitionName=flow_definition_name, RoleArn=role, HumanLoopConfig={ "WorkteamArn": workteam_arn, "HumanTaskUiArn": human_task_ui_arn, "TaskCount": 1, "TaskDescription": "Identify and locate the object in an image.", "TaskTitle": "Object detection Amazon A2I demo", }, OutputConfig={"S3OutputPath": output_path}, ) flow_definition_arn = create_workflow_definition_response[ "FlowDefinitionArn" ] # let's save this ARN for future use %%time # Describe flow definition - status should be active for x in range(60): describe_flow_definition_response = sagemaker_client.describe_flow_definition( FlowDefinitionName=flow_definition_name ) print(describe_flow_definition_response["FlowDefinitionStatus"]) if describe_flow_definition_response["FlowDefinitionStatus"] == "Active": print("Flow Definition is active") break time.sleep(2) ``` ## Step 2 Deploy and invoke AWS Marketplace model <a class="anchor" id="step2"></a> In this section, you will stand up an Amazon SageMaker endpoint. Each endpoint must have a unique name which you can use for performing inference. ### Step 2.1 Create an Endpoint <a class="anchor" id="step2_1"></a> ``` %%time # Create a deployable model from the model package. model = ModelPackage( role=role, model_package_arn=model_package_arn, sagemaker_session=sagemaker_session, predictor_cls=sagemaker.predictor.Predictor, ) # Deploy the model predictor = model.deploy( initial_instance_count=1, instance_type=real_time_inference_instance_type, endpoint_name=endpoint_name, ) ``` It will take anywhere between 5 to 10 minutes to create the endpoint. Once the endpoint has been created, you would be able to perform real-time inference. ### Step 2.2 Create input payload <a class="anchor" id="step2_2"></a> In this step, you will prepare a payload to perform a prediction. ``` # Download the image file # Open the url image, set stream to True, this will return the stream content. 
r = requests.get("https://images.pexels.com/photos/763398/pexels-photo-763398.jpeg", stream=True) # Open a local file with wb ( write binary ) permission to save it locally. with open(image_file_name, "wb") as f: shutil.copyfileobj(r.raw, f) ``` Resize the image and upload the file to S3 so that the image can be referenced from the worker console UI. ``` # Load the image image = PIL.Image.open(image_file_name) # Resize the image resized_image = image.resize((600, 400)) # Save the resized image file locally resized_image.save(image_file_name) # Save file to S3 s3 = boto3.client("s3") with open(image_file_name, "rb") as f: s3.upload_fileobj(f, bucket, image_file_name) # Display the image from IPython.core.display import Image, display Image(filename=image_file_name, width=600, height=400) ``` ### Step 2.3 Perform real-time inference <a class="anchor" id="step2_3"></a> Submit the image file to the model and it will detect the objects in the image. ``` with open(image_file_name, "rb") as f: payload = f.read() response = sagemaker_session.sagemaker_runtime_client.invoke_endpoint( EndpointName=endpoint_name, ContentType=content_type, Accept="json", Body=payload ) result = json.loads(response["Body"].read().decode()) # Convert list to JSON json_result = json.dumps(result) df = pd.read_json(json_result) # Display confidence scores < 0.90 df = df[df.score < 0.90] print(df.head()) ``` ## Step 3 Starting Human Loops <a class="anchor" id="step3"></a> In a previous step, you have already submitted your image to the model for prediction and stored the output in JSON format in the `result` variable. You simply need to modify the X, Y coordinates of the bounding boxes. Additionally, you can filter out all predictions that are less than 90% accurate before submitting it to your human-in-loop review. This will ensure that your model's predictions are highly accurate and any additional detections of objects will be made by a human. 
``` # Helper function to update X,Y coordinates and labels for the bounding boxes def fix_boundingboxes(prediction_results, threshold=0.8): bounding_boxes = [] labels = set() for data in prediction_results: label = data["id"] labels.add(label) if data["score"] > threshold: width = data["right"] - data["left"] height = data["bottom"] - data["top"] top = data["top"] left = data["left"] bounding_boxes.append( {"height": height, "width": width, "top": top, "left": left, "label": label} ) return bounding_boxes, list(labels) bounding_boxes, labels = fix_boundingboxes(result, threshold=0.9) # Define the content that is passed into the human-in-loop workflow and console human_loop_name = str(uuid.uuid4()) input_content = { "initialValue": bounding_boxes, # the bounding box values that have been detected by model prediction "taskObject": f"s3://{bucket}/" + image_file_name, # the s3 object will be passed to the worker task UI to render "labels": labels, # the labels that are displayed in the legend } # Trigger the human-in-loop workflow start_loop_response = a2i.start_human_loop( HumanLoopName=human_loop_name, FlowDefinitionArn=flow_definition_arn, HumanLoopInput={"InputContent": json.dumps(input_content)}, ) ``` Now that the human-in-loop review has been triggered, you can log into the worker console to work on the task and make edits and additions to the object detection bounding boxes from the image. ``` # Fetch the URL for the worker console UI workteam_name = workteam_arn.split("/")[-1] my_workteam = sagemaker_session.sagemaker_client.list_workteams(NameContains=workteam_name) worker_console_url = "https://" + my_workteam["Workteams"][0]["SubDomain"] md( "### Click on the [Worker Console]({}) to begin reviewing the object detection".format( worker_console_url ) ) ``` The below image shows the objects that were detected for the sample image that was used in this notebook by your model and displayed in the worker console. 
<img src='./img/rain_biker_bb.png' align='center' height=600 width=800/> You can now make edits to the image to detect other objects. For example, in the image above, the model failed to detect the bicycle in the foreground with an accuracy of 90% or greater. However, as a human reviewer, you can clearly see the bicycle and can make a bounding box around it. Once you have finished with your edits, you can submit the result. ### Step 3.1 View Task Results <a class="anchor" id="step3_1"></a> Once work is completed, Amazon A2I stores results in your S3 bucket and sends a Cloudwatch event. Your results should be available in the S3 `output_path` that you specified when all work is completed. Note that the human answer, the label and the bounding box, is returned and saved in the JSON file. **NOTE: You must edit/submit the image in the Worker console so that its status is `Completed`.** ``` # Fetch the details about the human loop review in order to locate the JSON output on S3 resp = a2i.describe_human_loop(HumanLoopName=human_loop_name) # Wait for the human-in-loop review to be completed while True: resp = a2i.describe_human_loop(HumanLoopName=human_loop_name) print("-", sep="", end="", flush=True) if resp["HumanLoopStatus"] == "Completed": print("!") break time.sleep(2) ``` Once its status is `Completed`, you can execute the below cell to view the JSON output that is stored in S3. Under `annotatedResult`, any new bounding boxes will be included along with those that the model predicted, will be included. To learn more about the output data schema, please refer to the documentation about [Output Data From Custom Task Types](https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-output-data.html#sms-output-data-custom). 
``` # Once the image has been submitted, display the JSON output that was sent to S3 bucket, key = resp["HumanLoopOutput"]["OutputS3Uri"].replace("s3://", "").split("/", 1) response = s3.get_object(Bucket=bucket, Key=key) content = response["Body"].read() json_output = json.loads(content) print(json.dumps(json_output, indent=1)) ``` ## Step 4 Next Steps <a class="anchor" id="step4"></a> ### Step 4.1 Additional Resources <a class="anchor" id="step4_1"></a> * You can explore additional machine learning models in [AWS Marketplace - Machine Learning](https://aws.amazon.com/marketplace/b/c3714653-8485-4e34-b35b-82c2203e81c1?page=1&filters=FulfillmentOptionType&FulfillmentOptionType=SageMaker&ref_=sa_campaign_pbrao). * Learn more about [Amazon Augmented AI](https://aws.amazon.com/augmented-ai/) * Other AWS blogs that may be of interest are: * [Using AWS Marketplace for machine learning workloads](https://aws.amazon.com/blogs/awsmarketplace/using-aws-marketplace-for-machine-learning-workloads/) * [Adding AI to your applications with ready-to-use models from AWS Marketplace](https://aws.amazon.com/blogs/machine-learning/adding-ai-to-your-applications-with-ready-to-use-models-from-aws-marketplace/) * [Building an end-to-end intelligent document processing solution using AWS](https://aws.amazon.com/blogs/machine-learning/building-an-end-to-end-intelligent-document-processing-solution-using-aws/) ## Step 5 Clean up resources <a class="anchor" id="step5"></a> In order to clean up the resources from this notebook, simply execute the below cells. 
``` # Delete Workflow definition sagemaker_client.delete_flow_definition(FlowDefinitionName=flow_definition_name) # Delete Human Task UI sagemaker_client.delete_human_task_ui(HumanTaskUiName=task_ui_name) # Delete Endpoint predictor.delete_endpoint() # Delete Model predictor.delete_model() ``` #### Cancel AWS Marketplace subscription (Optional) Finally, if you subscribed to AWS Marketplace model for an experiment and would like to unsubscribe, you can follow the steps below. Before you cancel the subscription, ensure that you do not have any [deployable model](https://console.aws.amazon.com/sagemaker/home#/models) created from the model package or using the algorithm. Note - You can find this information by looking at the container name associated with the model. **Steps to unsubscribe from the product on AWS Marketplace:** Navigate to Machine Learning tab on Your [Software subscriptions page](https://aws.amazon.com/marketplace/ai/library?productType=ml&ref_=lbr_tab_ml). Locate the listing that you would need to cancel, and click Cancel Subscription.
github_jupyter
``` ### Load necessary libraries ### import glob import os import librosa import numpy as np from sklearn.model_selection import KFold from sklearn.metrics import accuracy_score import tensorflow as tf from tensorflow import keras ### Define helper functions ### def extract_features(parent_dir,sub_dirs,file_ext="*.wav", bands=60,frames=41): def _windows(data, window_size): start = 0 while start < len(data): yield int(start), int(start + window_size) start += (window_size // 2) window_size = 512 * (frames - 1) features, labels = [], [] for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)): segment_log_specgrams, segment_labels = [], [] sound_clip,sr = librosa.load(fn) label = int(fn.split('/')[2].split('-')[1]) for (start,end) in _windows(sound_clip,window_size): if(len(sound_clip[start:end]) == window_size): signal = sound_clip[start:end] melspec = librosa.feature.melspectrogram(signal,n_mels=bands) logspec = librosa.amplitude_to_db(melspec) logspec = logspec.T.flatten()[:, np.newaxis].T segment_log_specgrams.append(logspec) segment_labels.append(label) segment_log_specgrams = np.asarray(segment_log_specgrams).reshape( len(segment_log_specgrams),bands,frames,1) segment_features = np.concatenate((segment_log_specgrams, np.zeros( np.shape(segment_log_specgrams))), axis=3) for i in range(len(segment_features)): segment_features[i, :, :, 1] = librosa.feature.delta( segment_features[i, :, :, 0]) if len(segment_features) > 0: # check for empty segments features.append(segment_features) labels.append(segment_labels) return features, labels # Pre-process and extract feature from the data parent_dir = 'UrbanSounds8K/audio/' save_dir = "UrbanSounds8K/processed/" folds = sub_dirs = np.array(['fold1','fold2','fold3','fold4', 'fold5','fold6','fold7','fold8', 'fold9','fold10']) for sub_dir in sub_dirs: features, labels = extract_features(parent_dir,sub_dir) np.savez("{0}{1}".format(save_dir, sub_dir), features=features, labels=labels) ### Define convolutional network 
architecture ### def get_network(): num_filters = [24,32,64,128] pool_size = (2, 2) kernel_size = (3, 3) input_shape = (60, 41, 2) num_classes = 10 keras.backend.clear_session() model = keras.models.Sequential() model.add(keras.layers.Conv2D(24, kernel_size, padding="same", input_shape=input_shape)) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.Activation("relu")) model.add(keras.layers.MaxPooling2D(pool_size=pool_size)) model.add(keras.layers.Conv2D(32, kernel_size, padding="same")) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.Activation("relu")) model.add(keras.layers.MaxPooling2D(pool_size=pool_size)) model.add(keras.layers.Conv2D(64, kernel_size, padding="same")) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.Activation("relu")) model.add(keras.layers.MaxPooling2D(pool_size=pool_size)) model.add(keras.layers.Conv2D(128, kernel_size, padding="same")) model.add(keras.layers.BatchNormalization()) model.add(keras.layers.Activation("relu")) model.add(keras.layers.GlobalMaxPooling2D()) model.add(keras.layers.Dense(128, activation="relu")) model.add(keras.layers.Dense(num_classes, activation="softmax")) model.compile(optimizer=keras.optimizers.Adam(1e-4), loss=keras.losses.SparseCategoricalCrossentropy(), metrics=["accuracy"]) return model ### Train and evaluate via 10-Folds cross-validation ### accuracies = [] folds = np.array(['fold1','fold2','fold3','fold4', 'fold5','fold6','fold7','fold8', 'fold9','fold10']) load_dir = "UrbanSounds8K/processed/" kf = KFold(n_splits=10) for train_index, test_index in kf.split(folds): x_train, y_train = [], [] for ind in train_index: # read features or segments of an audio file train_data = np.load("{0}/{1}.npz".format(load_dir,folds[ind]), allow_pickle=True) # for training stack all the segments so that they are treated as an example/instance features = np.concatenate(train_data["features"], axis=0) labels = np.concatenate(train_data["labels"], axis=0) 
x_train.append(features) y_train.append(labels) # stack x,y pairs of all training folds x_train = np.concatenate(x_train, axis = 0).astype(np.float32) y_train = np.concatenate(y_train, axis = 0).astype(np.float32) # for testing we will make predictions on each segment and average them to # produce signle label for an entire sound clip. test_data = np.load("{0}/{1}.npz".format(load_dir, folds[test_index][0]), allow_pickle=True) x_test = test_data["features"] y_test = test_data["labels"] model = get_network() model.fit(x_train, y_train, epochs = 50, batch_size = 24, verbose = 0) # evaluate on test set/fold y_true, y_pred = [], [] for x, y in zip(x_test, y_test): # average predictions over segments of a sound clip avg_p = np.argmax(np.mean(model.predict(x), axis = 0)) y_pred.append(avg_p) # pick single label via np.unique for a sound clip y_true.append(np.unique(y)[0]) accuracies.append(accuracy_score(y_true, y_pred)) print("Average 10 Folds Accuracy: {0}".format(np.mean(accuracies))) ```
github_jupyter
# NLP - Using spaCy library - **Created by Andrés Segura Tinoco** - **Created on June 04, 2019** - **Updated on October 29, 2021** **Natural language processing (NLP):** is a discipline where computer science, artificial intelligence and cognitive logic intersect, with the objective that machines can read and understand our language for decision making <a href="#link_one">[1]</a>. **spaCy:** features fast statistical NER as well as an open-source named-entity visualizer <a href="#link_two">[2]</a>. ## Example with a document in Spanish ``` # Load Python libraries import io import random from collections import Counter # Load NLP libraries from spacy import spacy # Verify installed spacy version spacy.__version__ ``` ### Step 1 - Read natural text from a book ``` # Util function to read a plain text file def read_text_file(file_path, encoding='ISO-8859-1'): text = "" with open(file_path, 'r', encoding=encoding) as f: text = f.read() return text # Get text sample file_path = "../data/es/El Grillo del Hogar - Charles Dickens.txt" book_text = read_text_file(file_path) # Show first 1000 raw characters of document book_text[:1000] ``` ### Step 2 - Create an NLP model ``` # Create NLP model for spanish language nlp = spacy.load('es_core_news_sm') doc_es = nlp(book_text) ``` **- Vocabulary:** unique words of the document. ``` # Get vocabulary vocabulary_es = set(str(token).lower() for token in doc_es if not token.is_stop and token.is_alpha) len(vocabulary_es) # Show 100 random words of the vocabulary print(random.sample(vocabulary_es, 100)) ``` **- Stopwords:** refers to the most common words in a language, which do not significantly affect the meaning of the text. ``` # Get unique stop-words stop_words_es = set(str(token).lower() for token in doc_es if token.is_stop) len(stop_words_es) # Show unique stop-words print(stop_words_es) ``` **- Entity:** can be any word or series of words that consistently refers to the same thing. 
``` # Returns a text with data quality def text_quality(text): new_text = text.replace('\n', '') return new_text.strip('\r\n') # Print out named first 50 entities for ix in range(50): ent = doc_es.ents[ix] ent_text = text_quality(ent.text) if len(ent_text) > 3: print((ix + 1), '- Entity:', ent_text, ', Label:', ent.label_) ``` ### Step 3 - Working with POS, NER and sentences **- POS:** the parts of speech explain how a word is used in a sentence. ``` # Part of speech (POS) used in this document set(token.pos_ for token in doc_es) ``` **- Sentences:** a set of words that is complete in itself and typically containing a subject and predicate. ``` # How many sentences are in this text? sentences = [s for s in doc_es.sents] len(sentences) # Show first 10 sentences sentences[1:11] # Get the sentences in which the 'grillo' appears pattern = 'grillo' cricket_sent = [sent for sent in doc_es.sents if pattern in sent.text] len(cricket_sent) # Show the first 10 sentences in which the 'grillo' appears for sent in cricket_sent[1:11]: print('-', sent) ``` **- NER:** Named Entity Recognition. 
``` # Returns the most common entities and their quantity def find_entities(doc, ent_type, n): entities = Counter() for ent in doc.ents: if ent.label_ == ent_type: ent_name = text_quality(ent.lemma_) entities[ent_name] += 1 return entities.most_common(n) # Show entities of type PERSON find_entities(doc_es, 'PER', 20) # Returns persons adjectives def get_person_adj(doc, person): adjectives = [] for ent in doc.ents: if ent.lemma_ == person: for token in ent.subtree: if token.pos_ == 'ADJ': # Adjective adjectives.append(token.lemma_) for ent in doc.ents: if ent.lemma_ == person: if ent.root.dep_ == 'nsubj': # Nominal subject for child in ent.root.head.children: if child.dep_ == 'acomp': # Adjectival complement adjectives.append(child.lemma_) return set(adjectives) # Show the adjectives used for John (most common entity) curr_person = 'John' print(get_person_adj(doc_es, curr_person)) # Returns the people who use a certain verb def verb_persons(doc, verb, n): verb_count = Counter() for ent in doc.ents: if ent.label_ == 'PER' and ent.root.head.lemma_ == verb: verb_count[ent.text] += 1 return verb_count.most_common(n) # Show the people who use a certain verb curr_verb = 'hacer' verb_persons(doc_es, curr_verb, 10) # Get ADJ type labels adj_tokens = set(str(token.orth_).lower() for token in doc_es if token.pos_ == 'ADJ') len(adj_tokens) # Show 50 random ADJ type labels print(random.sample(adj_tokens, 50)) # Get PROPN type labels propn_tokens = set(str(token.orth_).lower() for token in doc_es if token.pos_ == 'PROPN') len(adj_tokens) # Show 50 random PROPN type labels print(random.sample(propn_tokens, 50)) ``` ## Reference <a name='link_one' href='https://en.wikipedia.org/wiki/Natural_language_processing' target='_blank' >[1]</a> Wikipedia - Natural language processing. <a name='link_two' href='https://spacy.io/' target='_blank' >[2]</a> spaCy website. <hr> <p><a href="https://ansegura7.github.io/NLP/">« Home</a></p>
github_jupyter
# Entrada e Saída de Dados Este tutorial tem o objetivo de mostrar brevemente as funções nativas da linguagem Python, utilizadas para entrada e saída padrão. 1. print 2. raw_input 3. casting (int, float, str) ## Comando print Na versão 2 da linguagem Python, o comando responsável pela saída padrão de dados é o __print__. ``` print "Imprimindo na saida padrao" print "O Tamburetei é top!" ``` ## Função raw_input() A função __raw_input()__ é a responsável por coletar dados diretamente do teclado, que é a entrada padrão. Os dados coletados pela função vão ser transformadas em string. ``` # Espera por uma entrada e salva na variável mensagem mensagem = raw_input() # Imprime o que está em mensagem print mensagem ``` Observe que ao executar o trecho de código acima, o sistema espera que o usuário digite algo para então continuar a execução da próxima instrução. Ainda com a função __raw_input()__ é possível imprimir um texto na saída padrão com o objetivo de auxiliar o usuário no que ele deve digitar. Esse texto deve vir como parâmetro da função. ```python variavel_que_guarda_entrada = raw_input("Texto de Ajuda") ``` ``` # Imprime a string que está como parâmetro em raw_input() # em seguida espera a entrada do teclado e guarda na variável primeiro_nome = raw_input("Digite seu primeiro nome: ") # Imprime o que está salvo em primeiro_nome print primeiro_nome ``` ## Funções de Casting Como foi mencionado anteriormente, a função raw_input (que serve como entrada padrão de dados), transforma tudo que é recebido pelo teclado em string. Considerando essa informação, como salvar um numero inteiro ou de ponto flutuante? ou imprimir na saída padrão uma string e em seguida um número? Para isso são usadas as funções de casting. Operação de casting é o nome que se dá para a realização de uma conversão de tipos. Do tipo string para o tipo inteiro, por exemplo. 
Nesse sentido, as funções mais utilizadas são: Nome da Função | O que faz -------------- | ---------- str() | Realiza a conversão de um tipo em string int() | Realiza a conversão de um tipo em inteiro float() | Realiza a conversão de um tipo em ponto flutuante Realizando a conversão da string recebida da função raw_input() em inteiro: ```python converte_string_para_inteiro = int(raw_input("Digite um Número Inteiro: ")) ``` Realizando a conversão da string recebida da função raw_input() em ponto flutuante: ```python converte_string_para_float = float(raw_input("Digite um Número em Ponto Flutuante: ")) ``` ``` # Guarda um inteiro na variável idade com valor 20 idade = 20 # Converte a idade de inteiro para string, concatena com a frase e imprime print "A idade de Lucas: " + str(idade) # Guarda um ponto flutuante na variável preco com valor 200.00 preco = 200.00 # Converte o preco de float para string, concatena com a frase e imprime print "A mesa custou " + str(preco) ```
github_jupyter
# Tutorial Part 11: Learning Unsupervised Embeddings for Molecules In this example, we will use a `SeqToSeq` model to generate fingerprints for classifying molecules. This is based on the following paper, although some of the implementation details are different: Xu et al., "Seq2seq Fingerprint: An Unsupervised Deep Molecular Embedding for Drug Discovery" (https://doi.org/10.1145/3107411.3107424). Many types of models require their inputs to have a fixed shape. Since molecules can vary widely in the numbers of atoms and bonds they contain, this makes it hard to apply those models to them. We need a way of generating a fixed length "fingerprint" for each molecule. Various ways of doing this have been designed, such as Extended-Connectivity Fingerprints (ECFPs). But in this example, instead of designing a fingerprint by hand, we will let a `SeqToSeq` model learn its own method of creating fingerprints. A `SeqToSeq` model performs sequence to sequence translation. For example, they are often used to translate text from one language to another. It consists of two parts called the "encoder" and "decoder". The encoder is a stack of recurrent layers. The input sequence is fed into it, one token at a time, and it generates a fixed length vector called the "embedding vector". The decoder is another stack of recurrent layers that performs the inverse operation: it takes the embedding vector as input, and generates the output sequence. By training it on appropriately chosen input/output pairs, you can create a model that performs many sorts of transformations. In this case, we will use SMILES strings describing molecules as the input sequences. We will train the model as an autoencoder, so it tries to make the output sequences identical to the input sequences. For that to work, the encoder must create embedding vectors that contain all information from the original sequence. 
That's exactly what we want in a fingerprint, so perhaps those embedding vectors will then be useful as a way to represent molecules in other models! ## Colab This tutorial and the rest in this sequence are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/deepchem/deepchem/blob/master/examples/tutorials/11_Learning_Unsupervised_Embeddings_for_Molecules.ipynb) ## Setup To run DeepChem within Colab, you'll need to run the following cell of installation commands. This will take about 5 minutes to run to completion and install your environment. This notebook will take a few hours to run on a GPU machine, so we encourage you to run it on Google colab unless you have a good GPU machine available. ``` !wget -c https://repo.anaconda.com/archive/Anaconda3-2019.10-Linux-x86_64.sh !chmod +x Anaconda3-2019.10-Linux-x86_64.sh !bash ./Anaconda3-2019.10-Linux-x86_64.sh -b -f -p /usr/local !conda install -y -c deepchem -c rdkit -c conda-forge -c omnia deepchem-gpu=2.3.0 import sys sys.path.append('/usr/local/lib/python3.7/site-packages/') import deepchem as dc ``` Let's start by loading the data. We will use the MUV dataset. It includes 74,501 molecules in the training set, and 9313 molecules in the validation set, so it gives us plenty of SMILES strings to work with. ``` import deepchem as dc tasks, datasets, transformers = dc.molnet.load_muv() train_dataset, valid_dataset, test_dataset = datasets train_smiles = train_dataset.ids valid_smiles = valid_dataset.ids ``` We need to define the "alphabet" for our `SeqToSeq` model, the list of all tokens that can appear in sequences. (It's also possible for input and output sequences to have different alphabets, but since we're training it as an autoencoder, they're identical in this case.) 
Make a list of every character that appears in any training sequence. ``` tokens = set() for s in train_smiles: tokens = tokens.union(set(c for c in s)) tokens = sorted(list(tokens)) ``` Create the model and define the optimization method to use. In this case, learning works much better if we gradually decrease the learning rate. We use an `ExponentialDecay` to multiply the learning rate by 0.9 after each epoch. ``` from deepchem.models.optimizers import Adam, ExponentialDecay max_length = max(len(s) for s in train_smiles) batch_size = 100 batches_per_epoch = len(train_smiles)/batch_size model = dc.models.SeqToSeq(tokens, tokens, max_length, encoder_layers=2, decoder_layers=2, embedding_dimension=256, model_dir='fingerprint', batch_size=batch_size, learning_rate=ExponentialDecay(0.004, 0.9, batches_per_epoch)) ``` Let's train it! The input to `fit_sequences()` is a generator that produces input/output pairs. On a good GPU, this should take a few hours or less. ``` def generate_sequences(epochs): for i in range(epochs): for s in train_smiles: yield (s, s) model.fit_sequences(generate_sequences(40)) ``` Let's see how well it works as an autoencoder. We'll run the first 500 molecules from the validation set through it, and see how many of them are exactly reproduced. ``` predicted = model.predict_from_sequences(valid_smiles[:500]) count = 0 for s,p in zip(valid_smiles[:500], predicted): if ''.join(p) == s: count += 1 print('reproduced', count, 'of 500 validation SMILES strings') ``` Now we'll trying using the encoder as a way to generate molecular fingerprints. We compute the embedding vectors for all molecules in the training and validation datasets, and create new datasets that have those as their feature vectors. The amount of data is small enough that we can just store everything in memory. 
``` train_embeddings = model.predict_embeddings(train_smiles) train_embeddings_dataset = dc.data.NumpyDataset(train_embeddings, train_dataset.y, train_dataset.w, train_dataset.ids) valid_embeddings = model.predict_embeddings(valid_smiles) valid_embeddings_dataset = dc.data.NumpyDataset(valid_embeddings, valid_dataset.y, valid_dataset.w, valid_dataset.ids) ``` For classification, we'll use a simple fully connected network with one hidden layer. ``` classifier = dc.models.MultitaskClassifier(n_tasks=len(tasks), n_features=256, layer_sizes=[512]) classifier.fit(train_embeddings_dataset, nb_epoch=10) ``` Find out how well it worked. Compute the ROC AUC for the training and validation datasets. ``` import numpy as np metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean, mode="classification") train_score = classifier.evaluate(train_embeddings_dataset, [metric], transformers) valid_score = classifier.evaluate(valid_embeddings_dataset, [metric], transformers) print('Training set ROC AUC:', train_score) print('Validation set ROC AUC:', valid_score) ``` # Congratulations! Time to join the Community! Congratulations on completing this tutorial notebook! If you enjoyed working through the tutorial, and want to continue working with DeepChem, we encourage you to finish the rest of the tutorials in this series. You can also help the DeepChem community in the following ways: ## Star DeepChem on [GitHub](https://github.com/deepchem/deepchem) This helps build awareness of the DeepChem project and the tools for open source drug discovery that we're trying to build. ## Join the DeepChem Gitter The DeepChem [Gitter](https://gitter.im/deepchem/Lobby) hosts a number of scientists, developers, and enthusiasts interested in deep learning for the life sciences. Join the conversation!
github_jupyter
# Matematički softver - prvo predavanje ## Metapodaci ### Materijali Materijali će se nalaziti na Githubu, u repozitoriju kolegija (https://github.com/vedgar/ms). Za potrebe kolegija, svi morate imati _neku_ online bazu vlastitog koda, u kojoj napravite repozitorij 'Matematički softver'. Preporučujem Github. ### Potreban softver Na računalo na kojem ćete pratiti predavanja, pisati zadaće i slično, instalirajte `Anaconda` distribuciju (full (**ne** Miniconda!), Python 3.6, 64bit ako imate 64bitno računalo, ne trebate instalirati Visual Studio Code). Instalacija i pokretanje traje dosta dugo. Pokrenite Anaconda Navigator (iz Start izbornika ili iz komandne linije) i odaberite Jupyter Notebook (launch). Odaberite folder gdje ćete držati datoteke vezane uz kolegij, i napravite novu bilježnicu (u browseru, `New`... `Python 3`). Odaberite `Help`... `User Interfacte Tour` da biste se upoznali sa sučeljem. Na računalima u Pr1 (pod Linuxom!) već je instalirano sve potrebno, samo da biste pokrenuli Anaconda Navigator, morate prvo u terminalu izvršiti naredbu ```bash export PATH=/opt/anaconda3/bin:$PATH anaconda-navigator ``` ### Sadržaj kolegija * IPython / Jupyter * Scipy stack: Numpy, Sympy, Pandas, Matplotlib, Scikit * (_možda_) Sage * (_vrlo možda_) Julia * Markdown * LaTeX ### Cilj kolegija * Razviti sposobnost korištenja Pythona kao moćnog alata za znanstvenike * Osposobiti vas za produkciju visokokvalitetnih publikacija ### Polaganje Ocjena se formira iz dva kolokvija i domaćih zadaća. Kolokviji su _open-book_ tipa i nose 80 bodova. Domaće zadaće su u obliku eseja/projekta sa zadanim elementima. Predaja svih domaćih zadaća nužan je uvjet za polaganje kolegija. Domaća zadaća je uspješno predana samo ako ima _sve_ tražene elemente. Domaće zadaće nose 20 bodova, koji se dodjeljuju za _dodatne_ elemente. Zbog kontinuiranog praćenja, popravni ispit ne postoji. 
U slučaju opravdane spriječenosti prisustvovanja kolokviju, javite se što prije da dogovorimo alternativne metode ocjenjivanja. ### Literatura * [Lectures on scientific computing with Python](https://github.com/jrjohansson/scientific-python-lectures) - kolegij po uzoru na koji je ovaj nastao (davno) * [Astro 599 Course](http://nbviewer.jupyter.org/github/jakevdp/2013_fall_ASTR599/tree/master/notebooks/) - još jedan sličan kolegij, nažalost u Pythonu 2 ali s puno korisnih informacija * [Reproducible data analysis in Jupyter](http://jakevdp.github.io/blog/2017/03/03/reproducible-data-analysis-in-jupyter/) - serija videâ koji detaljno objašnjavaju _workflow_ potreban za "idealnu" prvu zadaću * [xkcd 1313: Regex Golf](http://nbviewer.jupyter.org/url/norvig.com/ipython/xkcd1313.ipynb) - duhovit ali i poučan primjer netrivijalnog rezultata dobivenog pomoću Jupytera * [A gallery of interesting Jupyter Notebooks](https://github.com/jupyter/jupyter/wiki/A-gallery-of-interesting-Jupyter-Notebooks) - ogromni repozitorij raznovrsnih bilježnica, dobro za upoznavanje s mogućnostima ## Primjeri ### Slaganje bilježnica Možemo uključivati slike, zvukove, videe, matematičke zapise, JSON objekte,... Za detalje pogledati [dokumentaciju](http://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html). ``` from IPython.display import Image, YouTubeVideo Image('http://python.org/images/python-logo.gif') YouTubeVideo('k7WXVWej-NY') ``` Možemo i izvršavati sistemske naredbe (počinju uskličnikom), ali time naša bilježnica postaje izvršiva samo na određenom OSu. Zato je dobro to izbjegavati. ``` !dir ``` ### Magične naredbe "Magične" naredbe počinju znakom `%`. Linijske (počinju jednim znakom `%`) primaju argumente do kraja linije, ćelijske (počinju s `%%`) primaju argumente do kraja linije. 
``` %lsmagic %who %matplotlib inline %xmode plain a = 0 1 / a %%HTML <h5>Neki naslov</h5> <p>Jedan jednostavan <em>primjer</em>.</p> <p>Od dva odlomka teksta.</p> %%time zbroj = 0 for broj in range(1_000_000): zbroj += broj print(zbroj) # %%js # alert('Bu!') ``` ### Traženje pomoći Dodavanjem `?` (ili `??` za više detalja) na praktički bilo koje ime možemo zatražiti pomoć u vezi objekta imenovanog tim imenom. ``` YouTubeVideo? help(chr) dir(list) ``` ### Kompleksniji primjer ``` from skimage import data import matplotlib.pyplot as plt coins = data.coins() plt.imshow(coins, cmap='gray') coins coins.shape coins_zoom = coins[10:80, 300:370] from skimage import restoration tv_coins = restoration.denoise_tv_chambolle(coins_zoom, weight=.05) plt.figure() plt.subplot(1, 2, 1) plt.imshow(coins_zoom) plt.subplot(1, 2, 2) plt.imshow(tv_coins) ``` ## Osnove Pythona ### Objekti i njihova imena Osnovna razlika Pythona od jezika poput C-a je: U C-u, memorijske lokacije su primarni objekti kojima baratamo, vrijednosti zapisane u njima su sekundarne. Varijable su zato uvijek imena za "kutije": nizove od nekoliko (fiksno i konačno mnogo) uzastopnih memorijskih lokacija, unutar kojih se nalazi vrijednost objekta. Čak i kod dinamički alocirane memorije, deklarirane varijable uvijek imaju fiksnu i ograničenu veličinu (`sizeof`), dok o našoj interpretaciji njihovog sadržaja kao pointera ovisi naša sposobnost da spremimo potencijalno više podataka u memoriju nego što smo statički alocirali. U Pythonu, pogled na svijet je bitno drugačiji: primarne "vrijednosti" kojima baratamo su upravo objekti, memorijske lokacije su potpuno irelevantne, a da bismo došli do objekata odnosno da bismo ih mogli spomenuti u kodu, dajemo im _imena_. `x = 382` ne znači (kao u C-u) "stvori kutiju imena `x` i u nju stavi bit-pattern `382`", već "stvori objekt `382` (tipa `int`), i daj mu ime `x`". Direktna posljedica: tip više nije svojstvo varijable (imena), nego objekta. `x = 3; x = 'tri'` je sasvim legalno. 
Naravno, hoće li `x.islower()` vratiti `True` ili dignuti `AttributeError`, ovisi o konkretnoj vrijednosti od `x` u trenutku poziva -- baš kao da smo napisali `3 .islower()` odnosno `'tri'.islower()`. Još jedna posljedica: isti objekt može imati više imena. `x = y` jednostavno uzme objekt na koji referira ime `y`, i dade mu još jedno ime `x`. _Ništa se nikamo ne kopira._ Na primjer kod poziva funkcije, objekti koje smo naveli kao funkcijske argumente jednostavno dobiju još neka _lokalna_ imena, koja zovemo parametri. Opet, ništa se nikamo ne kopira. Da bismo ustanovili referiraju li imena `x` i `y` na isti objekt, možemo koristiti `x is y`. Napomena: to nema veze s jednakošću, koja se testira sa `x == y`. Implementacija jednakosti, kao i mnogih drugih operatora/metoda, ovisi o tipu od `x` (i možda od `y`): npr. razlomci bitno drugačije definiraju jednakost nego IP adrese. Čak i da dva objekta imaju potpuno istu reprezentaciju u memoriji (tada će vjerojatno biti `x == y`), to i dalje mogu biti dva objekta (`x is not y`), i promjena jednog neće utjecati na drugi objekt. Druga implikacija (`x is y` povlači `x == y`, odnosno zapravo `x == x`) vrijedi puno češće, i jedini standardni kontraprimjer je NaN (`math.nan`). ### Prostori imena (_namespaces_) Kako su imena očito vrlo važna i zapravo jedini način da u kodu govorimo o objektima, Python posvećuje posebnu pažnju njihovom razvrstavanju. Svako ime postoji u točno jednom prostoru, te se prostori imena dijele na implicitne (čija imena se obično ne navode) i eksplicitne (čija imena se moraju navesti). Implicitnih prostora imena ima četiri vrste, i uvijek su ugniježđeni na isti način. Vanjski je `builtins`, u kojem žive preddefinirana imena za Pythonove ugrađene funkcije i ostale objekte (npr. `print`). Obično se ne mijenja, iako Python dopušta i njegovu promjenu. Sljedeći prema unutra je `globals`, u kojem žive ili varijable koje smo definirali unutar interaktivnog rada (npr. 
pisanja u Jupyterovoj bilježnici), ili pak "globalne" varijable pojedinog _modula_ (ugrubo, datoteke s nastavkom `.py`) koje nisu ni u kojem unutrašnjem (klasnom ili lokalnom) prostoru. U njega možemo slobodno dodavati imena, a možemo i mijenjati njihove vrijednosti ali to se ne smatra dobrom praksom iz svih uobičajenih razloga protiv globalnih varijabli (globalne _konstante_, imena čije se vrijednosti ne mijenjaju -- npr. funkcije i klase koje modul definira -- su sasvim u redu). Unutar `globals` postoje dvije vrste prostora imena, koji se oba zovu `locals` ali ne vide jedan drugog bez obzira na sintaksnu ugniježđenost: klasni i funkcijski. U klasnom prostoru žive atributi (uključujući metode i svojstva) klase unutar koje se nalazimo (ako se uopće nalazimo unutar `class ...:` bloka). Točna priroda ovog bloka, pa tako i mogućnost mijenjanja, ovisi o tipu trenutne klase (tzv. "metaklasi"), no često se u njemu nalaze samo metode. U funkcijskom lokalnom prostoru žive "prave" lokalne varijable (uključivo parametri) funkcije unutar koje se nalazimo (ako se uopće nalazimo unutar `def ...:` bloka). U ovom prostoru imena su fiksna (samo ona koja se sintaksno pojavljuju u kodu funkcije) i nije moguće dodavati nova dinamički, iako je naravno moguće mijenjati njihove vrijednosti. Ovaj prostor imena iznova se stvara svakim pozivom funkcije, i prestaje postojati povratkom iz funkcije; zato je posebno optimiziran. Funkcijski lokalni prostori mogu biti i međusobno ugniježđeni, ako imamo jednu funkciju definiranu unutar druge. Za implicitne prostore imena vrijedi jednostavno pravilo: _čitanje_ vrijednosti imena (npr. korištenje imena u nekom izrazu) obavlja se prvo u trenutnom prostoru, te ako ga tamo nema, u prvom iznad, te ako ga ni tamo nema, u sljedećem iznad njega... i tako dalje u hijerarhiji. Ako se ime ne nađe ni u jednom implicitnom prostoru imena (čak niti u `builtins`), prijavljuje se greška `NameError`. S druge strane, _postavljanje_ vrijednosti imena (npr. 
pridruživanje tog imena nekom objektu), kao i _brisanje_ vrijednosti imena (npr. naredbom `del`), _uvijek_ se obavlja u trenutnom prostoru imena (osim ako smo to promijenili `global` ili `nonlocal` direktivom). Eksplicitni prostori imena su puno jednostavniji, i ima ih dva tipa: atributni (oni čija imena se navode kao `prostor.ime`) i spremnički (oni čija imena se navode kao `prostor[ime]`). Atributni su vrlo slični implicitnima, samo su vezani uz _objekte_ (instance) pojedinih klasa. Gotovo svaki objekt u Pythonu ima svoj atributni prostor imena. Važan specijalni slučaj: `import nekimodul` stvara objekt imena `nekimodul`, čiji atributni prostor je upravo globalni prostor modula `nekimodul.py`. Na taj način možemo jednostavno koristiti globalna imena iz jednog modula u drugom. Atributni prostori su često dinamički (možemo dodavati i brisati imena), iako pomoću `__slots__` definicije možemo fiksirati skup imena, vrlo slično funkcijskom lokalnom prostoru. Spremnički prostor imena imaju samo posebni tipovi _spremnici_, kao što su npr. liste i rječnici. Njihova posebnost je u tome da "imena" u njima mogu biti proizvoljni objekti (ipak, najčešće se zahtijeva da budu nepromjenjivi) -- u svim ostalim prostorima imena su morala biti validni identifikatori: nizovi znakova koji sadrže slova, znamenke i znak `_`, te ne počinju znamenkom. U slučaju listi (i raznih drugih sekvenci kao što su slogovi, stringovi, polja,...), validna imena su cijeli brojevi i zovu se _indeksi_. U slučaju rječnika, validna imena su bilo kakvi _hashabilni_ objekti i zovu se _ključevi_. ``` def f(x): return x + 5 f(1) ``` ### Primjena: uvoz modula Recimo da imamo modul `m.py`, unutar kojeg se nalazi kod ```py x = y = 2 z = 3 ``` Naredbom `import m` dobit ćemo (u trenutnom prostoru imena) ime `m` koje će se odnositi na objekt tipa `module`, čiji atributni prostor imena će imati tri imena: `m.x` i `m.y` će biti imena za jedan te isti broj `2`, dok će `m.z` biti ime za broj `3`. 
Naredbom `import m as n` dobit ćemo ime `n` koje će se odnositi na isti objekt opisan gore. Tako će `n.x` i `n.y` biti imena za `2`, dok će `n.z` biti ime za `3`. Ovo najčešće koristimo kad želimo skratiti ime modula, posebno kad je modul u paketu te je time njegovo ime zapravo cijela "staza": `import matplotlib.pyplot as plt`. Naredbom `from m import x` dobit ćemo (u trenutnom prostoru imena) ime `x` koje će se odnositi na broj `2`. Naredbom `from m import x as z` dobit ćemo ime `z` koje će se odnositi na broj `2`. Naredbom `from m import x, z` dobit ćemo imena `x` i `z` koja će se odnositi redom na `2` i `3`. Ovo koristimo kad imamo module koji sadrže sve i svašta, i treba nam samo uzak dio njihovih funkcionalnosti: `from functools import partial`. Također je korisno kad je glavnina funkcionalnosti modula u jednoj funkciji/klasi koja se zove jednako kao modul: `from datetime import datetime`. Naredbom `from m import *` dobit ćemo imena `x`, `y` i `z`, takva da će se prva dva odnositi na broj `2`, a treće na broj `3`. Primijetite da se ovakav oblik naredbe ne može naći unutar funkcijske definicije, jer se time imena `x`, `y` i `z` ne bi nalazila sintaksno u tekstu funkcijske definicije, te Python ne bi mogao konstruirati lokalni funkcijski prostor imena (ne zna koja će se imena pojaviti u lokalnom prostoru sve do trenutka poziva funkcije). Također, loša strana ovog je teže razumijevanje koda: posebno pri _debuggiranju_, izuzetno je važno za svako ime znati iz kojeg prostora dolazi. Ako imamo ```py from a import * from b import * from c import * ... ... z ... ``` nemamo nikakav način da saznamo iz kojeg modula je `z`, te najčešće moramo istraživati jedan po jedan. Ipak, u interaktivnom _quick&dirty_ kodiranju često je vrlo praktično koristiti ovaj oblik. 
### Primjena: "prijenos" argumenata u funkciju ``` def f(x): x = 8 print(x) x = 3 f(x) print(x) ``` Ime `x` u prve tri linije različito je od imena `x` u druge tri linije: prvo živi u lokalnom funkcijskom prostoru funkcije `f`, dok drugo živi u globalnom prostoru ove Jupyter bilježnice. Prvo se definira funkcija `f`, s jednim lokalnim imenom `x`, koje je (jednom kad se funkcija pozove) u početku ime za argument funkcije, nakon toga ime za broj `8`, i nakon toga se njegova vrijednost (dakle `8`) prenese u funkciju `print` (precizno, funkciju čije ime u `builtins` prostoru je `print`). Nakon toga stvorimo broj `3`, damo mu _globalno_ ime `x`, te s njime pozovemo funkciju `f` (precizno, funkciju čije ime u `globals` prostoru je `f`). Tada se stvori lokalni funkcijski prostor imena, u njemu ime `x` za broj `3`, zatim se to isto (lokalno) ime pridruži broju `8`, te se ispiše `8`. Time funkcija dolazi do kraja, njen lokalni prostor imena nestaje, te se izvršava sljedeća naredba nakon funkcijskog poziva, a to je ovaj `print(x)` u zadnjoj liniji. U tom trenutku, postoji samo jedan `x`, globalno ime `x` za objekt `3`, te se ispisuje `3`. ``` def g(x): x[2] = 8 print(x) x = [1, 5, 3] g(x) print(x) ``` Velika razlika od prethodnog primjera: također postoje dva imena, lokalni i globalni `x`, no ovaj put se oba cijelo vrijeme odnose na isti objekt: jednu jedinu listu u gornjem kodu. Naredba pridruživanja `x[2] = 8` ne mijenja lokalni funkcijski prostor funkcije `g` (u kojem živi `x`), već `x`ov atributni prostor imena. Zapravo, to nije pridruživanje ničega imenu `x`, već je to pridruživanje (broja `8`) "imenu" `2` unutar prostora imena koji odgovara objektu -- spremniku `x`. To se najbolje može vidjeti po tome da u ovoj drugoj naredbi "x" može biti izraz: `(x if 2 < 3 else y)[2] = 8`, dok je naravno "pridruživanje" `(x if 2 < 3 else y) = 8` besmisleno. ### Za detaljniji prikaz ... 
Izuzetno koristan alat za vizualizaciju Pythonove memorije: http://pythontutor.com/visualize.html#mode=edit. ``` %%HTML <iframe width="800" height="500" frameborder="0" src="http://pythontutor.com/iframe-embed.html#code=def%20f%28x%29%3A%0A%20%20%20%20x%20%3D%208%0A%20%20%20%20print%28x%29%0Ax%20%3D%203%0Af%28x%29%0Aprint%28x%29%0A%0Adef%20g%28x%29%3A%0A%20%20%20%20x%5B2%5D%20%3D%208%0A%20%20%20%20print%28x%29%0Ax%20%3D%20%5B1,%205,%203%5D%0Ag%28x%29%0Aprint%28x%29&codeDivHeight=400&codeDivWidth=350&cumulative=false&curInstr=0&heapPrimitives=true&origin=opt-frontend.js&py=3&rawInputLstJSON=%5B%5D&textReferences=false"> </iframe> ``` ## Matematičke operacije ``` 18 + 7, 18 - 7, 18 * 7, 18 / 7 18 // 7, 18 % 7, divmod(18, 7) 18 ** 7, pow(18, 7), pow(18, 7, 1000) from fractions import Fraction as F from decimal import Decimal as D, getcontext import cmath F(18, 7)**2 % 1 str(_), _.numerator, _.denominator getcontext().prec = 30 D(18/7), D(18)/D(7) getcontext().prec = 200 D(2).sqrt() _ ** 2 - 2 2 ** 3 ** 4, (2 ** 3) ** 4, 2 ** (3 ** 4) 5 ** 3**-1, 7 ** .5, (-1) ** .5 1j.real, 1j.imag, cmath.isclose(_[2], 1j) (2 + 3j) ** 5, (1 + 1j) ** 8 cmath.isclose(cmath.e ** (1j * cmath.pi) + 1, 0) help(cmath.isclose) cmath.isclose(cmath.e ** (1j * cmath.pi) + 1, 0, abs_tol=1e-9) 2.3.imag, 7 .denominator cmath.inf * 0 _ == _ cmath.isclose(cmath.atan(cmath.inf), cmath.pi / 2) ``` ## Spremnici ``` lista = [3, -2, 5.8, 2j, 'bla', [3, 5], {8}, print, ZeroDivisionError] lista len(lista), lista[3], lista[-2] is lista[len(lista)-2], lista[~2] lista[5][0], lista[4][2][0][0][0] lista[:3], lista[3:], lista[-3:], lista[:-3] lista[2:6], lista[1:-1], lista[7:7], lista[3:99] lista, lista[::3], lista[1::3] list('MatSoft') set(_) mat = set('matematički') samoglasnici = set('aeiou') mat & samoglasnici, mat - samoglasnici mat | samoglasnici, mat ^ samoglasnici len(samoglasnici) + len(mat) == len(mat | samoglasnici) + len(mat & samoglasnici) # FUI samoglasnici <= mat, samoglasnici & mat < mat 
lista.append(23) lista lista.remove({8}) lista.remove(2j) lista del lista[3] del lista[2:] lista -2 in lista, 3 not in lista, 17 in lista, 117 not in lista mat mat |= set('01234') mat mat.add('z') mat.remove('č') mat.discard('ž') mat mat.isdisjoint(samoglasnici) ``` ### Rječnici ``` boje = {'jabuka': 'crveno', 'kruška': 'žuto', 'limun': 'žuto'} boje['grožđe'] = 'plavo' len(boje) for voće, boja in boje.items(): print(f'{voće} je boje: {boja}') del boje['limun'] for voće in boje: if voće != 'grožđe': boje[voće] = 'zeleno' boje {voće for voće in boje if boje[voće] == 'zeleno'} ``` ## Funkcije ``` def potencije(x): return x ** 2, x ** 3, x ** 4 kvadrat, kub, četvrta = potencije(3) kvadrat from skimage import io, transform def thumbnail(slika, širina=100, ime='thumb.png'): """Proizvodi thumbnail za sliku, zadanog imena i širine (visina se određuje proporcionalno).""" izvorna_širina, izvorna_visina, *_ = slika.shape visina = izvorna_visina * širina // izvorna_širina io.imsave(ime, transform.resize(slika, (širina, visina), mode='constant')) astro = data.astronaut() astro.shape plt.imshow(astro) io.imsave('astro.png', astro) astro_s_diska = io.imread('astro.png') thumbnail(astro_s_diska) Image('thumb.png') ``` ### Funkcije višeg reda ``` def linearna(a, b): def funkcija(x): return a*x + b return funkcija f = linearna(a=1/2, b=3) f, f.__code__.co_freevars, f.__code__.co_varnames, [c.cell_contents for c in f.__closure__] f(20) def komponiraj(*funkcije): def kompozicija(x): for f in reversed(funkcije): x = f(x) return x return kompozicija f = komponiraj(lambda x: x+1, lambda x: x*2) f(5) from functools import partial dvana = partial(pow, 2) dvana(12) komponiraj(dvana, dvana, f) _(1) ``` ## Grananja i petlje ``` if 2 < 1: print('Nešto je čudno') elif 2 == 1: print('Još čudnije') else: print('Sve ok') x = 1 if 2 <= x < 5: print(f'{x} je između 2 i 5') else: print(f'{x} nije između 2 i 5') for i in range(23, 99, 7): print(i) for riječ in 'Znanstvenici', 'vole', 'koristiti', 
'Python': print(riječ) params = dict(p1=1, p2=2.879, p31=38) for parametar, vrijednost in params.items(): print(f'{parametar:>3} = {vrijednost:5.2f}') {x**2: x for x in range(4, -3, -1)} {slovo for slovo in 'Matematički softver'.lower()} listalisti = [[], [8, 3, 5], [2, 1], [3]] [element for lista in listalisti for element in lista] rezultat = [] for lista in listalisti: for element in lista: rezultat.append(element) rezultat broj = 27 while broj > 1: if broj % 2: broj = broj*3 + 1 else: broj //= 2 print(broj, end='\t') ``` ## Interaktivnost ``` from ipywidgets import interact import networkx as nx import matplotlib.pyplot as plt %matplotlib inline def plot_random_graph(n, m, p, generator): nx.draw(generator(n, m, p)) plt.show() interact(plot_random_graph, n=(2, 30), m=(1, 10), p=(0, 1, 1e-3), generator={ 'lobster': lambda n, m, p: nx.random_lobster (n, p, p / m), 'power law': lambda n, m, p: nx.powerlaw_cluster_graph (n, m, p), 'Newman-Watts-Strogatz': lambda n, m, p: nx.newman_watts_strogatz_graph(n, m, p), 'Erdős-Rényi': lambda n, m, p: nx.erdos_renyi_graph (n, p), }); nx.erdos_renyi_graph? ``` ## Domaća zadaća (do idućeg predavanja) * Otvoriti Github account i napraviti repozitorij "Matematički softver" * Skinuti i instalirati Anacondu (upute na početku ovog dokumenta) ### Zadaci za vježbu 1. Napišite funkciju koja prima $n$, a vraća listu neparnih brojeva od $1$ do (uključivo) $n$. 2. Napišite funkciju koja rješava kvadratnu jednadžbu. 3. Napišite funkciju `trapezint(f, n, a, b)` koja numerički računa integral funkcije $f$ na intervalu $[a,b]$, koristeći trapeznu formulu $$\int_a^b f(x)\,dx\approx\frac{h}{2}\sum_{i=1}^n{\bigl(f(x_{i-1})+f(x_i)\bigr)}.$$ 4. Napišite funkciju za numeričko deriviranje oblika `diff(f, x, h=1e-6)`. 
```
def neparni_do(n):
    """Return the list of odd numbers from 1 up to and including n."""
    return list(range(1, n+1, 2))

neparni_do(7)

def rq(a, b, c):
    """Solve the quadratic equation a*x**2 + b*x + c = 0; return both roots.

    NOTE(review): when D < 0 the roots come out complex (D**.5 yields a
    complex number), and a == 0 raises ZeroDivisionError -- assumes a != 0.
    """
    D = b**2 - 4*a*c
    return (-b + D**.5) / (2*a), (-b - D**.5) / (2*a)

rq(1,1,1)

def trapezint(f, n, a, b):
    """Numerically integrate f over [a, b] with the trapezoid rule on n subintervals."""
    h = (b - a) / n
    # nodes x_0 = a, x_1, ..., x_n = b
    xi = [a + i*h for i in range(n + 1)]
    # sum of f(x_{i-1}) + f(x_i) over all subintervals (interior nodes counted twice)
    s = sum(f(xi[i-1]) + f(xi[i]) for i in range(1, n+1))
    return h / 2 * s

trapezint(lambda x: x**3, 100, 0, 2)

def diff(f, x, h=1e-6):
    """Numerical derivative of f at x (one-sided forward difference with step h)."""
    return (f(x+h)- f(x)) / h

from math import cos, radians
diff(cos, radians(45))
```
github_jupyter
# Building a Trie in Python

Before we start let us reiterate the key components of a Trie or Prefix Tree. A trie is a tree-like data structure that stores a dynamic set of strings. Tries are commonly used to facilitate operations like predictive text or autocomplete features on mobile phones or web search.

Before we move into the autocomplete function we need to create a working trie for storing strings. We will create two classes:

* A `Trie` class that contains the root node (empty string)
* A `TrieNode` class that exposes the general functionality of the Trie, like inserting a word or finding the node which represents a prefix.

Give it a try by implementing the `TrieNode` and `Trie` classes below!

```
## Represents a single node in the Trie
class TrieNode:
    def __init__(self, end_of_word=False):
        # Initialize this node in the Trie.
        # Indicates whether the string that ends here is a valid word
        self.end_of_word = end_of_word
        # A dictionary to store the possible characters in this node
        # Dictionary key: character (e.g. a)
        # Dictionary value: pointer to child node
        self.char_dict = dict()

    def insert(self, char):
        # Add a child node in this Trie and return it, so the caller
        # can continue descending from the new node.
        sub_char_node = TrieNode()
        self.char_dict[char] = sub_char_node
        return sub_char_node

## The Trie itself containing the root node and insert/find functions
class Trie:
    def __init__(self):
        # Initialize this Trie (add a root node representing the empty string)
        root_node = TrieNode()
        self.root = root_node

    def insert(self, word):
        # Add a word to the Trie.
        # Split the word into a seq of chars and build the corresponding TrieNodes
        cur_node = self.root
        for char in word:
            if char in cur_node.char_dict:
                # Character already present: descend into the existing child
                cur_node = cur_node.char_dict[char]
            else:
                # Character missing: create the child node, then descend
                new_child_node = cur_node.insert(char)
                cur_node = new_child_node
        # End of word, set end_of_word property to True
        cur_node.end_of_word = True

    def find(self, prefix):
        # Find the Trie node that represents this prefix;
        # returns None when the prefix is not in the Trie.
        cur_node = self.root
        # Traverse the Trie tree based on the character sequence in the prefix
        for char in prefix:
            if char in cur_node.char_dict:
                cur_node = cur_node.char_dict[char]
            else:
                return None
        return cur_node
```

# Finding Suffixes

Now that we have a functioning Trie, we need to add the ability to list suffixes to implement our autocomplete feature. To do that, we need to implement a new function on the `TrieNode` object that will return all complete word suffixes that exist below it in the trie. For example, if our Trie contains the words `["fun", "function", "factory"]` and we ask for suffixes from the `f` node, we would expect to receive `["un", "unction", "actory"]` back from `node.suffixes()`.

Using the code you wrote for the `TrieNode` above, try to add the suffixes function below. (Hint: recurse down the trie, collecting suffixes as you go.)

```
class TrieNode:
    def __init__(self, end_of_word=False):
        # Initialize this node in the Trie.
        # Indicates whether the string that ends here is a valid word
        self.end_of_word = end_of_word
        # A dictionary to store the possible characters in this node
        # Dictionary key: character (e.g. a)
        # Dictionary value: pointer to child node
        self.char_dict = dict()

    def insert(self, char):
        # Add a child node in this Trie and return it
        sub_char_node = TrieNode()
        self.char_dict[char] = sub_char_node
        return sub_char_node

    def suffixes(self, suffix=''):
        # Recursive function that collects the suffix for
        # all complete words below this point
        output_str_list = list()

        def find_suffix(node, output_str):
            # If end_of_word at this node is true, then add the suffix to result list
            if node.end_of_word:
                output_str_list.append(output_str)
            # Depth-first walk: extend the running suffix by each child's character
            for char in node.char_dict:
                temp_output_str = output_str + char
                find_suffix(node.char_dict[char], temp_output_str)

        find_suffix(self, "")
        return output_str_list
```

# Testing it all out

Run the following code to add some words to your trie and then use the interactive search box to see what your code returns.

```
MyTrie = Trie()
wordList = [
    "ant", "anthology", "antagonist", "antonym",
    "fun", "function", "factory",
    "trie", "trigger", "trigonometry", "tripod"
]
for word in wordList:
    MyTrie.insert(word)

node = MyTrie.find('t')
print(node.suffixes())

from ipywidgets import widgets
from IPython.display import display
from ipywidgets import interact

def f(prefix):
    # Interactive callback: print every completion of the typed prefix,
    # or a "not found" message when the prefix is absent from the trie.
    if prefix != '':
        prefixNode = MyTrie.find(prefix)
        if prefixNode:
            print('\n'.join(prefixNode.suffixes()))
        else:
            print(prefix + " not found")
    else:
        print('')

interact(f,prefix='');
```
github_jupyter
```
# Notebook cell: imports and configuration for the pubweb single-cell
# conversion pipeline (AnnData HDF5 -> pubweb export folder).
import argparse
import logging
from operator import mul
import time
import os

import pubweb.singlecell
# import AnnDataSparse
from pubweb.hdf5 import Hdf5
from pubweb.commands.convert.singlecell.anndata import ImportAnndata
from pubweb.commands.convert.singlecell.cellranger import ImportCellRanger
from pubweb.commands.validate.dimensions import ValidateDimensions
from pubweb.commands.annotate.geneid import AnnotateGeneId
from pubweb.commands.annotate.geneset import AnnotateGeneset
from pubweb.commands.export.lists import ExportLists
from pubweb.commands.export.attributes import ExportAttributes
from pubweb.commands.export.tables import ExportTables
from pubweb.commands.export.projections import ExportProjections
from pubweb.commands.export.spatial import ExportSpatial
from pubweb.commands.export.matrix_sparse import ExportMatrixSparse
from pubweb.commands.export.matrix_dense import ExportMatrixDense
from pubweb.commands.summarize.genes import SummarizeGenes
from pubweb.commands.summarize.genemap import SummarizeGeneMap
from pubweb.commands.summarize.colors import SummarizeColors
# NOTE(review): 'SummerizeManifest' spelling matches the pubweb package API.
from pubweb.commands.summarize.manifest import SummerizeManifest

# Reload the pubweb modules so edits to the package source take effect
# without restarting the notebook kernel (development workflow).
import importlib
importlib.reload(pubweb.singlecell)
importlib.reload(pubweb.hdf5)
importlib.reload(pubweb.commands.convert.singlecell.anndata)
importlib.reload(pubweb.commands.convert.singlecell.cellranger)
importlib.reload(pubweb.commands.validate.dimensions)
importlib.reload(pubweb.commands.annotate.geneid)
importlib.reload(pubweb.commands.annotate.geneset)
importlib.reload(pubweb.commands.export)
importlib.reload(pubweb.commands.export.lists)
importlib.reload(pubweb.commands.export.attributes)
importlib.reload(pubweb.commands.export.tables)
importlib.reload(pubweb.commands.export.projections)
importlib.reload(pubweb.commands.export.spatial)
importlib.reload(pubweb.commands.export.matrix_sparse)
importlib.reload(pubweb.commands.export.matrix_dense)
importlib.reload(pubweb.commands.summarize.genes)
importlib.reload(pubweb.commands.summarize.genemap)
importlib.reload(pubweb.commands.summarize.colors)
importlib.reload(pubweb.commands.summarize.manifest)

logging.basicConfig(level='DEBUG')

# Run configuration: dataset name, input AnnData HDF5, output folder.
datasetName='lung-upper-airway-h1299'
inputFile = '/data/notebooks/input/convert.hdf5'
outputFolder = '/data/notebooks/pubweb'
species = 'human'
# When True, an existing pubweb.hdf5 output is deleted before conversion.
overwriteHdf5 = True
python_wd = '/opt/pubweb'
#dir(pubweb.singlecell)
```

```
# anndatasparse
# Pipeline cell: import -> annotate -> export -> summarize.
# %time is an IPython magic; inline comments record observed wall times.
outputFile = f'{outputFolder}/pubweb.hdf5'
# Remove a stale output file so the run starts from a clean HDF5.
if os.path.exists(outputFile) and overwriteHdf5:
    os.remove(outputFile)
hdf5 = Hdf5.load(outputFile, "a")
hdf5.uri
# The pipe operator applies a pubweb command object to the Hdf5 wrapper.
%time hdf5 | ImportAnndata(inputFile, datasetName) # 345
hdf5.getDatasets()
hdf5.h5py['pubweb/lung-upper-airway-h1299/matrix']
%time hdf5 | AnnotateGeneId(species=species) # 1min28s
# save hdf5_geneid
print(type(hdf5))
hdf5.getDatasetsWithPath('pubweb/lung-upper-airway-h1299')
hdf5.getDatasets()
%time hdf5 | ExportMatrixDense(outputFolder) # 14.1s
%time hdf5 | ExportProjections(outputFolder) # 3min3s
%time hdf5 | ExportTables(outputFolder) # 426us
%time hdf5 | ExportLists(outputFolder) #480us
%time hdf5 | ExportAttributes(outputFolder) # 2min 7 s
%time hdf5 | SummarizeColors(outputFolder) # 59.4ms
%time hdf5 | SummerizeManifest(outputFolder) # 4.2ms
```
github_jupyter
# Best-practices for Cloud-Optimized Geotiffs **Part 2. Multiple COGs** This notebook goes over ways to construct a multidimensional xarray DataArray from many 2D COGS ``` import dask import s3fs import intake import os import xarray as xr import pandas as pd # use the same GDAL environment settings as we did for the single COG case env = dict(GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR', AWS_NO_SIGN_REQUEST='YES', GDAL_MAX_RAW_BLOCK_CACHE_SIZE='200000000', GDAL_SWATH_SIZE='200000000', VSI_CURL_CACHE_SIZE='200000000') os.environ.update(env) # set up a connection with credentials and other settings s3 = s3fs.S3FileSystem(anon=True) objects = s3.ls('sentinel-s1-rtc-indigo/tiles/RTC/1/IW/10/T/ET/2020/') images = ['s3://' + obj + '/Gamma0_VV.tif' for obj in objects] print(len(images)) images[:6] #january 2020 scenes ``` ## GDAL VRT A GDAL VRT file is an XML format that can group together many separate files into separate bands. It's common to create such a file with a the GDAL command line tool `gdalbuildvrt`, illustrated below: ``` #step 1) write a file list that points to the data. GDAL requires special prefixes for this /vsis3/ or /vsicurl/ with open('files.txt', 'w') as f: lines = [x.replace('s3://', '/vsis3/') + '\n' for x in images[:6]] f.writelines(lines) %%time # step 2) create a VRT file !gdalbuildvrt stack.vrt -separate -input_file_list files.txt %%time # step 4) open with xarray chunks=dict(band=1, x=2745, y=2745) da = xr.open_rasterio('stack.vrt', chunks=chunks) da # step 5) optionally modify coordinates (e.g. time dimension extracted from file name) da = da.rename({'band':'time'}) da['time'] = [pd.to_datetime(x[60:68]) for x in images[:6]] ``` #### Recap 1. `xr.open_rasterio(stack.vrt)` stores band coordinates as sequential integers (we lose file name and metadata from each individual COG, so it's common to alter the coordinates after opening the dataset) 2. 
data is tied to a reference to a local file ('stack.vrt'), which can cause problems with distributed computing if you don't have access to the local filesystem ## intake-xarray [intake-xarray](https://github.com/intake/intake-xarray) is a plugin for the intake library. It uses fsspec/s3fs under the hood to facilitate loading data into python objects. the function `intake.open_rasterio()` accepts a list of paths. it returns an intake object with a `to_dask()` function that returns an xarray DataArray ``` %%time # ~ 1s for 6 files # this loads the image ID into xarray's band coordinates. pattern = 's3://sentinel-s1-rtc-indigo/tiles/RTC/1/IW/10/T/ET/2020/{band}/Gamma0_VV.tif' chunks=dict(band=1, x=2745, y=2745) sources = intake.open_rasterio(images[:6], chunks=chunks, path_as_pattern=pattern, concat_dim='band') da = sources.to_dask() da ``` #### recap: * This is a convient way to avoid constructing a VRT and load a bunch of COGs. It works well as long as the COG urls follow a distinct pattern. Metadata is also lost (we have attributes from the first COG, not others) ## Custom You can also just use xarray and dask to construct a larger datacube from many COGS. ``` %%time # 4 - 8 s # Load all the images chunks=dict(band=1, x=2745, y=2745) dataArrays = [xr.open_rasterio(url, chunks=chunks) for url in images] # note use of join='override' b/c we know these COGS have the same coordinates da = xr.concat(dataArrays, dim='band', join='override', combine_attrs='drop') da = da.rename({'band':'time'}) da['time'] = [pd.to_datetime(x[60:68]) for x in images] da ``` #### recap: * The cell above is essentially a for-loop that iterates over each COG in sequence. 50ms-200ms * 80 ~ 4-16 seconds. The next notebook will look at using Dask to speed things up by opening the files in parallel. ## Visualize Here is an example of interactive visualization again using hvplot. Since we're using full resolution arrays it's key to set the `rasterize=True` keyword argument. 
That uses the datashader library to pre-render images before sending them to the browser. This is extremely powerful because, resolution updates as you zoom in, and you can scrub through the data cube with an interactive slider widget ``` import hvplot.xarray da.hvplot.image(rasterize=True, aspect='equal', cmap='gray', clim=(0,0.4)) ```
github_jupyter
# Kats 201 - Forecasting with Kats This tutorial will introduce time series modeling and forecasting with Kats. We will show you how to build forecasts with different Kats models and how to do parameter tuning and backtesting using Kats. The complete table of contents for Kats 201 is as follows: 1. Forecasting with Kats Base Models 1.1 SARIMA 1.2 Prophet 1.3 Holt-Winters 2. Forecasting with Kats Ensemble Model 3. Multivariate Model Forecasting 4. Hyperparameter Tuning 5. Backtesting **Note:** We provide two types of tutorial notebooks - **Kats 101**, basic data structure and functionalities in Kats - **Kats 20x**, advanced topics, including advanced forecasting techniques, advanced detection algorithms, `TsFeatures`, meta-learning, etc. # 1. Forecasting with Kats Base Models In this part, we will demonstrate the forecasting workflow with the following models with `air_passengers` data set: 1. SARIMA 2. Prophet, 3. Holt-Winters We begin by loading the `air_passengers` data set into a `TimeSeriesData` object. This code is essentially the same as the code in our introduction to the `TimeSeriesData` object in the Kats 101 Tutorial. ``` %%capture # For Google Colab: !pip install kats !wget https://raw.githubusercontent.com/facebookresearch/Kats/main/kats/data/air_passengers.csv !wget https://raw.githubusercontent.com/facebookresearch/Kats/main/kats/data/multi_ts.csv import pandas as pd import numpy as np import sys import matplotlib.pyplot as plt import warnings warnings.simplefilter(action='ignore') sys.path.append("../") from kats.consts import TimeSeriesData try: # If running on Jupyter air_passengers_df = pd.read_csv("../kats/data/air_passengers.csv") except FileNotFoundError: # If running on colab air_passengers_df = pd.read_csv("air_passengers.csv") # Note: If the column holding the time values is not called time, you will want to specify the name of this column. 
air_passengers_df.columns = ["time", "value"] air_passengers_ts = TimeSeriesData(air_passengers_df) ``` Because each of our time series models follow the `sklearn` model API pattern, the code for each of the next three examples is quite similar. We initialize the model with its parameters and then call the `fit` and `predict` methods. The only difference between each of these examples are the model-specific parameters. We can then use the `plot` method to visualize our forecast in each case. The values we choose for each of our paremeters in these examples are basically arbitrary. Later in this tutorial, we will show you how to pick the right parameters for a model in Kats using hyperparameter tuning. ## 1.1 SARIMA ``` from kats.models.sarima import SARIMAModel, SARIMAParams warnings.simplefilter(action='ignore') # create SARIMA param class params = SARIMAParams( p = 2, d=1, q=1, trend = 'ct', seasonal_order=(1,0,1,12) ) # initiate SARIMA model m = SARIMAModel(data=air_passengers_ts, params=params) # fit SARIMA model m.fit() # generate forecast values fcst = m.predict( steps=30, freq="MS" ) # make plot to visualize m.plot() ``` ## 1.2 Prophet Note: This example requires `fbprophet` be installed, for example with `pip install kats[prophet]` or `pip install kats[all]` ``` # import the param and model classes for Prophet model from kats.models.prophet import ProphetModel, ProphetParams # create a model param instance params = ProphetParams(seasonality_mode='multiplicative') # additive mode gives worse results # create a prophet model instance m = ProphetModel(air_passengers_ts, params) # fit model simply by calling m.fit() m.fit() # make prediction for next 30 month fcst = m.predict(steps=30, freq="MS") # plot to visualize m.plot() ``` ## 1.3 Holt-Winters ``` from kats.models.holtwinters import HoltWintersParams, HoltWintersModel warnings.simplefilter(action='ignore') params = HoltWintersParams( trend="add", #damped=False, seasonal="mul", seasonal_periods=12, ) m = 
HoltWintersModel( data=air_passengers_ts, params=params) m.fit() fcst = m.predict(steps=30, alpha = 0.1) m.plot() ``` # 2. Forecasting with Ensemble model `KatsEnsemble` is an ensemble forecasting model, which means it allows you to combine several different forecasting models when building a forecast. When creating an ensemble, you specify the list of models (with parameters) that you wish to include in the ensemble, and then you choose whether to aggregate these forecasts using the median or the weighted average. Prior to building any forecasts, the model checks for seasonality and if seasonality is detected, it performs an STL decomposition (using either additive or multiplicative decomposition, as specified by the user). Each of the forecasting models specified to the ensemble model are only applied to the the de-seasonalized components, and after these forecasts are aggregated the result is reseasonalized. When we initialize `KatsEnsemble`, we include a dictionary with the following components: * **models:** `EnsembleParams`, contains a list of parameters for each of the individual model parameters * **aggregation:** 'str', either 'median' or 'weightedavg', how to aggregate the individual forecasts to build an ensemble * **seasonality_length:** int, the length of the seasonality of the time series * **decomposition_method** str, either 'multiplicative' or 'additive', the type of decomposition of the initial time series In the example below, we use the `air_passengers` data set to build a median ensemble forecast that combines 6 different forecasting models. We use the `EnsembleParams` object to define the parameters for each of these models. Then generating a forecast for this ensemble is straightforward. 
``` from kats.models.ensemble.ensemble import EnsembleParams, BaseModelParams from kats.models.ensemble.kats_ensemble import KatsEnsemble from kats.models import ( arima, holtwinters, linear_model, prophet, # requires fbprophet be installed quadratic_model, sarima, theta, ) # we need define params for each individual forecasting model in `EnsembleParams` class # here we include 6 different models model_params = EnsembleParams( [ BaseModelParams("arima", arima.ARIMAParams(p=1, d=1, q=1)), BaseModelParams( "sarima", sarima.SARIMAParams( p=2, d=1, q=1, trend="ct", seasonal_order=(1, 0, 1, 12), enforce_invertibility=False, enforce_stationarity=False, ), ), BaseModelParams("prophet", prophet.ProphetParams()), # requires fbprophet be installed BaseModelParams("linear", linear_model.LinearModelParams()), BaseModelParams("quadratic", quadratic_model.QuadraticModelParams()), BaseModelParams("theta", theta.ThetaParams(m=12)), ] ) # create `KatsEnsembleParam` with detailed configurations KatsEnsembleParam = { "models": model_params, "aggregation": "median", "seasonality_length": 12, "decomposition_method": "multiplicative", } # create `KatsEnsemble` model m = KatsEnsemble( data=air_passengers_ts, params=KatsEnsembleParam ) # fit and predict m.fit() # predict for the next 30 steps fcst = m.predict(steps=30) # aggregate individual model results m.aggregate() # plot to visualize m.plot() ``` # 3. Multivariate Model Forecasting Vector autoregression (VAR) is a multivariable forecasting algorithm that is supported in Kats. Here, we show show an example of how to use the `VARModel` with the `multi_ts` data set. We begin by loading the data set into a `TimeSeriesData` and previewing it. 
```
try:  # If running on Jupyter
    multi_df = pd.read_csv("../kats/data/multi_ts.csv")
except FileNotFoundError:  # If running on colab
    multi_df = pd.read_csv("multi_ts.csv")

multi_ts = TimeSeriesData(multi_df)
multi_df.groupby('time').sum()[['V1', 'V2']].plot(figsize=(10, 6))
plt.show()
```

Now it is straightforward to build this forecast using `VARModel` and plot the results as follows.

```
# Use VAR model to forecast this multivariate time series
from kats.models.var import VARModel, VARParams

params = VARParams()
m = VARModel(multi_ts, params)
m.fit()
fcst = m.predict(steps=90)
m.plot()
plt.show()
```

# 4. Hyperparameter tuning

To identify which hyperparameters to use for a specified forecasting model, we have classes in Kats that allow you to efficiently identify the best hyperparameters. Here we will provide an example of how to do hyperparameter tuning for an ARIMA model using the `air_passengers` data set.

NOTE: This example requires ax-platform be installed. For example, `pip install ax-platform` or `pip install kats[all]`.

```
import kats.utils.time_series_parameter_tuning as tpt
from kats.consts import ModelEnum, SearchMethodEnum, TimeSeriesData
from kats.models.arima import ARIMAParams, ARIMAModel

from ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType
from ax.models.random.sobol import SobolGenerator
from ax.models.random.uniform import UniformGenerator

warnings.simplefilter(action='ignore')
```

The method we use for hyperparameter tuning is a static method called `create_search_method`. To call this method, we need to specify the type of search we are doing and the search space for the parameters. We specify the search space for the parameters by defining a dictionary for each parameter and combining these dictionaries into a list. Here we are specifying that we want to look at all ARIMA(p,d,q) models where the values p, d, and q are either 1 or 2.
```
parameters_grid_search = [
    {
        "name": "p",
        "type": "choice",
        "values": list(range(1, 3)),
        "value_type": "int",
        "is_ordered": True,
    },
    {
        "name": "d",
        "type": "choice",
        "values": list(range(1, 3)),
        "value_type": "int",
        "is_ordered": True,
    },
    {
        "name": "q",
        "type": "choice",
        "values": list(range(1, 3)),
        "value_type": "int",
        "is_ordered": True,
    },
]
```

Now, we are going to create a grid search with these parameters. The full list of arguments of the `create_search_method` is as follows:

* **Parameters:** List[Dict], this is a list of dictionaries, where each dictionary gives the search space for a parameter
* **selected_search_method:** SearchMethodEnum, the type of search method used to do parameter tuning
* **objective_name:** str, the name of the objective function used for the search (this is arbitrary)

```
parameter_tuner_grid = tpt.SearchMethodFactory.create_search_method(
    objective_name="evaluation_metric",
    parameters=parameters_grid_search,
    selected_search_method=SearchMethodEnum.GRID_SEARCH,
)
```

Now that we have defined our search grid, we need to define the metric we are calculating at each point on the grid. Given a set of parameters p, q, and d, we define our evaluation function to be mean absolute error (MAE) of the forecast for the test data set (using an 80/20 training-test split) using these respective parameters.
```
# Divide into an 80/20 training-test split
split = int(0.8*len(air_passengers_df))
train_ts = air_passengers_ts[0:split]
test_ts = air_passengers_ts[split:]

# Fit an ARIMA model and calculate the MAE for the test data
def evaluation_function(params):
    arima_params = ARIMAParams(
        p = params['p'],
        d = params['d'],
        q = params['q']
    )
    model = ARIMAModel(train_ts, arima_params)
    model.fit()
    model_pred = model.predict(steps=len(test_ts))
    error = np.mean(np.abs(model_pred['fcst'].values - test_ts.value.values))
    return error
```

Now that we have our grid and our evaluation functions defined, we can display our evaluation metric for each point on the grid using the following function calls.

```
parameter_tuner_grid.generate_evaluate_new_parameter_values(
    evaluation_function=evaluation_function
)

# Retrieve parameter tuning results
parameter_tuning_results_grid = (
    parameter_tuner_grid.list_parameter_value_scores()
)
parameter_tuning_results_grid
```

From the calculations in the table above, we can conclude that ARIMA(2,1,1) has the minimal error of 52.02.

# 5. Backtesting

Kats provides a backtesting module that makes it easy to compare and evaluate different forecasting models. While our hyperparameter tuning module allows you to compare different sets of parameters for a single base forecasting model, backtesting allows you to compare different types of base models (with pre-specified parameters).

Our backtesting module allows you to look at multiple error metrics in a single function call. Here are the error metrics that are currently supported:

* Mean Absolute Error (MAE)
* Mean Absolute Percentage Error (MAPE)
* Symmetric Mean Absolute Percentage Error (SMAPE)
* Mean Squared Error (MSE)
* Mean Absolute Scaled Error (MASE)
* Root Mean Squared Error (RMSE)

Our example below shows how you can use the `BackTesterSimple` class to compare errors between an ARIMA model and a Prophet model using the `air_passengers` data set.
```
from kats.utils.backtesters import BackTesterSimple
from kats.models.arima import ARIMAModel, ARIMAParams

backtester_errors = {}
```

Here, we define a backtester to look at each of the supported error metrics for an ARIMA(2,1,1) model. We specify in the `BackTesterSimple` definition that we are using a 75/25 training-test split to train and evaluate the metrics for this model

```
params = ARIMAParams(p=2, d=1, q=1)
ALL_ERRORS = ['mape', 'smape', 'mae', 'mase', 'mse', 'rmse']

backtester_arima = BackTesterSimple(
    error_methods=ALL_ERRORS,
    data=air_passengers_ts,
    params=params,
    train_percentage=75,
    test_percentage=25,
    model_class=ARIMAModel)

backtester_arima.run_backtest()
```

After we run the backtester, the `errors` attribute will be a dictionary mapping each error type name to its corresponding value

```
backtester_errors['arima'] = {}
for error, value in backtester_arima.errors.items():
    backtester_errors['arima'][error] = value
```

Now we run another backtester to calculate the same error metrics for a Prophet model.

```
params_prophet = ProphetParams(seasonality_mode='multiplicative') # additive mode gives worse results

backtester_prophet = BackTesterSimple(
    error_methods=ALL_ERRORS,
    data=air_passengers_ts,
    params=params_prophet,
    train_percentage=75,
    test_percentage=25,
    model_class=ProphetModel)

backtester_prophet.run_backtest()

backtester_errors['prophet'] = {}
for error, value in backtester_prophet.errors.items():
    backtester_errors['prophet'][error] = value
```

Here we can compare the error metrics for the two models.

```
pd.DataFrame.from_dict(backtester_errors)
```
github_jupyter
Script delete Cassandra en cluster multidomain

```
!pip install mysql-connector==2.1.7
!pip install pandas
!pip install sqlalchemy
# requires additional installation, see https://github.com/PyMySQL/mysqlclient
!pip install mysqlclient
!pip install numpy
!pip install pymysql

import pandas as pd
import numpy as np
import os
import json
import random
from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
import time
from pprint import pprint
import psutil
import uuid
from cassandra.query import tuple_factory
from cassandra.query import dict_factory
from cassandra.query import BatchStatement, SimpleStatement
from cassandra.policies import RetryPolicy

# The per-domain load-timing measurement results are stored in these objects.
# We iterate 100 times in order to compute averages.
# repetitions
repeats = 100
# Output files
resultados_etl_delete = '../Results/Cassandra/CassandraDelete_test_{}.csv'

def save_results_to_csv(results,file):
    # Save the results to CSV; the filename template receives a timestamp.
    from datetime import datetime
    csv_df = pd.DataFrame(results, columns=['Registros', 'Tiempo', 'CPU','Memoria'])
    dia = datetime.now().strftime("%d%m%Y_%H_%M_%S")
    print(file.format(str(dia)))
    csv_df.to_csv(file.format(str(dia)))

from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from cassandra.policies import WhiteListRoundRobinPolicy, DowngradingConsistencyRetryPolicy
from cassandra.query import tuple_factory
from cassandra import ConsistencyLevel

# Execution profile: single local node, ALL consistency, long request timeout
# so large batches are not cut off mid-measurement.
profile = ExecutionProfile(
    load_balancing_policy=WhiteListRoundRobinPolicy(['127.0.0.1']),
    retry_policy=DowngradingConsistencyRetryPolicy(),
    consistency_level=ConsistencyLevel.ALL,
    serial_consistency_level=ConsistencyLevel.LOCAL_SERIAL,
    request_timeout=3600,
    row_factory=tuple_factory
)
cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: profile})
session = cluster.connect()
print(session.execute("SELECT release_version FROM system.local").one())
session.execute('USE currentaccountkeyspace')

# Pre-fetch a pool of party ids; each test iteration deletes one of them.
partyid_list = []
partyid_list_deleted = []
result = session.execute("SELECT partyid FROM customerprofilekeyspace.customerprofile LIMIT 10000;")
for partyid in result:
    partyid_list.append(partyid[0])
print(partyid_list[10:15])
```

# Select test multidomain

```
from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement
from cassandra import concurrent
from statistics import mean

registers = []
batch = BatchStatement(consistency_level=ConsistencyLevel.ALL)
account_id_list = []

# Bulk deletes using a batch of statements
def deletePartyOnCascade():
    # Delete one random party per iteration, cascading across the
    # customer-profile, current-account and position-keeping keyspaces,
    # and record (iteration, elapsed seconds, CPU %, memory %) per delete.
    SELECT_CURRENT_ACCOUNT_STMT = "SELECT accountid from currentaccountkeyspace.currentaccount WHERE partyid = '{}';"
    DELETE_CURRENT_ACCOUNT_STMT = "DELETE FROM currentaccountkeyspace.currentaccount WHERE accountid = '{}';"
    DELETE_CUSTOMER_PROFILE_STMT = "DELETE FROM customerprofilekeyspace.customerprofile WHERE partyid = '{}';"
    DELETE_CUSTOMER_PROFILE_ADDRESS_STMT = "DELETE FROM customerprofilekeyspace.customerprofile_address WHERE partyid = '{}';"
    DELETE_CURRENT_BY_SCHEME_ACCOUNT_STMT = "DELETE FROM currentaccountkeyspace.currentaccountbyschemename WHERE accountid = '{}';"
    DELETE_POSITIONKEEPING_SMT = "DELETE FROM positionkeepingkeyspace.positionkeeping WHERE accountid = '{}';"
    session = cluster.connect('customerprofilekeyspace')
    iter = 0;
    i = 1
    for i in range(0,repeats):
        time_inicial = time.time()
        accounts_to_delete = []
        # Pick a random remaining party to delete this iteration.
        partyid = random.choice(partyid_list)
        print(SELECT_CURRENT_ACCOUNT_STMT.format(partyid))
        result_ca = session.execute(SELECT_CURRENT_ACCOUNT_STMT.format(partyid))
        for accountid in result_ca:
            accounts_to_delete.append(accountid[0])
            #print("accounts_to_delete:", accountid[0])
        # Delete customerprofile
        print(DELETE_CUSTOMER_PROFILE_STMT.format(partyid))
        batch.add(DELETE_CUSTOMER_PROFILE_STMT.format(partyid))
        # Delete customerprofile address
        print(DELETE_CUSTOMER_PROFILE_ADDRESS_STMT.format(partyid))
        batch.add(DELETE_CUSTOMER_PROFILE_ADDRESS_STMT.format(partyid))
        # Delete account info
        for accountid in accounts_to_delete:
            # Delete account
            print(DELETE_CURRENT_ACCOUNT_STMT.format(accountid))
            batch.add(DELETE_CURRENT_ACCOUNT_STMT.format(accountid))
            print(DELETE_CURRENT_BY_SCHEME_ACCOUNT_STMT.format(accountid))
            batch.add(DELETE_CURRENT_BY_SCHEME_ACCOUNT_STMT.format(accountid))
            print(DELETE_POSITIONKEEPING_SMT.format(accountid))
            batch.add(DELETE_POSITIONKEEPING_SMT.format(accountid))
        # Cascading delete: execute the whole batch atomically, then reuse it.
        session.execute(batch)
        batch.clear()
        partyid_list.remove(partyid)
        time_final = time.time()
        data_time_collection = round(time_final - time_inicial,3)
        used_cpu = psutil.cpu_percent()
        mem_used = psutil.virtual_memory().percent
        registers.append((iter,data_time_collection,used_cpu,mem_used))
        print((iter,data_time_collection,used_cpu,mem_used))
        iter += 1;
        time_inicial = time.time()
        i = i + 1
    return registers

registers = deletePartyOnCascade()
# Save the Customer Profile results
save_results_to_csv(registers,resultados_etl_delete)

cluster.shutdown()
print('Conexion cerrada')
```
github_jupyter
```
# Advent of Code 2021 Day 17 ("Trick Shot"): parse the target rectangle,
# then search launch velocities that land the probe inside it.
import re

with open('Day17 input.txt') as f:
    lines = f.readlines()
lines = [x.strip() for x in lines]
# Drop the 'target area: ' prefix, leaving 'x=A..B, y=C..D'.
line = lines[0][13:]
print(line)
# x_range / y_range become [low, high] bounds of the target rectangle.
x_range = re.search('(?<=x=).*(?=, )',line)[0]
x_range = [int(re.search('.*(?=\.\.)',x_range)[0]), int(re.search('(?<=\.\.).*',x_range)[0])]
print(x_range)
y_range = re.search('(?<=, y=).*',line)[0]
y_range = [int(re.search('.*(?=\.\.)',y_range)[0]), int(re.search('(?<=\.\.).*',y_range)[0])]
print(y_range)

#pos is (x,y), vel is (x_vel,y_vel)
# outputs new step,new vel
def step(x,y,x_vel,y_vel):
    # One tick of puzzle physics: position advances by velocity,
    # gravity decrements y_vel, drag moves x_vel toward zero.
    new_x_pos = x + x_vel
    new_y_pos = y + y_vel
    y_vel -= 1
    if x_vel > 0:
        x_vel -= 1
    elif x_vel < 0:
        x_vel += 1
    else:
        pass
    return new_x_pos,new_y_pos,x_vel,y_vel

#returns 0 if winning shot, -1 if under, 1 if over, 2 if goes through
def try_shot(x,y,x_vel,y_vel):
    # Simulate until horizontal motion has stopped AND the probe has
    # fallen below the target's bottom edge (reads module-level x_range/y_range).
    initial_y_vel = y_vel
    max_height = 0
    while x_vel != 0 or y >= y_range[0]:
        #step
        x,y,x_vel,y_vel = step(x,y,x_vel,y_vel)
        if y > max_height:
            max_height = y
        #print(x,y,x_vel,y_vel)
        #check for win
        if (x_range[0] <= x <= x_range[1]) and (y_range[0] <= y <= y_range[1]):
            print('y velocity ',initial_y_vel, ' has a max height of: ',max_height)
            return 0
    #every shot eventually gets to x_vel==0, but mercykill
    #if y_vel < 0 and y < y_range[0]:
    #    return -1
    #now x_vel == 0 and y_pos is under the box
    if x < x_range[0]:
        return -1
    elif x > x_range[1]:
        return 1
    else:
        return 2

#Part 1
# For each candidate initial y velocity, binary-search-like adjust x_vel
# (+/-1 per try_shot verdict) until the shot hits or we revisit an x_vel.
max_height = 0
winning_y_vels = []
x,y=0,0
start_x_vel = 0
for start_y_vel in range(-5,400): #for each value of y_vel
    x_vel,y_vel = start_x_vel,start_y_vel
    success = False
    max_height = 0
    attempted_x_vels = []
    while 1:
        attempted_x_vels.append(x_vel)
        x_result = try_shot(x,y,x_vel,y_vel)
        if x_result == 0:
            #print(start_y_vel,': ',x_vel,y_vel,'successfully hits')
            success = True
            break
        #no situation where it doesn't go through for starting y_vel for any x
        elif x_result == 2:
            print(start_y_vel,x_vel,': ',y_vel,'goes through')
            break
        else:
            # Nudge x_vel opposite the miss direction; a repeated x_vel
            # means the search has cycled with no solution for this y_vel.
            x_vel += -x_result
            if x_vel in attempted_x_vels:
                #print(start_y_vel,': ',y_vel,'no solution')
                break
    if success:
        #print(start_y_vel,'successfully hits')
        winning_y_vels.append(start_y_vel)
    else:
        #if len(winning_y_vels)>0:
        #    break
        #print(start_y_vel, 'doesn\'t hit')
        pass
print(max(winning_y_vels))

#returns 0 if winning shot, -1 if under, 1 if over, 2 if goes through
def try_shot_test(x,y,x_vel,y_vel):
    # Verbose copy of try_shot (prints every step) used for manual checking.
    while x_vel != 0 or y >= y_range[0]:
        #step
        x,y,x_vel,y_vel = step(x,y,x_vel,y_vel)
        print(x,y,x_vel,y_vel)
        #check for win
        if (x_range[0] <= x <= x_range[1]) and (y_range[0] <= y <= y_range[1]):
            return 0
    #every shot eventually gets to x_vel==0, but mercykill
    #if y_vel < 0 and y < y_range[0]:
    #    return -1
    #now x_vel == 0 and y_pos is under the box
    if x < x_range[0]:
        return -1
    elif x > x_range[1]:
        return 1
    else:
        return 2

# sample testing
x_range = [20, 30]
y_range = [-10, -5]
try_shot_test(0,0,6,10)

sample = '''target area: x=20..30, y=-10..-5'''
my = '''target area: x=241..273, y=-97..-63'''

def parse_data(data):
    # Parse 'target area: x=A..B, y=C..D' into {"x": [A, B], "y": [C, D]}.
    #target area: x=241..273, y=-97..-63
    coords = { "x": [], "y": []}
    data = data.split()
    for line in (line for line in data if "=" in line):
        for coord in (coord for coord in line.strip(",")[2:].split('..')):
            coords[line[0]].append(int(coord))
    return coords

def launch_probe(velocity,target):
    # Simulate one shot; returns (hit?, velocity, highest y reached).
    p_x,p_y = [0,0]
    v_x,v_y = velocity
    t_x = sorted(target["x"])
    t_y = sorted(target["y"])
    max_y = p_y
    # Loop while the probe can still reach the box: not past the right edge,
    # not stalled short of it, and not fallen below it.
    while (p_x < max(t_x)+1 and not (v_x == 0 and p_x < min(t_x))) and not (p_x > min(t_x) and p_y < min(t_y)):
        p_x += v_x
        p_y += v_y
        if v_x > 0:
            v_x -= 1
        elif v_x < 0:
            v_x += 1
        v_y -= 1
        if p_y > max_y:
            max_y = p_y
        if (p_x in range(min(t_x),max(t_x)+1)) and (p_y in range(min(t_y),max(t_y)+1)):
            return True,velocity,max_y
    return False,velocity,max_y

def main():
    # Part 2 (on the sample input): brute-force every plausible velocity,
    # counting hits and tracking the best apex among them.
    target = parse_data(sample)
    max_y = 0
    optimal = []
    count = 0
    for x in range(1,max(target["x"])*2):
        for y in range(min(target["y"]),max(target["x"])):
            r,velocity,this_max_y = launch_probe([x,y],target)
            if r == True:
                count += 1
                if this_max_y > max_y:
                    max_y = this_max_y
                    optimal = velocity
    print(optimal)
    print(max_y)
    print(count)

main()
```
github_jupyter
```
%matplotlib inline
from mpl_toolkits.mplot3d import Axes3D
import scipy.io as io
import numpy as np
import matplotlib.pyplot as plt
from math import ceil
from scipy.optimize import curve_fit
realization = 1000
import seaborn as sns
from matplotlib import cm
from array_response import *
import itertools

# Pre-fitted decision boundaries (curves in the azimuth/elevation rotation
# plane) produced offline and stored in boundary.mat.
mat = io.loadmat('boundary.mat')
bound1_para = mat['bound1_para'][0,:]
bound2_para = mat['bound2_para'][0,:]
bound3_para = mat['bound3_para'][0,:]
bound4_1para = mat['bound4_1para'][0,:]
bound4_2para = mat['bound4_2para'][0,:]
bound4_3para = mat['bound4_3para'][0,:]
# Azimuth limits selecting which "boundary 4" polynomial piece applies.
xlim_4_1 = mat['xlim_4_1'][0,0]
xlim_4_2 = mat['xlim_4_2'][0,:]
xlim_4_3 = mat['xlim_4_3'][0,0]
azi_rot = np.linspace(0,2*np.pi,50)

def func_sin(x, c, d):
    """Sinusoidal boundary model with fitted phase `c` and offset `d`
    (frequency 0.312 and amplitude 0.23 were fixed during the fit)."""
    return np.sin(2*np.pi*x*0.312 + c)*0.23 + d

# Visual sanity check: evaluate and plot all fitted boundaries over one
# azimuth period (elevation axis clipped to [0, pi]).
test_1 = func_sin(azi_rot, *bound1_para)
test_2 = func_sin(azi_rot, *bound2_para)
bound3 = np.poly1d(bound3_para)
boud4_13 = np.poly1d(bound4_1para)
bound4_2 = np.poly1d(bound4_2para)
plt.plot(azi_rot,test_1)
plt.plot(azi_rot,test_2)
plt.plot(azi_rot,bound3(azi_rot))
plt.plot(azi_rot,boud4_13(azi_rot))
plt.plot(azi_rot,bound4_2(azi_rot))
plt.ylim(0,3.14)

def check_cate(_azi,_ele):
    """Classify an (azimuth, elevation) rotation into one of three
    beam-steering categories -- "samecluster", "diffclus_samepol" or
    "diffclus_crosspol" -- by testing the point against the pre-fitted
    boundary curves loaded above."""
    _index = ""
    if ((_ele - bound3(_azi)) > 0):
        # Above boundary 3: "samecluster" only inside one of the three
        # azimuth windows delimited by the boundary-4 polynomial pieces.
        if (((_azi<xlim_4_1) and ((_ele - boud4_13(_azi))<0)) or ((_azi>xlim_4_2[0]) and (_azi<xlim_4_2[1]) and ((_ele - bound4_2(_azi))<0)) or ((_azi>xlim_4_3) and ((_ele - boud4_13(_azi))<0))):
            _index = "samecluster"
        else:
            _index = "diffclus_samepol"
    else:
        # Below boundary 3: decide with the two sinusoidal boundaries.
        if ((_ele - func_sin(_azi, *bound2_para)) > 0):
            _index = "diffclus_crosspol"
        else:
            if ((_ele - func_sin(_azi, *bound1_para)) > 0):
                _index = "samecluster"
            else:
                _index = "diffclus_samepol"
    return _index
```

### Parameters declaration
Declare parameters needed for channel realization

```
Ns = 1 # number of streams
Nc = 6 # number of cluster
Nray = 1 # number of rays in each cluster
Nt = 64 # number of transmit antennas
Nr = 16 # number of receive antennas
angle_sigma = 10/180*np.pi # standard deviation of the angles in azimuth and elevation both of Rx and Tx
gamma = np.sqrt((Nt*Nr)/(Nc*Nray))
realization = 1000 # equivalent to number of taking sample
count = 0
eps = 0.1 # 20dB isolation
sigma = np.sqrt(8/(1+eps**2))*1.37/1.14 # according to the normalization condition of H
```

### Channel Realization
Realize channel H for Dual-Polarized antenna array

```
# Per-realization storage: dual-polarized channel (2Nr x 2Nt), Tx/Rx array
# responses, and the four complex polarization gains (hh/hv/vh/vv) per path.
H_pol = np.zeros((2*Nr,2*Nt,realization),dtype=complex)
At = np.zeros((Nt,Nc*Nray,realization),dtype=complex)
Ar = np.zeros((Nr,Nc*Nray,realization),dtype=complex)
alpha_hh = np.zeros((Nc*Nray,realization),dtype=complex)
alpha_hv = np.zeros((Nc*Nray,realization),dtype=complex)
alpha_vh = np.zeros((Nc*Nray,realization),dtype=complex)
alpha_vv = np.zeros((Nc*Nray,realization),dtype=complex)
AoD = np.zeros((2,Nc*Nray),dtype=complex)
AoA = np.zeros((2,Nc*Nray),dtype=complex)
H = np.zeros((2*Nr,2*Nt,realization),dtype=complex)
# Random antenna-rotation angles, one (azimuth, elevation) pair per
# realization.  NOTE(review): azi_rot is overwritten here; the linspace grid
# above was only used for the boundary plots.
azi_rot = np.random.normal(1.7,0.3,realization)
ele_rot = np.random.normal(2.3,0.3,realization) # Why PI/2 ??
# azi_rot = np.random.uniform(0,2*np.pi,realization)
# ele_rot = np.random.uniform(0,np.pi,realization) # Why PI/2 ??
# 2x2 rotation matrix per realization; shape (2, 2, realization).
R = np.array([[np.cos(ele_rot)*np.cos(azi_rot),np.sin(ele_rot)],[-np.sin(ele_rot)*np.cos(azi_rot),np.cos(ele_rot)]]) # rotation matrix

for reali in range(realization):
    # Draw cluster-mean angles, then Laplace-distributed rays around them.
    for c in range(1,Nc+1):
        AoD_azi_m = np.random.uniform(0,2*np.pi,1) # Mean Angle of Departure _ azimuth
        AoD_ele_m = np.random.uniform(0,np.pi,1) # Mean Angle of Departure _ elevation
        AoA_azi_m = np.random.uniform(0,2*np.pi,1) # Mean Angle of Arrival_ azimuth
        AoA_ele_m = np.random.uniform(0,np.pi,1) # Mean Angle of Arrival_ elevation
        AoD[0,(c-1)*Nray:Nray*c] = np.random.laplace(AoD_azi_m, angle_sigma, (1,Nray))
        AoD[1,(c-1)*Nray:Nray*c] = np.random.laplace(AoD_ele_m, angle_sigma, (1,Nray))
        AoA[0,(c-1)*Nray:Nray*c] = np.random.laplace(AoA_azi_m, angle_sigma, (1,Nray))
        AoA[1,(c-1)*Nray:Nray*c] = np.random.laplace(AoA_ele_m, angle_sigma, (1,Nray))
    for j in range(Nc*Nray):
        At[:,j,reali] = array_response(AoD[0,j],AoD[1,j],Nt)/np.sqrt(2) # UPA array response
        Ar[:,j,reali] = array_response(AoA[0,j],AoA[1,j],Nr)/np.sqrt(2)
        # Per-path variances of the four polarization gains; the cross terms
        # (hv/vh) are attenuated by the isolation factor eps**2.
        var_hh = ((sigma**2)*(np.cos(AoD[0,j])**2)*(np.cos(AoA[0,j])**2)).real
        var_hv = ((eps**2)*(sigma**2)*(np.cos(AoD[1,j])**2)*(np.cos(AoA[0,j])**2)).real
        var_vh = ((eps**2)*(sigma**2)*(np.cos(AoD[0,j])**2)*(np.cos(AoA[1,j])**2)).real
        var_vv = ((sigma**2)*(np.cos(AoD[1,j])**2)*(np.cos(AoA[1,j])**2)).real
        # Circularly-symmetric complex Gaussian gains with the variances above.
        alpha_hh[j,reali] = np.random.normal(0, np.sqrt(var_hh/2)) + 1j*np.random.normal(0, np.sqrt(var_hh/2))
        alpha_hv[j,reali] = np.random.normal(0, np.sqrt(var_hv/2)) + 1j*np.random.normal(0, np.sqrt(var_hv/2))
        alpha_vh[j,reali] = np.random.normal(0, np.sqrt(var_vh/2)) + 1j*np.random.normal(0, np.sqrt(var_vh/2))
        alpha_vv[j,reali] = np.random.normal(0, np.sqrt(var_vv/2)) + 1j*np.random.normal(0, np.sqrt(var_vv/2))
        # 2x2 polarization matrix for this path; Kronecker product expands it
        # over the rank-1 outer product of the array responses.
        alpha = np.vstack((np.hstack((alpha_hh[j,reali],alpha_hv[j,reali])),np.hstack((alpha_vh[j,reali],alpha_vv[j,reali]))))
        H_pol[:,:,reali] = H_pol[:,:,reali] + np.kron(alpha,Ar[:,[j],reali]@At[:,[j],reali].conj().T)
    H_pol[:,:,reali] = 2*gamma* H_pol[:,:,reali]
    # Apply the receive-side rotation (expanded over the Nr antennas) and a
    # final scaling factor.
    H[:,:,reali] = (np.kron(R[:,:,reali],np.eye(Nr)))@H_pol[:,:,reali]
    H[:,:,reali] = np.sqrt(4/3)* H[:,:,reali]
```

### Check normalized condition

```
# The mean squared Frobenius norm of H should be close to 4*Nt*Nr.
channel_fro_2 = np.zeros(realization)
for reali in range(realization):
    channel_fro_2[reali] = np.linalg.norm(H[:,:,reali],'fro')
print("4*Nt*Nr =", 4*Nt*Nr , " Frobenius norm =", np.mean(channel_fro_2**2))

cluster = np.arange(Nc)
print(cluster)
# All unordered cluster pairs; used to enumerate cross-cluster path combos.
c = list(itertools.combinations(cluster, 2))
num_path = (2*Nc-1)*Nc
path_combi = np.zeros((num_path,4),dtype=int)
print(path_combi.shape)
# First Nc rows: single-cluster combinations (same index in all 4 slots).
path_combi[0:Nc,:]=np.arange(Nc).reshape(Nc,1).repeat(4,axis=1)
count = 0
# For each cluster pair, add 4 rows: two same-polarization and two
# cross-polarization orderings.
for i in range(int(Nc*(Nc-1)/2)):
    path_combi[Nc+4*i,:] = np.array([c[count][0],c[count][0],c[count][1],c[count][1]])
    path_combi[Nc+4*i+1,:] = np.array([c[count][1],c[count][1],c[count][0],c[count][0]])
    path_combi[Nc+4*i+2,:] = np.array([c[count][0],c[count][1],c[count][1],c[count][0]])
    path_combi[Nc+4*i+3,:] = np.array([c[count][1],c[count][0],c[count][0],c[count][1]])
    count = count+1
# Index sets partitioning path_combi rows by category.
cross_index = []
samepolar_index = []
count = Nc-1
while (count<num_path-4):
    cross_index.extend([count+3,count+4])
    samepolar_index.extend([count+1,count+2])
    count = count + 4
cross_index = np.array(cross_index)
samepolar_index = np.array(samepolar_index)
sameclus_index = np.arange(0,Nc)
print(cross_index)
print(samepolar_index)
print(sameclus_index)
# print(path_combi)

# Effective gain of every path combination in every realization, combining
# the rotated polarization gains of the selected paths.
path_gain = np.zeros((num_path,realization)) # 2 to save the position and maximum value
for reali in range(realization):
    for combi in range(num_path):
        path_gain[combi,reali] =\
        (np.abs\
        ((np.cos(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hh[path_combi[combi,0],reali]+np.sin(ele_rot[reali])*alpha_vh[path_combi[combi,0],reali])*(path_combi[combi,0]==path_combi[combi,1])+\
        (np.cos(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hv[path_combi[combi,2],reali]+np.sin(ele_rot[reali])*alpha_vv[path_combi[combi,2],reali])*(path_combi[combi,2]==path_combi[combi,1])+\
        (-np.sin(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hh[path_combi[combi,0],reali]+np.cos(ele_rot[reali])*alpha_vh[path_combi[combi,0],reali])*(path_combi[combi,0]==path_combi[combi,3])+\
        (-np.sin(ele_rot[reali])*np.cos(azi_rot[reali])*alpha_hv[path_combi[combi,2],reali]+np.cos(ele_rot[reali])*alpha_vv[path_combi[combi,2],reali])*(path_combi[combi,2]==path_combi[combi,3])
        ))**2
print(np.max(path_gain[0:Nc,2]))
print(path_gain[0:Nc,2])
print(path_gain[samepolar_index,2])
print(np.max(path_gain[samepolar_index,2]))
```

__Check maximum gain from combination of path in each realization__

```
# Best combination over ALL categories, per realization.
index = np.zeros(realization,dtype=int)
for reali in range(realization):
    index[reali] = np.argmax(path_gain[:,reali])
```

__Same Cluster__

```
# Best single-cluster combination (rows 0..Nc-1), per realization.
index_sameclus = np.zeros(realization,dtype=int)
for reali in range(realization):
    index_sameclus[reali] = np.argmax(path_gain[0:Nc,reali])
gain_sameclus = np.zeros(realization,dtype=float)
for reali in range(realization):
    gain_sameclus[reali] = path_gain[index_sameclus[reali],reali]
```

__Chosen Category before check__

```
# Pick the best combination only WITHIN the category predicted by
# check_cate() from the rotation angles; the index arithmetic maps a
# position inside the category's index set back to a path_combi row.
choosen_cate = ["" for x in range(realization)]
index_checkcate = np.zeros(realization,dtype=int)
cate = ""
temp = 0
for reali in range(realization):
    cate = check_cate(azi_rot[reali],ele_rot[reali])
    if (cate == "samecluster"):
        index_checkcate[reali] = np.argmax(path_gain[0:Nc,reali])
    if (cate == "diffclus_samepol"):
        temp = np.argmax(path_gain[samepolar_index,reali])
        index_checkcate[reali] = int(temp+(np.floor(temp/2))*2+Nc)
#         index_checkcate[reali] = np.argmax(path_gain[samepolar_index,reali])
    if (cate == "diffclus_crosspol"):
#         index_checkcate[reali] = np.argmax(path_gain[cross_index,reali])
        temp = np.argmax(path_gain[cross_index,reali])
        index_checkcate[reali] = int(temp+(np.floor(temp/2)+1)*2+Nc)
    choosen_cate[reali] = cate
    temp = 0
```

### Plot Spectral Efficiency

```
SNR_dB = np.arange(-35,10,5)
SNR = 10**(SNR_dB/10)
smax = SNR.shape[0]
# Spectral efficiency per SNR point and realization for each selection rule.
R_cross = np.zeros([smax, realization],dtype=complex)
# R_steer = np.zeros([smax, realization],dtype=complex)
R_samecl = np.zeros([smax, realization],dtype=complex)
R_checkcate = np.zeros([smax, realization],dtype=complex)
for reali in range(realization):
    _chosen_combi_path = path_combi[index[reali]]
    _chosen_checkcate_path = path_combi[index_checkcate[reali]]
#     _chosen_checkcate_path = path_combi[:,reali]
    _chosen_sameclus_path = path_combi[index_sameclus[reali]]
    # Analog combiner/precoder: stack the H- and V-polarization array
    # responses of the selected receive/transmit paths.
    W_cross = np.vstack((Ar[:,[_chosen_combi_path[1]],reali],Ar[:,[_chosen_combi_path[3]],reali]))
    F_cross = np.vstack((At[:,[_chosen_combi_path[0]],reali],At[:,[_chosen_combi_path[2]],reali]))
    W_checkcate = np.vstack((Ar[:,[_chosen_checkcate_path[1]],reali],Ar[:,[_chosen_checkcate_path[3]],reali]))
    F_checkcate = np.vstack((At[:,[_chosen_checkcate_path[0]],reali],At[:,[_chosen_checkcate_path[2]],reali]))
#     W_steer = np.vstack((Ar[:,[_chosen_steer_path[0]],reali],Ar[:,[_chosen_steer_path[1]],reali]))
#     F_steer = np.vstack((At[:,[_chosen_steer_path[0]],reali],At[:,[_chosen_steer_path[1]],reali]))
    W_samecl = np.vstack((Ar[:,[_chosen_sameclus_path[1]],reali],Ar[:,[_chosen_sameclus_path[3]],reali]))
    F_samecl = np.vstack((At[:,[_chosen_sameclus_path[0]],reali],At[:,[_chosen_sameclus_path[2]],reali]))
    # log2 det(I + SNR/Ns * pinv(W) H F F^H H^H W) for each selection rule.
    for s in range(smax):
        R_cross[s,reali] = np.log2(np.linalg.det(np.eye(Ns)+(SNR[s]/Ns)*np.linalg.pinv(W_cross)@H[:,:,reali]@F_cross@F_cross.conj().T@H[:,:,reali].conj().T@W_cross))
        R_checkcate[s,reali] = np.log2(np.linalg.det(np.eye(Ns)+(SNR[s]/Ns)*np.linalg.pinv(W_checkcate)@H[:,:,reali]@F_checkcate@F_checkcate.conj().T@H[:,:,reali].conj().T@W_checkcate))
        R_samecl[s,reali] = np.log2(np.linalg.det(np.eye(Ns)+(SNR[s]/Ns)*np.linalg.pinv(W_samecl)@H[:,:,reali]@F_samecl@F_samecl.conj().T@H[:,:,reali].conj().T@W_samecl))

x = np.linalg.norm(F_cross,'fro')
print("Ns", Ns , " Frobenius norm FRF*FBB=", x**2)

# Average over realizations and plot spectral efficiency vs SNR.
plt.plot(SNR_dB, (np.sum(R_cross,axis=1).real)/realization, label='joint polarization beam steering')
plt.plot(SNR_dB, (np.sum(R_checkcate,axis=1).real)/realization, label='one category beam steering')
plt.plot(SNR_dB, (np.sum(R_samecl,axis=1).real)/realization, label='same ray beam steering')
plt.legend(loc='upper left',prop={'size': 9})
plt.xlabel('SNR(dB)',fontsize=11)
plt.ylabel('Spectral Efficiency (bits/s/Hz)',fontsize=11)
plt.tick_params(axis='both', which='major', labelsize=9)
plt.ylim(0,11)
plt.grid()
plt.show()
```
github_jupyter
<h1 align="center">Introduction to SimpleITKv4 Registration</h1> <table width="100%"> <tr style="background-color: red;"><td><font color="white">SimpleITK conventions:</font></td></tr> <tr><td> <ul> <li>Dimensionality and pixel type of registered images is required to be the same (2D/2D or 3D/3D).</li> <li>Supported pixel types are sitkFloat32 and sitkFloat64 (use the SimpleITK <a href="http://www.itk.org/SimpleITKDoxygen/html/namespaceitk_1_1simple.html#af8c9d7cc96a299a05890e9c3db911885">Cast()</a> function if your image's pixel type is something else). </ul> </td></tr> </table> ## Registration Components <img src="ITKv4RegistrationComponentsDiagram.svg" style="width:700px"/><br><br> There are many options for creating an instance of the registration framework, all of which are configured in SimpleITK via methods of the <a href="http://www.itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1ImageRegistrationMethod.html">ImageRegistrationMethod</a> class. This class encapsulates many of the components available in ITK for constructing a registration instance. Currently, the available choices from the following groups of ITK components are: ### Optimizers The SimpleITK registration framework supports several optimizer types via the SetOptimizerAsX() methods, these include: <ul> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1ExhaustiveOptimizerv4.html">Exhaustive</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1AmoebaOptimizerv4.html">Nelder-Mead downhill simplex</a>, a.k.a. Amoeba. </li> <li> <a href="https://itk.org/Doxygen/html/classitk_1_1PowellOptimizerv4.html">Powell optimizer</a>. </li> <li> <a href="https://itk.org/Doxygen/html/classitk_1_1OnePlusOneEvolutionaryOptimizerv4.html">1+1 evolutionary optimizer</a>. 
</li> <li> Variations on gradient descent: <ul> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1GradientDescentOptimizerv4Template.html">GradientDescent</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1GradientDescentLineSearchOptimizerv4Template.html">GradientDescentLineSearch</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1RegularStepGradientDescentOptimizerv4.html">RegularStepGradientDescent</a> </li> </ul> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1ConjugateGradientLineSearchOptimizerv4Template.html">ConjugateGradientLineSearch</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1LBFGSBOptimizerv4.html">L-BFGS-B</a> (Limited memory Broyden, Fletcher,Goldfarb,Shannon-Bound Constrained) - supports the use of simple constraints ($l\leq x \leq u$) </li> </ul> ### Similarity metrics The SimpleITK registration framework supports several metric types via the SetMetricAsX() methods, these include: <ul> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1MeanSquaresImageToImageMetricv4.html">MeanSquares</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1DemonsImageToImageMetricv4.html">Demons</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1CorrelationImageToImageMetricv4.html">Correlation</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1ANTSNeighborhoodCorrelationImageToImageMetricv4.html">ANTSNeighborhoodCorrelation</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1JointHistogramMutualInformationImageToImageMetricv4.html">JointHistogramMutualInformation</a> </li> <li> <a href="http://www.itk.org/Doxygen/html/classitk_1_1MattesMutualInformationImageToImageMetricv4.html">MattesMutualInformation</a> </li> </ul> ### Interpolators The SimpleITK registration framework supports several interpolators via the SetInterpolator() method, which receives one of the <a 
href="http://www.itk.org/SimpleITKDoxygen/html/namespaceitk_1_1simple.html#a7cb1ef8bd02c669c02ea2f9f5aa374e5">following enumerations</a>:
<ul>
<li> sitkNearestNeighbor </li>
<li> sitkLinear </li>
<li> sitkBSpline </li>
<li> sitkGaussian </li>
<li> sitkHammingWindowedSinc </li>
<li> sitkCosineWindowedSinc </li>
<li> sitkWelchWindowedSinc </li>
<li> sitkLanczosWindowedSinc </li>
<li> sitkBlackmanWindowedSinc </li>
</ul>

## Data - Retrospective Image Registration Evaluation

We will be using part of the training data from the Retrospective Image Registration Evaluation (<a href="http://www.insight-journal.org/rire/">RIRE</a>) project.

```
import SimpleITK as sitk

# Utility method that either downloads data from the MIDAS repository or
# if already downloaded returns the file name for reading from disk (cached data).
%run update_path_to_download_script
from downloaddata import fetch_data as fdata

# Always write output to a separate directory, we don't want to pollute the source directory.
import os
OUTPUT_DIR = 'Output'
```

## Utility functions

A number of utility callback functions for image display and for plotting the similarity metric during registration.

```
import matplotlib.pyplot as plt
%matplotlib inline

from ipywidgets import interact, fixed
from IPython.display import clear_output

# Callback invoked by the interact IPython method for scrolling through the image stacks of
# the two images (moving and fixed).
def display_images(fixed_image_z, moving_image_z, fixed_npa, moving_npa):
    # Create a figure with two subplots and the specified size.
    plt.subplots(1,2,figsize=(10,8))

    # Draw the fixed image in the first subplot.
    plt.subplot(1,2,1)
    plt.imshow(fixed_npa[fixed_image_z,:,:],cmap=plt.cm.Greys_r);
    plt.title('fixed image')
    plt.axis('off')

    # Draw the moving image in the second subplot.
    plt.subplot(1,2,2)
    plt.imshow(moving_npa[moving_image_z,:,:],cmap=plt.cm.Greys_r);
    plt.title('moving image')
    plt.axis('off')

    plt.show()

# Callback invoked by the IPython interact method for scrolling and modifying the alpha blending
# of an image stack of two images that occupy the same physical space.
def display_images_with_alpha(image_z, alpha, fixed, moving):
    # Alpha-blend the two slices at the given z index; both images share one grid.
    img = (1.0 - alpha)*fixed[:,:,image_z] + alpha*moving[:,:,image_z]
    plt.imshow(sitk.GetArrayViewFromImage(img),cmap=plt.cm.Greys_r);
    plt.axis('off')
    plt.show()

# Callback invoked when the StartEvent happens, sets up our new data.
def start_plot():
    global metric_values, multires_iterations

    metric_values = []
    multires_iterations = []

# Callback invoked when the EndEvent happens, do cleanup of data and figure.
def end_plot():
    global metric_values, multires_iterations

    del metric_values
    del multires_iterations
    # Close figure, we don't want to get a duplicate of the plot later on.
    plt.close()

# Callback invoked when the IterationEvent happens, update our data and display new figure.
def plot_values(registration_method):
    global metric_values, multires_iterations

    metric_values.append(registration_method.GetMetricValue())
    # Clear the output area (wait=True, to reduce flickering), and plot current data
    clear_output(wait=True)
    # Plot the similarity metric values
    plt.plot(metric_values, 'r')
    # Mark the resolution-change iterations with blue stars.
    plt.plot(multires_iterations, [metric_values[index] for index in multires_iterations], 'b*')
    plt.xlabel('Iteration Number',fontsize=12)
    plt.ylabel('Metric Value',fontsize=12)
    plt.show()

# Callback invoked when the sitkMultiResolutionIterationEvent happens, update the index into the
# metric_values list.
def update_multires_iterations():
    global metric_values, multires_iterations
    multires_iterations.append(len(metric_values))
```

## Read images

We first read the images, casting the pixel type to that required for registration (Float32 or Float64) and look at them.
```
# Read both volumes, casting to Float32 as required by the registration framework.
fixed_image =  sitk.ReadImage(fdata("training_001_ct.mha"), sitk.sitkFloat32)
moving_image = sitk.ReadImage(fdata("training_001_mr_T1.mha"), sitk.sitkFloat32)

interact(display_images, fixed_image_z=(0,fixed_image.GetSize()[2]-1), moving_image_z=(0,moving_image.GetSize()[2]-1), fixed_npa = fixed(sitk.GetArrayViewFromImage(fixed_image)), moving_npa=fixed(sitk.GetArrayViewFromImage(moving_image)));
```

## Initial Alignment

Use the CenteredTransformInitializer to align the centers of the two volumes and set the center of rotation to the center of the fixed image.

```
# Geometric initialization: align volume centers; rotation center is the
# fixed image's center.
initial_transform = sitk.CenteredTransformInitializer(fixed_image, moving_image, sitk.Euler3DTransform(), sitk.CenteredTransformInitializerFilter.GEOMETRY)

# Resample the moving image onto the fixed image's grid for visual inspection.
moving_resampled = sitk.Resample(moving_image, fixed_image, initial_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelID())

interact(display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), fixed = fixed(fixed_image), moving=fixed(moving_resampled));
```

## Registration

The specific registration task at hand estimates a 3D rigid transformation between images of different modalities. There are multiple components from each group (optimizers, similarity metrics, interpolators) that are appropriate for the task. Note that each component selection requires setting some parameter values. We have made the following choices:

<ul>
<li>Similarity metric, mutual information (Mattes MI):
<ul>
<li>Number of histogram bins, 50.</li>
<li>Sampling strategy, random.</li>
<li>Sampling percentage, 1%.</li>
</ul>
</li>
<li>Interpolator, sitkLinear.</li>
<li>Optimizer, gradient descent:
<ul>
<li>Learning rate, step size along traversal direction in parameter space, 1.0 .</li>
<li>Number of iterations, maximal number of iterations, 100.</li>
<li>Convergence minimum value, value used for convergence checking in conjunction with the energy profile of the similarity metric that is estimated in the given window size, 1e-6.</li>
<li>Convergence window size, number of values of the similarity metric which are used to estimate the energy profile of the similarity metric, 10.</li>
</ul>
</li>
</ul>

Perform registration using the settings given above, and take advantage of the built in multi-resolution framework, use a three tier pyramid.

In this example we plot the similarity metric's value during registration. Note that the change of scales in the multi-resolution framework is readily visible.

```
registration_method = sitk.ImageRegistrationMethod()

# Similarity metric settings.
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)

registration_method.SetInterpolator(sitk.sitkLinear)

# Optimizer settings.
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100, convergenceMinimumValue=1e-6, convergenceWindowSize=10)
registration_method.SetOptimizerScalesFromPhysicalShift()

# Setup for the multi-resolution framework.
registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2,1,0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()

# Don't optimize in-place, we would possibly like to run this cell multiple times.
registration_method.SetInitialTransform(initial_transform, inPlace=False)

# Connect all of the observers so that we can perform plotting during registration.
registration_method.AddCommand(sitk.sitkStartEvent, start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, end_plot)
registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent, update_multires_iterations)
registration_method.AddCommand(sitk.sitkIterationEvent, lambda: plot_values(registration_method))

final_transform = registration_method.Execute(sitk.Cast(fixed_image, sitk.sitkFloat32), sitk.Cast(moving_image, sitk.sitkFloat32))
```

## Post registration analysis

Query the registration method to see the metric value and the reason the optimization terminated.

The metric value allows us to compare multiple registration runs as there is a probabilistic aspect to our registration, we are using random sampling to estimate the similarity metric.

Always remember to query why the optimizer terminated. This will help you understand whether termination is too early, either due to thresholds being too tight, early termination due to small number of iterations - numberOfIterations, or too loose, early termination due to large value for minimal change in similarity measure - convergenceMinimumValue)

```
print('Final metric value: {0}'.format(registration_method.GetMetricValue()))
print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))
```

Now visually inspect the results.

```
# Resample with the FINAL transform this time.
moving_resampled = sitk.Resample(moving_image, fixed_image, final_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelID())

interact(display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), fixed = fixed(fixed_image), moving=fixed(moving_resampled));
```

If we are satisfied with the results, save them to file.

```
sitk.WriteImage(moving_resampled, os.path.join(OUTPUT_DIR, 'RIRE_training_001_mr_T1_resampled.mha'))
sitk.WriteTransform(final_transform, os.path.join(OUTPUT_DIR, 'RIRE_training_001_CT_2_mr_T1.tfm'))
```
github_jupyter
# Colab notebook or tutorial ### [How to run PyTorch with GPU and CUDA 8.0 support on Google Colab](https://www.dlology.com/blog/how-to-run-pytorch-with-gpu-and-cuda-92-support-on-google-colab/) # New Section ``` !nvidia-smi !cat /etc/*-release ``` ## Install [Cuda 8.0 ](https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&target_distro=Ubuntu&target_version=1710&target_type=deblocal) ``` !wget https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb # cd ../ !ls !dpkg -i cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb !ls /var/ !ls /var/cuda-repo-8-0-local-ga2 | grep .pub !apt-key add /var/cuda-repo-8-0-local-ga2/7fa2af80.pub !apt-get update !apt-get install cuda-8-0 ``` ## Install [PyTorch](https://pytorch.org/) with Cuda 8.0 support ``` !nvcc --version !pip install torch==0.4.0 #!pip install torchvision !git clone https://github.com/UTS-CAS/planercnn_with_adf.git cd planercnn_with_adf !pip install -r requirements.txt !ls /usr/local/cuda-8.0 # %%writefile make.sh # #!/usr/bin/env bash # # CUDA_PATH=/usr/local/cuda/ # export CUDA_PATH=/usr/local/cuda-9.2/ # #You may also want to ad the following # #export C_INCLUDE_PATH=/opt/cuda/include # export CXXFLAGS="-std=c++11" # export CFLAGS="-std=c99" # # python setup.py build_ext --inplace # # rm -rf build # CUDA_ARCH="-gencode arch=compute_30,code=sm_30 \ # -gencode arch=compute_35,code=sm_35 \ # -gencode arch=compute_50,code=sm_50 \ # -gencode arch=compute_52,code=sm_52 \ # -gencode arch=compute_60,code=sm_60 \ # -gencode arch=compute_61,code=sm_61 " # # compile NMS # cd nms/src/cuda/ # echo "Compiling nms kernels by nvcc..." 
# #nvcc -c -o nms_cuda_kernel.cu.o nms_cuda_kernel.cu -D GOOGLE_CUDA=1 -x cu -Xcompiler -fPIC $CUDA_ARCH # nvcc -c -o nms_kernel.cu.o nms_kernel.cu -D GOOGLE_CUDA=1 -x cu -Xcompiler -fPIC $CUDA_ARCH # cd ../../ # python build.py !export CUDA_PATH=/usr/local/cuda-8.0/ !nvcc --version !ls !sudo apt-get update && \ sudo apt-get install build-essential software-properties-common -y && \ sudo add-apt-repository ppa:ubuntu-toolchain-r/test -y && \ sudo apt-get update && \ sudo apt-get install gcc-5 g++-5 -y && \ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 50 --slave /usr/bin/g++ g++ /usr/bin/g++-5 && \ gcc -v cd /content/planercnn_with_adf/ !ls cd nms/src/cuda/ !gcc --version # !sudo dpkg --configure -a # run it in case below cmd fails !sudo apt-get install gcc-5 g++-5 g++-5-multilib gfortran-5 !sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-5 50 --slave /usr/bin/g++ g++ /usr/bin/g++-5 !sudo update-alternatives --config gcc !gcc --version !nvcc -c -o nms_kernel.cu.o nms_kernel.cu -x cu -Xcompiler -fPIC -arch=sm_60 cd ../../ !python build.py cd .. 
# Build the RoIAlign CUDA extension, then drive PlaneRCNN inference in
# batches over images copied from Google Drive (Colab notebook cells).
cd roialign/roi_align/src/cuda/
!nvcc -c -o crop_and_resize_kernel.cu.o crop_and_resize_kernel.cu -x cu -Xcompiler -fPIC -arch=sm_60
cd ../../
!python build.py
cd ../../
#!python evaluate_adf.py --methods=f --suffix=warping_refine --dataset=inference --customDataFolder=example_images
ls
mkdir inputs
# Camera intrinsics file expected by evaluate_adf.py: fx fy cx cy width height.
%%writefile inputs/camera.txt
587
587
320
240
640
480
# !zip -r testfile.zip test
!ls
cd /content/planercnn_with_adf/
# Download and unpack the pretrained refinement checkpoint.
!wget https://www.dropbox.com/s/yjcg6s57n581sk0/checkpoint.zip?dl=0
!mkdir checkpoint
!mv "checkpoint.zip?dl=0" "planercnn_refine.zip"
!mv planercnn_refine.zip checkpoint/
cd checkpoint/
!unzip planercnn_refine.zip
from google.colab import drive
drive.mount('/content/drive')
cp /content/drive/My\ Drive/computer_vision/session14_assignment/planercnn-master/inputs/Images/* /content/planercnn_with_adf/inputs/
## Update evaluate_adf.py - comment line 644, 645, 646
# rm /content/planercnn_with_adf/test/inference/*
ls /content/planercnn_with_adf/inputs/ | wc -l
from shutil import copyfile
import shutil
import os
import glob
cd /content/planercnn_with_adf
!mkdir input_temp
# All input images except the shared camera intrinsics file.
img_names_list1 = glob.glob('inputs/*')
img_names_list1.remove("inputs/camera.txt")
len(img_names_list1)

def copy_images(src, tar, input_filename):
    """Rename the inference outputs matching `src` after the input file name
    and copy the input image and final segmentation into directory `tar`."""
    files = glob.glob(src)
#     print(files)
    for f in files:
        filename = f.split("/")[-1]
        if('image' in filename or 'segmentation_0_final' in filename):
            if 'image_0' in filename:
                # Input image: keep the original stem, force a .png extension.
                new_input_filename = input_filename.split('.')[:-1][0]+'.png'
                new_filename = os.path.dirname(os.path.realpath(f))+"/"+new_input_filename
                os.rename(f,new_filename)
                shutil.copy(new_filename, f"{tar}{new_input_filename}")
            elif 'segmentation_0_final' in filename:
                # Segmentation result: suffix the original stem.
                new_output_filename = input_filename.split('.')[:-1][0]+'_segmentation_final.png'
                new_filename = os.path.dirname(os.path.realpath(f))+"/"+new_output_filename
                os.rename(f,new_filename)
                shutil.copy(new_filename, f"{tar}{new_output_filename}")

def clean_dir(dir):
    """Best-effort removal of every file in `dir` via shell rm."""
    os.system(f'rm -f {dir}*')

output = '/content/drive/My Drive/computer_vision/session14_assignment/planercnn-master/outputs/'
# Process one image per iteration: stage it in input_temp alongside
# camera.txt, run inference, then archive the renamed outputs to Drive.
for i in range(3590):
    if len(img_names_list1)>0:
        img_names_temp = img_names_list1[:1]
        print(f'running set - {i}')
        del img_names_list1[:1]
        os.system('rm -f input_temp/*')
        for item in img_names_temp:
            src = item
            filename = item.split("/")[1]
            dst = 'input_temp/'+filename
            copyfile(src, dst) # copy 100 images
        copyfile('inputs/camera.txt', 'input_temp/camera.txt') # copy camera.txt file
        os.system('rm -f test/inference/*') # Cleaning inference folder to clean before evaluation.
        os.system('python evaluate_adf.py --methods=f --suffix=warping_refine --dataset=inference --customDataFolder=input_temp')
        copy_images('test/inference/*', output, filename)
#         print(f'Yet to run - {len(img_names_list1)} images')
ls /content/drive/My\ Drive/computer_vision/session14_assignment/planercnn-master/outputs/ | wc -l
```
github_jupyter
# Fit $k_{ij}$ and $r_c^{ABij}$ interactions parameter of Ethanol and CPME --- Let's call $\underline{\xi}$ the optimization parameters of a mixture. In order to optimize them, you need to provide experimental phase equilibria data. This can include VLE, LLE and VLLE data. The objective function used for each equilibria type are shown below: ### Vapor-Liquid Equilibria Data $$ OF_{VLE}(\underline{\xi}) = w_y \sum_{j=1}^{Np} \left[ \sum_{i=1}^c (y_{i,j}^{cal} - y_{i,j}^{exp})^2 \right] + w_P \sum_{j=1}^{Np} \left[ \frac{P_{j}^{cal} - P_{j}^{exp}}{P_{j}^{exp}} \right]^2$$ Where, $Np$ is the number of experimental data points, $y_i$ is the vapor molar fraction of the component $i$ and $P$ is the bubble pressure. The superscripts $cal$ and $exp$ refers to the computed and experimental values, respectively. Finally, $w_y$ is the weight for the vapor composition error and $w_P$ is the weight for the bubble pressure error. ### Liquid-Liquid Equilibria Data $$ OF_{LLE}(\underline{\xi}) = w_x \sum_{j=1}^{Np} \sum_{i=1}^c \left[x_{i,j} - x_{i,j}^{exp}\right]^2 + w_w \sum_{j=1}^{Np} \sum_{i=1}^c \left[ w_{i,j} - w_{i,j}^{exp} \right]^2 $$ Where, $Np$ is the number of experimental data points, $x_i$ and $w_i$ are the molar fraction of the component $i$ on the liquids phases. The superscripts $cal$ and $exp$ refers to the computed and experimental values, respectively. Finally, $w_x$ and $w_w$ are the weights for the liquid 1 ($x$) and liquid 2 ($w$) composition error. 
### Vapor-Liquid-Liquid Equilibria Data

$$ OF_{VLLE}(\underline{\xi}) = w_x \sum_{j=1}^{Np} \sum_{i=1}^c \left[x_{i,j}^{cal} - x_{i,j}^{exp}\right]^2 + w_w \sum_{j=1}^{Np} \sum_{i=1}^c \left[w_{i,j}^{cal} - w_{i,j}^{exp}\right]^2 + w_y \sum_{j=1}^{Np} \sum_{i=1}^c \left[y_{i,j}^{cal} - y_{i,j}^{exp}\right]^2 + w_P \sum_{j=1}^{Np} \left[ \frac{P_{j}^{cal}}{P_{j}^{exp}} - 1\right]^2 $$

Where $Np$ is the number of experimental data points, and $y_i$, $x_i$ and $w_i$ are the molar fractions of component $i$ in the vapor and liquid phases, respectively. The superscripts $cal$ and $exp$ refer to the computed and experimental values, respectively. Finally, $w_x$ and $w_w$ are the weights for the liquid 1 ($x$) and liquid 2 ($w$) composition error, $w_y$ is the weight for the vapor composition error and $w_P$ is the weight for the three-phase equilibria pressure error.

If there is data for more than one equilibria type, the errors can be added accordingly. So the objective function becomes:

$$ OF(\underline{\xi}) = OF_{ELV}(\underline{\xi}) + OF_{ELL}(\underline{\xi}) + OF_{ELLV}(\underline{\xi})$$

---

This notebook has the purpose of showing how to optimize the $k_{ij}$ and $r_c^{ABij}$ for a mixture with induced association. For these mixtures the interaction parameters are shown below:

$$ \epsilon_{ij} = (1-k_{ij}) \frac{\sqrt{\sigma_i^3 \sigma_j^3}}{\sigma_{ij}^3} \sqrt{\epsilon_i \epsilon_j} ;\quad\epsilon_{ij}^{AB} = \frac{\epsilon^{AB} (self-associating)}{2} ;\quad r^{ABij}_c (fitted)$$

First, it is necessary to import the required modules.

```
import numpy as np

from sgtpy import component, mixture, saftvrmie
from sgtpy.fit import fit_cross
```

Now that the functions are available it is necessary to create the mixture.
``` ethanol = component('ethanol2C', ms = 1.7728, sigma = 3.5592 , eps = 224.50, lambda_r = 11.319, lambda_a = 6., eAB = 3018.05, rcAB = 0.3547, rdAB = 0.4, sites = [1,0,1], cii= 5.3141080872882285e-20) cpme = component('cpme', ms = 2.32521144, sigma = 4.13606074, eps = 343.91193798, lambda_r = 14.15484877, lambda_a = 6.0, npol = 1.91990385,mupol = 1.27, sites =[0,0,1], cii = 3.5213681817448466e-19) mix = mixture(ethanol, cpme) ``` Now the experimental equilibria data is read and a tuple is created. It includes the experimental liquid composition, vapor composition, equilibrium temperature and pressure. This is done with ```datavle = (Xexp, Yexp, Texp, Pexp)``` ``` # Experimental data obtained from Mejia, Cartes, J. Chem. Eng. Data, vol. 64, no. 5, pp. 1970–1977, 2019 # Experimental temperature saturation in K Texp = np.array([355.77, 346.42, 342.82, 340.41, 338.95, 337.78, 336.95, 336.29, 335.72, 335.3 , 334.92, 334.61, 334.35, 334.09, 333.92, 333.79, 333.72, 333.72, 333.81, 334.06, 334.58]) # Experimental pressure in Pa Pexp = np.array([50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000., 50000.]) # Experimental liquid composition Xexp = np.array([[0. , 0.065, 0.11 , 0.161, 0.203, 0.253, 0.301, 0.351, 0.402, 0.446, 0.497, 0.541, 0.588, 0.643, 0.689, 0.743, 0.785, 0.837, 0.893, 0.947, 1. ], [1. , 0.935, 0.89 , 0.839, 0.797, 0.747, 0.699, 0.649, 0.598, 0.554, 0.503, 0.459, 0.412, 0.357, 0.311, 0.257, 0.215, 0.163, 0.107, 0.053, 0. ]]) # Experimental vapor composition Yexp = np.array([[0. , 0.302, 0.411, 0.48 , 0.527, 0.567, 0.592, 0.614, 0.642, 0.657, 0.678, 0.694, 0.71 , 0.737, 0.753, 0.781, 0.801, 0.837, 0.883, 0.929, 1. ], [1. , 0.698, 0.589, 0.52 , 0.473, 0.433, 0.408, 0.386, 0.358, 0.343, 0.322, 0.306, 0.29 , 0.263, 0.247, 0.219, 0.199, 0.163, 0.117, 0.071, 0. 
]]) datavle = (Xexp, Yexp, Texp, Pexp) ``` The function ```fit_cross``` optimizes the $k_{ij}$ correction and $r_c^{ABij}$ distance. An initial guess is needed, as well as the mixture object, the index of the self-associating component and the equilibria data. Optionally, the ```minimize_options``` option allows modifying the minimizer default parameters. ``` #initial guesses for kij and rcij x0 = [0.01015194, 2.23153033] fit_cross(x0, mix, assoc=0, datavle=datavle) ``` If the mixture exhibits other equilibria types you can supply this experimental data to the ``datalle`` or ``datavlle`` parameters. - ``datalle``: (Xexp, Wexp, Texp, Pexp) - ``datavlle``: (Xexp, Wexp, Yexp, Texp, Pexp) You can specify the weights for each objective function through the following parameters: - ``weights_vle``: list or array_like, weights for the VLE objective function. - weights_vle[0] = weight for Y composition error, default to 1. - weights_vle[1] = weight for bubble pressure error, default to 1. - ``weights_lle``: list or array_like, weights for the LLE objective function. - weights_lle[0] = weight for X (liquid 1) composition error, default to 1. - weights_lle[1] = weight for W (liquid 2) composition error, default to 1. - ``weights_vlle``: list or array_like, weights for the VLLE objective function. - weights_vlle[0] = weight for X (liquid 1) composition error, default to 1. - weights_vlle[1] = weight for W (liquid 2) composition error, default to 1. - weights_vlle[2] = weight for Y (vapor) composition error, default to 1. - weights_vlle[3] = weight for equilibrium pressure error, default to 1. Additionally, you can set options to SciPy's ``minimize`` function using the ``minimize_options`` parameter. For more information just run: ```fit_cross?```
github_jupyter
<table width=60%> <tr style="background-color: white;"> <td><img src='https://www.creativedestructionlab.com/wp-content/uploads/2018/05/xanadu.jpg'></td> </tr> </table> --- <img src='https://raw.githubusercontent.com/XanaduAI/strawberryfields/master/doc/_static/strawberry-fields-text.png'> --- <br> <center> <h1> Gaussian boson sampling tutorial </h1></center> To get a feel for how Strawberry Fields works, let's try coding a quantum program, Gaussian boson sampling. ## Background information: Gaussian states A Gaussian state is one that can be described by a [Gaussian function](https://en.wikipedia.org/wiki/Gaussian_function) in the phase space. For example, a single mode Gaussian state squeezed in the $x$ quadrature by the squeezing operator $S(r)$ can be described by the following [Wigner quasiprobability distribution](https://en.wikipedia.org/wiki/Wigner_quasiprobability_distribution): $$W(x,p) = \frac{2}{\pi}e^{-2\sigma^2(x-\bar{x})^2 - 2(p-\bar{p})^2/\sigma^2}$$ where $\sigma$ represents the **squeezing**, and $\bar{x}$ and $\bar{p}$ are the mean **displacement**, respectively. For multimode states containing $N$ modes, this can be generalised; Gaussian states are uniquely defined by a [multivariate Gaussian function](https://en.wikipedia.org/wiki/Multivariate_normal_distribution), defined in terms of the **vector of means** ${\mu}$ and a **covariance matrix** $\sigma$. ### The position and momentum basis For example, consider a single mode in the position and momentum quadrature basis (the default for Strawberry Fields). Assuming a Gaussian state with displacement $\alpha = \bar{x}+i\bar{p}$ and squeezing $\xi = r e^{i\phi}$ in the phase space, it has a vector of means and a covariance matrix given by: $$ \mu = (\bar{x},\bar{p}),~~~~~~\sigma = SS^\dagger=R(\phi/2)\begin{bmatrix}e^{-2r} & 0 \\0 & e^{2r} \\\end{bmatrix}R(\phi/2)^T$$ where $S$ is the squeezing operator, and $R(\phi)$ is the standard two-dimensional rotation matrix. 
For multiple modes, in Strawberry Fields we use the convention $$ \mu = (\bar{x}_1,\bar{x}_2,\dots,\bar{x}_N,\bar{p}_1,\bar{p}_2,\dots,\bar{p}_N)$$ and therefore, considering $\phi=0$ for convenience, the multimode covariance matrix is simply $$\sigma = \text{diag}(e^{-2r_1},\dots,e^{-2r_N},e^{2r_1},\dots,e^{2r_N})\in\mathbb{C}^{2N\times 2N}$$ If a continuous-variable state *cannot* be represented in the above form (for example, a single photon Fock state or a cat state), then it is non-Gaussian. ### The annihilation and creation operator basis If we are instead working in the creation and annihilation operator basis, we can use the transformation of the single mode squeezing operator $$ S(\xi) \left[\begin{matrix}\hat{a}\\\hat{a}^\dagger\end{matrix}\right] = \left[\begin{matrix}\cosh(r)&-e^{i\phi}\sinh(r)\\-e^{-i\phi}\sinh(r)&\cosh(r)\end{matrix}\right] \left[\begin{matrix}\hat{a}\\\hat{a}^\dagger\end{matrix}\right]$$ resulting in $$\sigma = SS^\dagger = \left[\begin{matrix}\cosh(2r)&-e^{i\phi}\sinh(2r)\\-e^{-i\phi}\sinh(2r)&\cosh(2r)\end{matrix}\right]$$ For multiple Gaussian states with non-zero squeezing, the covariance matrix in this basis simply generalises to $$\sigma = \text{diag}(S_1S_1^\dagger,\dots,S_NS_N^\dagger)\in\mathbb{C}^{2N\times 2N}$$ ## Introduction to Gaussian boson sampling <div class="alert alert-info"> “If you need to wait exponential time for your single photon sources to emit simultaneously, then there would seem to be no advantage over classical computation. This is the reason why so far, boson sampling has only been demonstrated with 3-4 photons. 
When faced with these problems, until recently, all we could do was shrug our shoulders.” - [Scott Aaronson](https://www.scottaaronson.com/blog/?p=1579) </div> While [boson sampling](https://en.wikipedia.org/wiki/Boson_sampling) allows the experimental implementation of a quantum sampling problem that is computationally hard classically, one of the main issues it has in experimental setups is one of **scalability**, due to its dependence on an array of simultaneously emitting single photon sources. Currently, most physical implementations of boson sampling make use of a process known as [Spontaneous Parametric Down-Conversion](http://en.wikipedia.org/wiki/Spontaneous_parametric_down-conversion) to generate the single photon source inputs. Unfortunately, this method is non-deterministic - as the number of modes in the apparatus increases, the average time required until every photon source emits a simultaneous photon increases *exponentially*. In order to simulate a *deterministic* single photon source array, several variations on boson sampling have been proposed; the most well known being scattershot boson sampling ([Lund, 2014](https://link.aps.org/doi/10.1103/PhysRevLett.113.100502)). However, a recent boson sampling variation by [Hamilton et al.](https://link.aps.org/doi/10.1103/PhysRevLett.119.170501) negates the need for single photon Fock states altogether, by showing that **incident Gaussian states** - in this case, single mode squeezed states - can produce problems in the same computational complexity class as boson sampling. Even more significantly, this negates the scalability problem with single photon sources, as single mode squeezed states can be easily simultaneously generated experimentally. Aside from changing the input states from single photon Fock states to Gaussian states, the Gaussian boson sampling scheme appears quite similar to that of boson sampling: 1. 
$N$ single mode squeezed states $\left|{\xi_i}\right\rangle$, with squeezing parameters $\xi_i=r_ie^{i\phi_i}$, enter an $N$ mode linear interferometer with unitary $U$. <br> 2. The output of the interferometer is denoted $\left|{\psi'}\right\rangle$. Each output mode is then measured in the Fock basis, $\bigotimes_i n_i\left|{n_i}\middle\rangle\middle\langle{n_i}\right|$. Without loss of generality, we can absorb the squeezing parameter $\phi$ into the interferometer, and set $\phi=0$ for convenience. The covariance matrix **in the creation and annihilation operator basis** at the output of the interferometer is then given by: $$\sigma_{out} = \frac{1}{2} \left[ \begin{matrix}U&0\\0&U^*\end{matrix} \right]\sigma_{in} \left[ \begin{matrix}U^\dagger&0\\0&U^T\end{matrix} \right]$$ Using phase space methods, [Hamilton et al.](https://link.aps.org/doi/10.1103/PhysRevLett.119.170501) showed that the probability of measuring a Fock state is given by $$\left|\left\langle{n_1,n_2,\dots,n_N}\middle|{\psi'}\right\rangle\right|^2 = \frac{\left|\text{Haf}[(U\bigoplus_i\tanh(r_i)U^T)]_{st}\right|^2}{n_1!n_2!\cdots n_N!\sqrt{|\sigma_{out}+I/2|}},$$ i.e. the sampled single photon probability distribution is proportional to the **Hafnian** of a submatrix of $U\bigoplus_i\tanh(r_i)U^T$, dependent upon the output covariance matrix. <div class="alert alert-success" style="border: 0px; border-left: 3px solid #119a68; color: black; background-color: #daf0e9"> <p style="color: #119a68;">**The Hafnian**</p> The Hafnian of a matrix is defined by <br><br> $$\text{Haf}(A) = \frac{1}{n!2^n}\sum_{\sigma=S_{2N}}\prod_{i=1}^N A_{\sigma(2i-1)\sigma(2i)}$$ <br> $S_{2N}$ is the set of all permutations of $2N$ elements. In graph theory, the Hafnian calculates the number of perfect <a href="https://en.wikipedia.org/wiki/Matching_(graph_theory)">matchings</a> in an **arbitrary graph** with adjacency matrix $A$. 
<br> Compare this to the permanent, which calculates the number of perfect matchings on a *bipartite* graph - the Hafnian turns out to be a generalisation of the permanent, with the relationship $$\begin{align} \text{Per}(A) = \text{Haf}\left(\left[\begin{matrix} 0&A\\ A^T&0 \end{matrix}\right]\right) \end{align}$$ As any algorithm that could calculate (or even approximate) the Hafnian could also calculate the permanent - a #P problem - it follows that calculating or approximating the Hafnian must also be a classically hard problem. </div> ### Equally squeezed input states In the case where all the input states are squeezed equally with squeezing factor $\xi=r$ (i.e. so $\phi=0$), we can simplify the denominator into a much nicer form. It can be easily seen that, due to the unitarity of $U$, $$\left[ \begin{matrix}U&0\\0&U^*\end{matrix} \right] \left[ \begin{matrix}U^\dagger&0\\0&U^T\end{matrix} \right] = \left[ \begin{matrix}UU^\dagger&0\\0&U^*U^T\end{matrix} \right] =I$$ Thus, we have $$\begin{align} \sigma_{out} +\frac{1}{2}I &= \sigma_{out} + \frac{1}{2} \left[ \begin{matrix}U&0\\0&U^*\end{matrix} \right] \left[ \begin{matrix}U^\dagger&0\\0&U^T\end{matrix} \right] = \left[ \begin{matrix}U&0\\0&U^*\end{matrix} \right] \frac{1}{2} \left(\sigma_{in}+I\right) \left[ \begin{matrix}U^\dagger&0\\0&U^T\end{matrix} \right] \end{align}$$ where we have substituted in the expression for $\sigma_{out}$. 
Taking the determinants of both sides, the two block diagonal matrices containing $U$ are unitary, and thus have determinant 1, resulting in $$\left|\sigma_{out} +\frac{1}{2}I\right| =\left|\frac{1}{2}\left(\sigma_{in}+I\right)\right|=\left|\frac{1}{2}\left(SS^\dagger+I\right)\right| $$ By expanding out the right hand side, and using various trig identities, it is easy to see that this simply reduces to $\cosh^{2N}(r)$ where $N$ is the number of modes; thus the Gaussian boson sampling problem in the case of equally squeezed input modes reduces to $$\left|\left\langle{n_1,n_2,\dots,n_N}\middle|{\psi'}\right\rangle\right|^2 = \frac{\left|\text{Haf}[(UU^T\tanh(r))]_{st}\right|^2}{n_1!n_2!\cdots n_N!\cosh^N(r)},$$ ## The Gaussian boson sampling circuit The multimode linear interferometer can be decomposed into two-mode beamsplitters (`BSgate`) and single-mode phase shifters (`Rgate`) (<a href="https://doi.org/10.1103/physrevlett.73.58">Reck, 1994</a>), allowing for an almost trivial translation into a continuous-variable quantum circuit. For example, in the case of a 4 mode interferometer, with arbitrary $4\times 4$ unitary $U$, the continuous-variable quantum circuit for Gaussian boson sampling is given by <img src="https://s3.amazonaws.com/xanadu-img/gaussian_boson_sampling.svg" width=70%/> In the above, * the single mode squeeze states all apply identical squeezing $\xi=r$, * the detectors perform Fock state measurements (i.e. measuring the photon number of each mode), * the parameters of the beamsplitters and the rotation gates determines the unitary $U$. For $N$ input modes, we must have a minimum of $N$ columns in the beamsplitter array ([Clements, 2016](https://arxiv.org/abs/1603.08788)). 
## Simulating boson sampling in Strawberry Fields ``` import strawberryfields as sf from strawberryfields.ops import * from strawberryfields.utils import random_interferometer ``` Strawberry Fields makes this easy; there is an `Interferometer` quantum operation, and a utility function that allows us to generate the matrix representing a random interferometer. ``` U = random_interferometer(4) ``` The lack of Fock states and non-linear operations means we can use the Gaussian backend to simulate Gaussian boson sampling. In this example program, we are using input states with squeezing parameter $\xi=1$, and the randomly chosen interferometer generated above. ``` eng = sf.Engine('gaussian') gbs = sf.Program(4) with gbs.context as q: # prepare the input squeezed states S = Sgate(1) All(S) | q # interferometer Interferometer(U) | q MeasureFock() | q results = eng.run(gbs, run_options={"shots":10}) state = results.state # Note: Running this cell will generate a warning. This is just the Gaussian backend of Strawberryfields telling us # that, although it can carry out the MeasureFock operation, it will not update the state of the circuit after doing so, # since the resulting state would be non-Gaussian. For this notebook, the warning can be safely ignored. ``` We can see the decomposed beamsplitters and rotation gates, by calling `eng.print_applied()`: ``` eng.print_applied() ``` <div class="alert alert-success" style="border: 0px; border-left: 3px solid #119a68; color: black; background-color: #daf0e9"> <p style="color: #119a68;">**Available decompositions**</p> Check out our <a href="https://strawberryfields.readthedocs.io/en/stable/conventions/decompositions.html">documentation</a> to see the available CV decompositions available in Strawberry Fields. </div> We can also see some of the measurement samples from this circuit within `results.samples`. These correspond to independent runs of the Gaussian Boson Sampling circuit. 
``` results.samples ``` ## Analysis Let's now verify the Gaussian boson sampling result, by comparing the output Fock state probabilities to the Hafnian, using the relationship $$\left|\left\langle{n_1,n_2,\dots,n_N}\middle|{\psi'}\right\rangle\right|^2 = \frac{\left|\text{Haf}[(UU^T\tanh(r))]_{st}\right|^2}{n_1!n_2!\cdots n_N!\cosh^N(r)}$$ ### Calculating the Hafnian For the right hand side numerator, we first calculate the submatrix $[(UU^T\tanh(r))]_{st}$: ``` import numpy as np B = (np.dot(U, U.T) * np.tanh(1)) ``` In Gaussian boson sampling, we determine the submatrix by taking the rows and columns corresponding to the measured Fock state. For example, to calculate the submatrix in the case of the output measurement $\left|{1,1,0,0}\right\rangle$, ``` B[:,[0,1]][[0,1]] ``` To calculate the Hafnian in Python, we can use the direct definition $$\text{Haf}(A) = \frac{1}{n!2^n} \sum_{\sigma \in S_{2n}} \prod_{j=1}^n A_{\sigma(2j - 1), \sigma(2j)}$$ Notice that this function counts each term in the definition multiple times, and renormalizes to remove the multiple counts by dividing by a factor $\frac{1}{n!2^n}$. **This function is extremely slow!** ``` from itertools import permutations from scipy.special import factorial def Haf(M): n = len(M) m = int(n/2) haf = 0.0 for i in permutations(range(n)): prod = 1.0 for j in range(m): prod *= M[i[2 * j], i[2 * j + 1]] haf += prod return haf / (factorial(m) * (2 ** m)) ``` ## Comparing to the SF result In Strawberry Fields, both Fock and Gaussian states have the method `fock_prob()`, which returns the probability of measuring that particular Fock state. 
#### Let's compare the case of measuring at the output state $\left|0,1,0,1\right\rangle$: ``` B = (np.dot(U, U.T) * np.tanh(1))[:, [1, 3]][[1, 3]] np.abs(Haf(B)) ** 2 / np.cosh(1) ** 4 state.fock_prob([0, 1, 0, 1]) ``` #### For the measurement result $\left|2,0,0,0\right\rangle$: ``` B = (np.dot(U, U.T) * np.tanh(1))[:, [0, 0]][[0, 0]] np.abs(Haf(B)) ** 2 / (2 * np.cosh(1) ** 4) state.fock_prob([2, 0, 0, 0]) ``` #### For the measurement result $\left|1,1,0,0\right\rangle$: ``` B = (np.dot(U, U.T) * np.tanh(1))[:, [0, 1]][[0, 1]] np.abs(Haf(B)) ** 2 / np.cosh(1) ** 4 state.fock_prob([1, 1, 0, 0]) ``` #### For the measurement result $\left|1,1,1,1\right\rangle$, this corresponds to the full matrix $B$: ``` B = (np.dot(U,U.T) * np.tanh(1)) np.abs(Haf(B)) ** 2 / np.cosh(1) ** 4 state.fock_prob([1, 1, 1, 1]) ``` #### For the measurement result $\left|0,0,0,0\right\rangle$, this corresponds to a **null** submatrix, which has a Hafnian of 1: ``` 1 / np.cosh(1) ** 4 state.fock_prob([0, 0, 0, 0]) ``` As you can see, like in the boson sampling tutorial, they agree with almost negligable difference. <div class="alert alert-success" style="border: 0px; border-left: 3px solid #119a68; color: black; background-color: #daf0e9"> <p style="color: #119a68;">**Exercises**</p> Repeat this notebook with <ol> <li> A higher value for <tt>shots</tt> in <tt>eng.run()</tt>, and compare the relative probabilties of events with the expected values.</li> <li> A Fock backend such as NumPy, instead of the Gaussian backend</li> <li> Different beamsplitter and rotation parameters</li> <li> Input states with *differing* squeezed values $r_i$. You will need to modify the code to take into account the fact that the output covariance matrix determinant must now be calculated! </ol> </div>
github_jupyter
``` # Manipulação e tratamento das bases import pandas as pd import numpy as np #Pré-Processamento das bases !pip install imblearn from imblearn.over_sampling import SMOTE from sklearn.model_selection import train_test_split #Modelagem de Dados from sklearn.ensemble import GradientBoostingClassifier from sklearn.metrics import accuracy_score Antes= pd.read_csv('Base_Tratada.csv', sep= ',') Antes= Antes.loc[Antes['CO_MUN_NOT'].isin([330490])] Antes=Antes[(Antes['Periodo']==1.0)] Antes= Antes.drop(columns=["CO_MUN_NOT", "Periodo"]) Antes.head() ``` # PRÉ-PROCESSAMENTO ``` Antes['CS_GESTANT'].replace({1.0: 1, 2.0: 1, 3.0 :1, 4.0 : 1}, inplace= True) Antes['CS_GESTANT'].replace({5.0: 0, 6.0:0, 9.0:0}, inplace= True) Antes['CS_RACA'].fillna(9,inplace= True) Antes['CS_ESCOL_N'].fillna(9,inplace= True) Antes['SURTO_SG'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['SURTO_SG'].fillna(0,inplace= True) Antes['NOSOCOMIAL'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['NOSOCOMIAL'].fillna(0,inplace= True) Antes['FEBRE'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['FEBRE'].fillna(0,inplace= True) Antes['TOSSE'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['TOSSE'].fillna(0,inplace= True) Antes['GARGANTA'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['GARGANTA'].fillna(0,inplace= True) Antes['DISPNEIA'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['DISPNEIA'].fillna(0,inplace= True) Antes['DESC_RESP'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['DESC_RESP'].fillna(0,inplace= True) Antes['SATURACAO'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['SATURACAO'].fillna(0,inplace= True) Antes['DIARREIA'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['DIARREIA'].fillna(0,inplace= True) Antes['VOMITO'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['VOMITO'].fillna(0,inplace= True) Antes['PUERPERA'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['PUERPERA'].fillna(0,inplace= True) Antes['CARDIOPATI'].replace({2.0: 0, 9.0: 0}, inplace= True) 
Antes['CARDIOPATI'].fillna(0,inplace= True) Antes['HEMATOLOGI'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['HEMATOLOGI'].fillna(0,inplace= True) Antes['SIND_DOWN'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['SIND_DOWN'].fillna(0,inplace= True) Antes['HEPATICA'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['HEPATICA'].fillna(0,inplace= True) Antes['ASMA'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['ASMA'].fillna(0,inplace= True) Antes['DIABETES'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['DIABETES'].fillna(0,inplace= True) Antes['NEUROLOGIC'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['NEUROLOGIC'].fillna(0,inplace= True) Antes['PNEUMOPATI'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['PNEUMOPATI'].fillna(0,inplace= True) Antes['IMUNODEPRE'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['IMUNODEPRE'].fillna(0,inplace= True) Antes['RENAL'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['RENAL'].fillna(0,inplace= True) Antes['OBESIDADE'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['OBESIDADE'].fillna(0,inplace= True) Antes['ASMA'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['ASMA'].fillna(0,inplace= True) Antes['ANTIVIRAL'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['ANTIVIRAL'].fillna(0,inplace= True) Antes['UTI'].replace({2.0: 0, 9.0: 0}, inplace= True) Antes['UTI'].fillna(0,inplace= True) Antes['SUPORT_VEN'].replace({3.0: 0, 9.0: 0}, inplace= True) Antes['SUPORT_VEN'].fillna(0,inplace= True) Antes['PCR_RESUL'].fillna(4,inplace= True) Antes['HISTO_VGM'].replace({0: 2}, inplace= True) Antes['DOR_ABD'].replace({9.0: 0, 2.0 :0}, inplace= True) Antes['DOR_ABD'].fillna(0,inplace= True) Antes['FADIGA'].replace({9.0: 0, 2.0 :0}, inplace= True) Antes['FADIGA'].fillna(0,inplace= True) Antes['PERD_OLFT'].replace({9.0: 0, 2.0 :0}, inplace= True) Antes['PERD_OLFT'].fillna(0,inplace= True) Antes['PERD_PALA'].replace({9.0: 0, 2.0 :0}, inplace= True) Antes['PERD_PALA'].fillna(0,inplace= True) Antes['VACINA'].fillna(0,inplace= True) 
Antes['FATOR_RISC'].replace({'S': 1, 'N':2, '1':1, '2':2}, inplace= True) Antes['FATOR_RISC'].fillna(0,inplace= True) ``` - Resetando o Index novamente. ``` Antes= Antes.reset_index(drop=True) Antes.head() ``` - Aplicação da Dummy nas Features Categóricas ``` Antes=pd.get_dummies(Antes, columns=['CS_SEXO', 'CS_GESTANT', 'CS_RACA', 'CS_ESCOL_N', 'SURTO_SG', 'NOSOCOMIAL', 'FEBRE', 'TOSSE', 'GARGANTA', 'DISPNEIA', 'DESC_RESP', 'SATURACAO', 'DIARREIA', 'VOMITO', 'PUERPERA', 'FATOR_RISC', 'CARDIOPATI', 'HEMATOLOGI', 'SIND_DOWN', 'HEPATICA', 'ASMA', 'DIABETES', 'NEUROLOGIC', 'PNEUMOPATI', 'IMUNODEPRE', 'RENAL', 'OBESIDADE', 'VACINA', 'ANTIVIRAL', 'UTI', 'SUPORT_VEN', 'PCR_RESUL', 'HISTO_VGM', 'DOR_ABD', 'FADIGA', 'PERD_OLFT', 'PERD_PALA'], drop_first=True) Antes.head() ``` # Verificando o Balanceamento ``` Antes["EVOLUCAO"].value_counts(normalize=True) X = Antes[['IDADE_ANOS','CS_SEXO_M','CS_RACA_4.0','FEBRE_1.0','DISPNEIA_1.0','SATURACAO_1.0','UTI_1.0', 'SUPORT_VEN_1.0', 'SUPORT_VEN_2.0', 'PCR_RESUL_2.0','TOSSE_1.0','DESC_RESP_1.0', 'FATOR_RISC_2']] y = Antes['EVOLUCAO'] Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3, random_state=42) Xtrain.shape, Xtest.shape, ytrain.shape, ytest.shape smote = SMOTE(sampling_strategy = 'minority', random_state = 42) Xtrain_over, ytrain_over = smote.fit_resample(Xtrain,ytrain) Xtest_over, ytest_over = smote.fit_resample(Xtest,ytest) Xtrain_over.shape, ytrain_over.shape, Xtest_over.shape, ytest_over.shape ``` # Aplicação do Modelo Escolhido ``` random_state=42 GRA = GradientBoostingClassifier() GRA.fit(Xtrain_over, ytrain_over) previsoes = GRA.predict(Xtest_over) previsoes accuracy_score(ytest_over, previsoes) # Testar Modelo idade = 43.0 sexo = 1 raca = 0 febre = 1 dispneia = 1 saturacao = 0 uti = 1 suport1 = 1 suport2 = 0 pcr = 1 tosse = 1 descresp = 0 frisc = 0 prediction = GRA.predict(np.array([idade, sexo, raca, febre, dispneia, saturacao, uti, suport1, suport2, pcr, tosse, descresp, frisc]).reshape(1, -1)) 
print(prediction) ```
github_jupyter
``` import numpy as np import MDAnalysis as mda import nglview as nv from sklearn.decomposition import PCA import requests from Bio.PDB import * ``` ### Overall settings ``` movav_resis = 3 # number of residues used to calculate moving averages for CA positions (must be 3,5 or 7) vector_scale_factor = 10 vector_width = 1.0 vector_colors = [ [0,0,1], [1,0,0], [0,1,0] ] ``` ### G protein details Give the residue numbers for Galpha H5.13 to H5.23 (as used in DOI [10.1073/pnas.1820944116](https://doi.org/10.1073/pnas.1820944116)) and the Galpha chain ``` h5_inds = [341,351] # Gi residue numbers #h5_inds = [337,347] # Gi (6OY9, 6OYA) #h5_inds = [346,356] # Gq (+7DFL) #h5_inds = [233,243] # Gq (6WHA) #h5_inds = [381,391] # Gs #h5_inds = [371,381] # Gs (7D3S, 6GDG) #h5_inds = [2381,2391] # Gs (6E67) #h5_inds = [367,377] # Gs (7JJO) gpro_chain = "A" ``` ### GPCR details Then you can either read the GPCR details manually by running this... ``` pdb_filename = "PDB_filename.pdb" gpcr_chain = "R" gpcr_name = "uniprotname_variant" ``` OR fetch GPCR structure from PDB: ``` pdb = "7DFL" # Download the PDB file pdbl = PDBList() pdb_filename = pdbl.retrieve_pdb_file( pdb, file_format = "pdb" ) # Get the protein name from GPCRdb url = 'https://gpcrdb.org/services/structure/' + pdb + '/' response = requests.get(url) protein_data = response.json() gpcr_chain = protein_data['preferred_chain'] gpcr_name = protein_data['protein'] print( "gpcr_chain:", gpcr_chain ) print( "gpcr_name:", gpcr_name ) ``` Now fetch the GPCR details from GPCRdb by running the following: ``` # For each helix, get the start and end residue numbers from GPCRdb url = 'https://gpcrdb.org/services/residues/' + gpcr_name + '/' response = requests.get(url) protein_data = response.json() tm_endpoints = np.zeros((8,2)) helix_no = 0 for i in protein_data: generic_no = i['display_generic_number'] if generic_no == None: continue expected_prefix = str(helix_no) + "." next_prefix = str(helix_no + 1) + "." 
sequence_no = i['sequence_number'] if generic_no[:2] == next_prefix: tm_endpoints[helix_no,0] = sequence_no helix_no += 1 if generic_no[1] == ".": tm_endpoints[helix_no-1,1] = sequence_no ``` ### Run the analysis ``` u = mda.Universe( pdb_filename ) # Open NGLView instance view1 = nv.show_mdanalysis(u) view1.remove_cartoon() view1.remove_ball_and_stick() view1.add_cartoon('protein',color='#00BB00', opacity=0.3) # Fit a vector to residues H5.13 to H5.23 and plot it h5_selection = "(segid %s) and (resnum %d-%d) and (name CA)" % ( gpro_chain, h5_inds[0], h5_inds[1] ) h5_CAs = u.select_atoms( h5_selection ) h5_PCA = h5_CAs.principal_axes() h5_cog = h5_CAs.center_of_geometry() view1.shape.add_arrow( ( h5_cog + vector_scale_factor * h5_PCA[2] ).tolist(), ( h5_cog - vector_scale_factor * h5_PCA[2] ).tolist(), vector_colors[0], vector_width ) # For each TM, fit a vector to CA's #4-9 from the extracellular side tm_vectors = np.zeros((7,3)) bundle_resnum = "" for i in range(7): # Get the indices for extracellular residues no. 
4-9 extracell_index = 0 tm_startres = tm_endpoints[i,0] + 3 if i%2 != 0: extracell_index = -1 tm_startres = tm_endpoints[i,1] - 8 print( "For TM%d, using residues %d-%d" % ( i+1, tm_startres, tm_startres + 5 ) ) tm_resnum = "(resnum %d-%d)" % ( tm_startres, tm_startres + 5 ) if i > 0: bundle_resnum += " or " bundle_resnum += tm_resnum # Use CA positions from PDB file tm_CAs = u.select_atoms( "segid %s and %s and (name CA)" % ( gpcr_chain, tm_resnum ) ) if len( tm_CAs ) < 6: continue tm_PCA = tm_CAs.principal_axes() principal_idx = 2 tm_cog = tm_CAs.center_of_geometry() # Overwrite: Use moving average of three CAs tm_CAs = u.select_atoms( "segid %s and (resnum %d-%d) and (name CA)" % ( gpcr_chain, tm_startres - ( movav_resis - 1 )/2, tm_startres + 5 + ( movav_resis - 1 )/2 ) ) tm_pos = tm_CAs.positions tm_pos_movav = np.zeros( (6,3) ) for k in range(6): tm_pos_movav[k,:] = np.average( tm_pos[ k:(k+movav_resis-1), : ], axis = 0 ) pca = PCA() pca.fit( tm_pos_movav ) tm_PCA = pca.components_ principal_idx = 0 # Make sure the vectors are pointing in the same direction dist_to_endpoint = np.zeros(2) for k in [0,1]: dist_to_endpoint[k] = np.sum( np.power( tm_cog + np.power( -1, k) * tm_PCA[ principal_idx ] - tm_pos[ extracell_index ], 2 ) ) if dist_to_endpoint[1] > dist_to_endpoint[0]: tm_PCA[0] *= -1 tm_vectors[i,:] = tm_PCA[ principal_idx ] # Plot the TM vector view1.shape.add_arrow( ( tm_cog - vector_scale_factor * tm_vectors[i,:] ).tolist(), ( tm_cog + vector_scale_factor * tm_vectors[i,:] ).tolist(), vector_colors[1], vector_width ) # Now calculate the GPCR axis by summing the TM vectors bundle_vector = np.sum( tm_vectors, axis = 0 ) bundle_vector /= np.linalg.norm( bundle_vector ) bundle_CAs = u.select_atoms( "segid %s and (%s) and (name CA)" % ( gpcr_chain, bundle_resnum ) ) bundle_cog = bundle_CAs.center_of_geometry() view1.shape.add_arrow( ( bundle_cog - vector_scale_factor * bundle_vector ).tolist(), ( bundle_cog + 4 * vector_scale_factor * bundle_vector 
).tolist(), vector_colors[2], vector_width ) # Calculate the angle between the H5 vector and the GPCR axis angle = np.round( np.rad2deg( np.arccos( np.dot( bundle_vector, h5_PCA[2] ) ) ) ) print( "Angle: %d degrees" % np.min( [ angle, 180-angle ] ) ) # Show the NGLView instance view1 ```
github_jupyter
This notebook is part of the $\omega radlib$ documentation: https://docs.wradlib.org. Copyright (c) $\omega radlib$ developers. Distributed under the MIT License. See LICENSE.txt for more info. # How to use wradlib's ipol module for interpolation tasks? ``` import wradlib.ipol as ipol from wradlib.util import get_wradlib_data_file from wradlib.vis import plot_ppi import numpy as np import matplotlib.pyplot as pl import datetime as dt import warnings warnings.filterwarnings('ignore') try: get_ipython().magic("matplotlib inline") except: pl.ion() ``` ### 1-dimensional example Includes Nearest Neighbours, Inverse Distance Weighting, and Ordinary Kriging. ``` # Synthetic observations xsrc = np.arange(10)[:, None] vals = np.sin(xsrc).ravel() # Define target coordinates xtrg = np.linspace(0, 20, 100)[:, None] # Set up interpolation objects # IDW idw = ipol.Idw(xsrc, xtrg) # Nearest Neighbours nn = ipol.Nearest(xsrc, xtrg) # Linear ok = ipol.OrdinaryKriging(xsrc, xtrg) # Plot results pl.figure(figsize=(10,5)) pl.plot(xsrc.ravel(), vals, 'bo', label="Observation") pl.plot(xtrg.ravel(), idw(vals), 'r-', label="IDW interpolation") pl.plot(xtrg.ravel(), nn(vals), 'k-', label="Nearest Neighbour interpolation") pl.plot(xtrg.ravel(), ok(vals), 'g-', label="Ordinary Kriging") pl.xlabel("Distance", fontsize="large") pl.ylabel("Value", fontsize="large") pl.legend(loc="bottomright") ``` ### 2-dimensional example Includes Nearest Neighbours, Inverse Distance Weighting, Linear Interpolation, and Ordinary Kriging. 
``` # Synthetic observations and source coordinates src = np.vstack( (np.array([4, 7, 3, 15]), np.array([8, 18, 17, 3]))).transpose() np.random.seed(1319622840) vals = np.random.uniform(size=len(src)) # Target coordinates xtrg = np.linspace(0, 20, 40) ytrg = np.linspace(0, 20, 40) trg = np.meshgrid(xtrg, ytrg) trg = np.vstack( (trg[0].ravel(), trg[1].ravel()) ).T # Interpolation objects idw = ipol.Idw(src, trg) nn = ipol.Nearest(src, trg) linear = ipol.Linear(src, trg) ok = ipol.OrdinaryKriging(src, trg) # Subplot layout def gridplot(interpolated, title=""): pm = ax.pcolormesh(xtrg, ytrg, interpolated.reshape( (len(xtrg), len(ytrg)) ) ) pl.axis("tight") ax.scatter(src[:, 0], src[:, 1], facecolor="None", s=50, marker='s') pl.title(title) pl.xlabel("x coordinate") pl.ylabel("y coordinate") # Plot results fig = pl.figure(figsize=(8,8)) ax = fig.add_subplot(221, aspect="equal") gridplot(idw(vals), "IDW") ax = fig.add_subplot(222, aspect="equal") gridplot(nn(vals), "Nearest Neighbours") ax = fig.add_subplot(223, aspect="equal") gridplot(np.ma.masked_invalid(linear(vals)), "Linear interpolation") ax = fig.add_subplot(224, aspect="equal") gridplot(ok(vals), "Ordinary Kriging") pl.tight_layout() ``` ### Using the convenience function ipol.interpolation in order to deal with missing values **(1)** Exemplified for one dimension in space and two dimensions of the source value array (could e.g. be two time steps). ``` # Synthetic observations (e.g. two time steps) src = np.arange(10)[:, None] vals = np.hstack((1.+np.sin(src), 5. 
+ 2.*np.sin(src))) # Target coordinates trg = np.linspace(0, 20, 100)[:, None] # Here we introduce missing values in the second dimension of the source value array vals[3:5, 1] = np.nan # interpolation using the convenience function "interpolate" idw_result = ipol.interpolate(src, trg, vals, ipol.Idw, nnearest=4) nn_result = ipol.interpolate(src, trg, vals, ipol.Nearest) # Plot results fig = pl.figure(figsize=(10,5)) ax = fig.add_subplot(111) pl1 = ax.plot(trg, idw_result, 'b-', label="IDW") pl2 = ax.plot(trg, nn_result, 'k-', label="Nearest Neighbour") pl3 = ax.plot(src, vals, 'ro', label="Observations") ``` **(2)** Exemplified for two dimensions in space and two dimensions of the source value array (e.g. time steps), containing also NaN values (here we only use IDW interpolation) ``` # Just a helper function for repeated subplots def plotall(ax, trgx, trgy, src, interp, pts, title, vmin, vmax): ix = np.where(np.isfinite(pts)) ax.pcolormesh(trgx, trgy, interp.reshape( (len(trgx),len(trgy) ) ), vmin=vmin, vmax=vmax ) ax.scatter(src[ix, 0].ravel(), src[ix, 1].ravel(), c=pts.ravel()[ix], s=20, marker='s', vmin=vmin, vmax=vmax) ax.set_title(title) pl.axis("tight") # Synthetic observations src = np.vstack( (np.array([4, 7, 3, 15]), np.array([8, 18, 17, 3])) ).T np.random.seed(1319622840 + 1) vals = np.round(np.random.uniform(size=(len(src), 2)), 1) # Target coordinates trgx = np.linspace(0, 20, 100) trgy = np.linspace(0, 20, 100) trg = np.meshgrid(trgx, trgy) trg = np.vstack((trg[0].ravel(), trg[1].ravel())).transpose() result = ipol.interpolate(src, trg, vals, ipol.Idw, nnearest=4) # Now introduce NaNs in the observations vals_with_nan = vals.copy() vals_with_nan[1, 0] = np.nan vals_with_nan[1:3, 1] = np.nan result_with_nan = ipol.interpolate(src, trg, vals_with_nan, ipol.Idw, nnearest=4) vmin = np.concatenate((vals.ravel(), result.ravel())).min() vmax = np.concatenate((vals.ravel(), result.ravel())).max() fig = pl.figure(figsize=(8,8)) ax = fig.add_subplot(221) 
plotall(ax, trgx, trgy, src, result[:, 0], vals[:, 0], '1st dim: no NaNs', vmin, vmax) ax = fig.add_subplot(222) plotall(ax, trgx, trgy, src, result[:, 1], vals[:, 1], '2nd dim: no NaNs', vmin, vmax) ax = fig.add_subplot(223) plotall(ax, trgx, trgy, src, result_with_nan[:, 0], vals_with_nan[:, 0], '1st dim: one NaN', vmin, vmax) ax = fig.add_subplot(224) plotall(ax, trgx, trgy, src, result_with_nan[:, 1], vals_with_nan[:, 1], '2nd dim: two NaN', vmin, vmax) pl.tight_layout() ``` ### How to use interpolation for gridding data in polar coordinates? Read polar coordinates and corresponding rainfall intensity from file ``` filename = get_wradlib_data_file('misc/bin_coords_tur.gz') src = np.loadtxt(filename) filename = get_wradlib_data_file('misc/polar_R_tur.gz') vals = np.loadtxt(filename) src.shape ``` Define target grid coordinates ``` xtrg = np.linspace(src[:,0].min(), src[:,0].max(), 200) ytrg = np.linspace(src[:,1].min(), src[:,1].max(), 200) trg = np.meshgrid(xtrg, ytrg) trg = np.vstack((trg[0].ravel(), trg[1].ravel())).T ``` Linear Interpolation ``` ip_lin = ipol.Linear(src, trg) result_lin = ip_lin(vals.ravel(), fill_value=np.nan) ``` IDW interpolation ``` ip_near = ipol.Nearest(src, trg) maxdist = trg[1,0] - trg[0,0] result_near = ip_near(vals.ravel(), maxdist=maxdist) ``` Plot results ``` fig = pl.figure(figsize=(15, 6)) fig.subplots_adjust(wspace=0.4) ax = fig.add_subplot(131, aspect="equal") plot_ppi(vals, ax=ax) ax = fig.add_subplot(132, aspect="equal") pl.pcolormesh(xtrg, ytrg, result_lin.reshape( (len(xtrg), len(ytrg)) ) ) ax = fig.add_subplot(133, aspect="equal") pl.pcolormesh(xtrg, ytrg, result_near.reshape( (len(xtrg), len(ytrg)) ) ) ```
github_jupyter
<span style="color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;">An Exception was encountered at '<a href="#papermill-error-cell">In [42]</a>'.</span> # High Value Customers Identification (Insiders) # **By: Marx Cerqueira** # IMPORTS ``` import re import os import inflection import sqlite3 import numpy as np import pandas as pd import seaborn as sns import umap.umap_ as umap from matplotlib import pyplot as plt from sklearn import metrics as m from sklearn import preprocessing as pp from sklearn import decomposition as dd from sklearn import ensemble as en from sklearn import manifold as mn from sklearn import mixture as mx from sklearn import cluster as c from scipy.cluster import hierarchy as hc from plotly import express as px from sqlalchemy import create_engine ``` ## Loading Data ``` # load data df_ecomm_raw = pd.read_csv('/home/marxcerqueira/repos/Kaggle-HighValue-Custormers-Identification/data/raw/Ecommerce.csv', encoding='iso-8859-1', low_memory=False) #drop extra column df_ecomm_raw = df_ecomm_raw.drop(columns = ['Unnamed: 8'], axis = 1) ``` # DATA DISCRIPTION ``` # Copy dataset df0 = df_ecomm_raw.copy() ``` ## Rename Columns ``` cols_old = ['InvoiceNo','StockCode','Description','Quantity', 'InvoiceDate','UnitPrice','CustomerID','Country'] snakecase = lambda x: inflection.underscore(x) col_news = list(map(snakecase, cols_old)) # Rename columns df0.columns = col_news ``` ## Data Dimension ``` print('Number of rows: {}.'.format(df0.shape[0])) print('Number of cols: {}.'.format(df0.shape[1])) ``` ## Data Types ``` df0.info() ``` ## Check NA Values ``` missing_count = df0.isnull().sum() # the count of missing values value_count = df0.isnull().count() # the total values count missing_percentage = round(missing_count/value_count*100,2) # the percentage of missing values missing_df = pd.DataFrame({'missing value count': missing_count, 'percentage': missing_percentage}) missing_df barchart = missing_df.plot.bar(y='percentage') 
for index, percentage in enumerate( missing_percentage ): barchart.text( index, percentage, str(percentage)+'%') ``` ## Fillout NA ``` # separate NA's in two different dataframe, one with NAs and other without it df_missing = df0.loc[df0['customer_id'].isna(), :] df_not_missing = df0.loc[~df0['customer_id'].isna(), :] # create reference df_backup = pd.DataFrame( df_missing['invoice_no'].drop_duplicates().copy() ) df_backup['customer_id'] = np.arange( 19000, 19000+len( df_backup ), 1) # Fillout NA stratety: creating customers_id to keep their behavior (25% of the database) # merge original with reference dataframe df0 = pd.merge( df0, df_backup, on='invoice_no', how='left' ) # coalesce df0['customer_id'] = df0['customer_id_x'].combine_first( df0['customer_id_y'] ) # drop extra columns df0 = df0.drop( columns=['customer_id_x', 'customer_id_y'], axis=1 ) df0.isna().sum() ``` ## Change Types ``` # Transforme datatype of variable invoice_date to datetime df0['invoice_date'] = pd.to_datetime(df0['invoice_date']) df0['customer_id'] = df0['customer_id'].astype('int64') df0.dtypes ``` ## Descriptive Statistics ``` df0.describe().T df0.describe(include = object).T num_attributes = df0.select_dtypes(include = np.number) cat_attributes = df0.select_dtypes(exclude = [np.number, np.datetime64]) ``` ### Numerical Attributes ``` # central tendency - mean, median ct1 = pd.DataFrame(num_attributes.apply(np.mean)).T ct2 = pd.DataFrame(num_attributes.apply(np.median)).T # dispersion - desvio padrão, min, max, range, skew, kurtosis d1 = pd.DataFrame(num_attributes.apply(np.std)).T d2 = pd.DataFrame(num_attributes.apply(np.min)).T d3 = pd.DataFrame(num_attributes.apply(np.max)).T d4 = pd.DataFrame(num_attributes.apply(lambda x: x.max()-x.min())).T d5 = pd.DataFrame(num_attributes.apply(lambda x: x.skew())).T d6 = pd.DataFrame(num_attributes.apply(lambda x: x.kurtosis())).T #concatenate m1 = pd.concat([d2,d3,d4,ct1,ct2,d1,d5,d6]).T.reset_index() m1.columns = ['attributes', 'min', 'max', 
'range', 'mean', 'mediana','std', 'skew','kurtosis'] m1 ``` ### Categorical Attributes #### Invoice_No ``` # problem: We got letters and numbers in invoice_no #df1['invoice_no'].astype( int ) # identification: df_letter_invoices = df0.loc[df0['invoice_no'].apply( lambda x: bool( re.search( '[^0-9]+', x ) ) ), :] df_letter_invoices.head() print( 'Total number of invoices: {}'.format( len( df_letter_invoices ) ) ) print( 'Total number of negative quantity: {}'.format( len( df_letter_invoices[ df_letter_invoices['quantity'] < 0 ] ) ) ) ``` #### Stock_Code ``` # check stock codes only characters df0.loc[df0['stock_code'].apply( lambda x: bool( re.search( '^[a-zA-Z]+$', x ) ) ), 'stock_code'].unique() # Acão: ## 1. Remove stock_code in ['POST', 'D', 'M', 'PADS', 'DOT', 'CRUK'] ``` # VARIABLE FILTERING ``` df1 = df0.copy() # === Numerical attributes ==== df1 = df1.loc[df1['unit_price'] >= 0.04, :] # === Categorical attributes ==== df1 = df1[~df1['stock_code'].isin( ['POST', 'D', 'DOT', 'M', 'S', 'AMAZONFEE', 'm', 'DCGSSBOY', 'DCGSSGIRL', 'PADS', 'B', 'CRUK'] )] # description df1 = df1.drop( columns='description', axis=1 ) # country df1 = df1[~df1['country'].isin( ['European Community', 'Unspecified' ] ) ] #assuming this risk so we can use lat long parameters # bad customers df1 = df1[~df1['customer_id'].isin([16446])] # quantity df1_returns = df1.loc[df1['quantity'] < 0, :].copy() df1_purchases = df1.loc[df1['quantity'] >= 0, :].copy() ``` # FEATURE ENGINEERING ``` df2 = df1.copy() ``` ## Feature Creation ``` # data reference # RFM Model, creating feature for it df_ref = df2.drop(['invoice_no', 'stock_code', 'quantity', 'invoice_date', 'unit_price', 'country'], axis = 1).drop_duplicates(ignore_index = True).copy() ``` ### Gross Revenue ``` # Gross Revenue ( Faturamento ) quantity * price df1_purchases.loc[:, 'gross_revenue'] = df1_purchases.loc[:,'quantity'] * df1_purchases.loc[:, 'unit_price'] # Monetary (How much money a customer spends on purchases) df_monetary = 
df1_purchases.loc[:, ['customer_id', 'gross_revenue']].groupby( 'customer_id' ).sum().reset_index() df_ref = pd.merge( df_ref, df_monetary, on='customer_id', how='left' ) df_ref.isna().sum() ``` ### Recency ``` # Recency - Day from last purchase df_recency = df1_purchases.loc[:, ['customer_id', 'invoice_date']].groupby( 'customer_id' ).max().reset_index() df_recency['recency_days'] = ( df1['invoice_date'].max() - df_recency['invoice_date'] ).dt.days df_recency = df_recency[['customer_id', 'recency_days']].copy() df_ref = pd.merge( df_ref, df_recency, on='customer_id', how='left' ) df_ref.isna().sum() ``` ### Qty Products (different stock codes by customer) ``` # Quantity of unique products purchased (Frequency: qntd of products over time) # Number of products (different stock codes by customer) df_freq = (df1_purchases.loc[:, ['customer_id', 'stock_code']].groupby( 'customer_id' ).count() .reset_index() .rename( columns={'stock_code': 'qty_products'} ) ) df_ref = pd.merge( df_ref, df_freq, on='customer_id', how='left' ) df_ref.isna().sum() ``` ### Frequency ``` #Frequency Purchase (rate: purchases by day) df_aux = ( df1_purchases[['customer_id', 'invoice_no', 'invoice_date']].drop_duplicates() .groupby( 'customer_id') .agg( max_ = ( 'invoice_date', 'max' ), min_ = ( 'invoice_date', 'min' ), days_= ( 'invoice_date', lambda x: ( ( x.max() - x.min() ).days ) + 1 ), buy_ = ( 'invoice_no', 'count' ) ) ).reset_index() # Frequency df_aux['frequency'] = df_aux[['buy_', 'days_']].apply( lambda x: x['buy_'] / x['days_'] if x['days_'] != 0 else 0, axis=1 ) # Merge df_ref = pd.merge( df_ref, df_aux[['customer_id', 'frequency']], on='customer_id', how='left' ) df_ref.isna().sum() ``` ### Number of Returns ``` #Number of Returns df_returns = df1_returns[['customer_id', 'quantity']].groupby( 'customer_id' ).sum().reset_index().rename( columns={'quantity':'qty_returns'} ) df_returns['qty_returns'] = df_returns['qty_returns'] * -1 df_ref = pd.merge( df_ref, df_returns, how='left', 
on='customer_id' ) df_ref.loc[df_ref['qty_returns'].isna(), 'qty_returns'] = 0 #customers with 0 returned items df_ref.isna().sum() ``` # EXPLORATORY DATA ANALYSIS (EDA) ``` df3 = df_ref.dropna().copy() df3.isna().sum() ``` ## Space Study ``` # Original dataset #df33 = df3.drop(columns = ['customer_id'], axis = '').copy() # dataset with selected columns due feature selection based on its importance cols_selected = ['customer_id', 'gross_revenue', 'recency_days', 'qty_products', 'frequency', 'qty_returns'] df33 = df3[cols_selected].drop(columns = 'customer_id', axis = 1) df33.head() mm = pp.MinMaxScaler() df33['gross_revenue'] = mm.fit_transform(df33[['gross_revenue']]) df33['recency_days'] = mm.fit_transform(df33[['recency_days']]) df33['qty_products'] = mm.fit_transform(df33[['qty_products']]) df33['frequency'] = mm.fit_transform(df33[['frequency']]) df33['qty_returns'] = mm.fit_transform(df33[['qty_returns']]) X = df33.copy() X.shape ``` #### PCA ``` pca = dd.PCA( n_components = X.shape[1]) principal_components = pca.fit_transform(X) # plot explained variables features = range(pca.n_components_) plt.bar(features, pca.explained_variance_ratio_, color = 'black') #quais componentes principais com a maior variação de dados # pca component df_pca = pd.DataFrame( principal_components ) sns.scatterplot(x = 0, y = 1, data = df_pca); ``` #### UMAP ``` reducer = umap.UMAP(random_state = 42) embedding = reducer.fit_transform(X) #gera o espaço projetado - embedding é a projeção gerada em outro espaço #embedding df_umap = pd.DataFrame() df_umap['embedding_X'] = embedding[:, 0] df_umap['embedding_y'] = embedding[:, 1] #plot UMAP - cluster projetado de alta dimencionalidade sns.scatterplot(x = 'embedding_X', y = 'embedding_y', data = df_umap); ``` #### t-SNE ``` reducer = mn.TSNE( n_components = 2, n_jobs = -1, random_state = 42) embedding = reducer.fit_transform(X) #gera o espaço projetado - embedding é a projeção gerada em outro espaço #embedding df_tsne = pd.DataFrame() 
df_tsne['embedding_X'] = embedding[:, 0] df_tsne['embedding_y'] = embedding[:, 1] #plot UMAP - cluster projetado de alta dimencionalidade sns.scatterplot(x = 'embedding_X', y = 'embedding_y', data = df_tsne); ``` #### Tree-Based Embedding ``` df3.head() # training dataset X = df33.drop(columns = ['gross_revenue'], axis = 1) #target variable y = df33['gross_revenue'] # I could use boruta to select features to build a better embedding space # model definition rf_model = en.RandomForestRegressor(n_estimators = 100, random_state = 42) # model training rf_model.fit(X,y) # leaf df_leaf = pd.DataFrame(rf_model.apply( X )) # using UMAP to reduce the space study from 100 to 2 reducer = umap.UMAP(random_state = 42) embedding = reducer.fit_transform(df_leaf) #gera o espaço projetado - embedding é a projeção gerada em outro espaço #embedding df_tree = pd.DataFrame() df_tree['embedding_X'] = embedding[:, 0] df_tree['embedding_y'] = embedding[:, 1] #plot UMAP - cluster projetado de alta dimencionalidade sns.scatterplot(x = 'embedding_X', y = 'embedding_y', data = df_tree); ``` # DATA PREPARATION <span id="papermill-error-cell" style="color:red; font-family:Helvetica Neue, Helvetica, Arial, sans-serif; font-size:2em;">Execution using papermill encountered an exception here and stopped:</span> ``` # Tree-Based Embbeding df4 = df_tree.copy() df4.to_csv('../src/data/tree_based_embbeding.csv', index = False) # # UMAP Embbeding # df4 = df_umap.copy() # # TSNE Embedding # df4 = df_tsne.copy() ``` # HYPERPARAMETER FINE-TUNNING ``` X = df4.copy() X.head() clusters = np.arange(2, 31, 1) #silhouette was increasing, so we put more k points clusters ``` ## K-Means ``` kmeans_sil = [] for k in clusters: # model definition kmeans_model = c.KMeans( n_clusters = k, n_init = 100, random_state = 42 ) # model training kmeans_model.fit(X) # model predict labels = kmeans_model.predict(X) # model performance sil = m.silhouette_score( X, labels, metric = 'euclidean') kmeans_sil.append(sil) plt.plot( 
clusters, kmeans_sil, linestyle = '--', marker = 'o', color = 'b' ) plt.xlabel( 'K' ); plt.ylabel('Silhouette Score'); plt.title('KMeans Silhouette Score per K '); ``` ## GMM ``` gmm_sil = [] for k in clusters: # model definition gmm_model = mx.GaussianMixture(n_components = k, n_init = 100, random_state = 42) # model training gmm_model.fit(X) # model prediction labels = gmm_model.predict(X) # model performance sil = m.silhouette_score(X, labels, metric = 'euclidean') gmm_sil.append(sil) plt.plot(clusters, gmm_sil, linestyle = '--', marker = 'o', color = 'b') plt.xlabel( 'K' ); plt.ylabel('Silhouette Score'); plt.title('GMM Silhouette Score per K '); ``` ## Hierarchical Clustering ``` # model definition and training hc_model = hc.linkage(X, 'ward') ``` ### H-Clustering Silhouette Score ``` hc_sil = [] for k in clusters: #model definition and training hc_model = hc.linkage(X, 'ward') # model predict labels = hc.fcluster(hc_model, k, criterion = 'maxclust') # metrics sil = m.silhouette_score(X, labels, metric = 'euclidean') hc_sil.append(sil) plt.plot(clusters, hc_sil, linestyle = '--', marker = 'o', color = 'b') ``` ## Results ``` ## Results - Tree Based Embedding df_results = pd.DataFrame({'KMeans:': kmeans_sil, 'GMM': gmm_sil, 'HC': hc_sil} ).T df_results.columns = clusters df_results.style.highlight_max(color = 'lightgreen', axis = 1) ## Results - UMAP Embedding df_results = pd.DataFrame({'KMeans:': kmeans_sil, 'GMM': gmm_sil, 'HC': hc_sil} ).T df_results.columns = clusters df_results.style.highlight_max(color = 'lightgreen', axis = 1) ## Results - TSNE Embedding df_results = pd.DataFrame({'KMeans:': kmeans_sil, 'GMM': gmm_sil, 'HC': hc_sil} ).T df_results.columns = clusters df_results.style.highlight_max(color = 'lightgreen', axis = 1) ``` # MACHINE LEARNING MODEL TRAINING ## K-Means ``` # model definition k = 8; kmeans = c.KMeans(init = 'random', n_clusters = k, n_init = 100, max_iter = 300, random_state = 42) # model training kmeans.fit(X) # clustering labels 
= kmeans.labels_ # # trying with GMM beacuse of its approach in the embedding space # # k=11 ; # # model definition # gmm_model = mx.GaussianMixture(n_components = k,n_init = 10 ,random_state=42) # # model training # gmm_model.fit(X) # # model prediction # labels = gmm_model.predict(X) ``` ## Cluster Validation ``` # WSS (Within-cluster Sum of Square ) # print('WSS score: {}'.format(kmeans.inertia_)) # SS (Silhouette Score) print('SS score: {}'.format(m.silhouette_score(X, labels, metric = 'euclidean'))) ``` # CLUSTER ANALYSIS ``` df9 = X.copy() df9['cluster'] = labels ``` ## Visualization Inspection ``` # k = 8 for KMeans sns.scatterplot(x = 'embedding_X', y = 'embedding_y', hue = 'cluster', data = df9, palette = 'deep') ``` ## Cluster Profile ``` df92 = df3[cols_selected].copy() df92['cluster'] = labels df92.head() # Explaining clusters profile based on this averages # Number of customer df_cluster = df92[['customer_id', 'cluster']].groupby( 'cluster' ).count().reset_index() df_cluster['perc_customer'] = 100*( df_cluster['customer_id'] / df_cluster['customer_id'].sum() ) # Avg Gross revenue df_avg_gross_revenue = df92[['gross_revenue', 'cluster']].groupby( 'cluster' ).mean().reset_index() df_cluster = pd.merge( df_cluster, df_avg_gross_revenue, how='inner', on='cluster' ) # Avg recency days df_avg_recency_days = df92[['recency_days', 'cluster']].groupby( 'cluster' ).mean().reset_index() df_cluster = pd.merge( df_cluster, df_avg_recency_days, how='inner', on='cluster' ) # Avg qty products df_qty_products = df92[['qty_products', 'cluster']].groupby( 'cluster' ).mean().reset_index() df_cluster = pd.merge( df_cluster, df_qty_products, how='inner', on='cluster' ) # Frequency df_frequency = df92[['frequency', 'cluster']].groupby( 'cluster' ).mean().reset_index() df_cluster = pd.merge( df_cluster, df_frequency, how='inner', on='cluster' ) # Avg qty returns df_qty_returns = df92[['qty_returns', 'cluster']].groupby( 'cluster' ).mean().reset_index() df_cluster = pd.merge( 
df_cluster, df_qty_returns, how='inner', on='cluster' ) df_cluster # during the new EDA we can do a analyse inside each cluster ``` Cluster Insiders (04): - Number of customers: 551 (9.67% of costumers) - Avg Gross Revenue: $10410,00 - Recency Average: 45 days - Avg of Qty Products Purchased: 366 un - Purchase Frequency: 0.21 products per day # Exploratory Dada Analysis ``` df10 = df92.copy() df10.head() ``` # MODEL DEPLOYMENT ``` df92.dtypes df92['recency_days'] = df92['recency_days'].astype(int) df92['qty_products'] = df92['qty_products'].astype(int) df92['qty_returns'] = df92['qty_returns'].astype(int) # # create database # conn = sqlite3.connect('insiders_db.sqlite') # # create table # query_create_insiders = """ # CREATE TABLE insiders ( # custer_id INTEGER, # gross_revenue REAL, # recency_days INTEGER, # qty_products INTEGER, # frequency INTEGER, # qty_returns INTEGER, # cluster INTEGER # ) # """ # conn.execute(query_create_insiders) # conn.commit() # conn.close() # database connection conn = create_engine('sqlite:///insiders_db.sqlite') # # drop table # query_drop_insiders = """ # DROP TABLE insiders # """ #create table query_create_insiders = """ CREATE TABLE insiders ( customer_id INTEGER, gross_revenue REAL, recency_days INTEGER, qty_products INTEGER, frequency INTEGER, qty_returns INTEGER, cluster INTEGER ) """ conn.execute(query_create_insiders) # insert into data df92.to_sql('insiders', con = conn, if_exists = 'append', index = False) # consulting database # get query query_collect = """ SELECT * from insiders """ df = pd.read_sql_query(query_collect, conn) df.head() ```
github_jupyter
## Exploratory Data Analysis ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline import warnings warnings.filterwarnings('ignore') # read dataset df = pd.read_csv('../datasets/winequality/winequality-red.csv',sep=';') # check data dimensions print(df.shape) # check length print(len(df)) # check number of dimensions of your DataFrame or Series print(df.ndim) # show the first five rows print(df.head(5)) # show the last five rows print(df.tail(5)) # print column names df.dtypes # return the number of non-missing values for each column of the DataFrame print(df.count) # change direction to get count of non-missing values for each each row df.count(axis='columns') # To print the metadata, use info() print(df.info()) # show the columns df.columns ``` ### Sorting A DataFrame can be sorted by the value of one of the variables (i.e columns). For example, we can sort by Total day charge (use ascending=False to sort in descending order): ``` df.sort_values(by='alcohol', ascending=False).head() ``` Alternatively, we can also sort by multiple columns: ``` df.sort_values(by=['alcohol', 'quality'], ascending=[True, False]).head() ``` ### Indexing and retrieving data DataFrame can be indexed in different ways. To get a single column, you can use a DataFrame['Name'] construction. Let's use this to answer a question about that column alone: **what is the proportion of alcohol in our dataframe?** ``` df['alcohol'].mean() ``` ### Applying Functions to Cells, Columns and Rows **To apply functions to each column, use `apply():`** ``` df.apply(np.max) ``` The apply method can also be used to apply a function to each row. To do this, specify `axis=1`. `lambda` functions are very convenient in such scenarios. 
For example, if we need to select all wines with alcohol content greater than 6, we can do it like this: ``` df[df['alcohol'].apply(lambda alcohol: alcohol > 6)].head() ``` The `map` method can be used to **replace values in a column** by passing a dictionary of the form `{old_value: new_value}` as its argument: ``` d = {'9.4' : 100, '9.8' : 200} df['alcohol'] = df['alcohol'].map(d) df.head() ``` The same thing can be done with the `replace` method: ### Grouping In general, grouping data in Pandas goes as follows: df.groupby(by=grouping_columns)[columns_to_show].function() 1. First, the `groupby` method divides the grouping_columns by their values. They become a new index in the resulting dataframe. 2. Then, columns of interest are selected (`columns_to_show`). If columns_to_show is not included, all non groupby clauses will be included. 3. Finally, one or several functions are applied to the obtained groups per selected columns. Here is an example where we group the data according to the values of the `sulphates` variable and display statistics of three columns in each group: ``` columns_to_show = ['pH', 'chlorides', 'citric acid'] df.groupby(['sulphates'])[columns_to_show].describe(percentiles=[]).head() ``` Let’s do the same thing, but slightly differently by passing a list of functions to `agg()`: ``` columns_to_show = ['pH', 'chlorides', 'citric acid'] df.groupby(['sulphates'])[columns_to_show].agg([np.mean, np.std, np.min, np.max]).head() ``` ### Summary tables Suppose we want to see how the observations in our sample are distributed in the context of two variables - `sulphates` and `quality`. To do so, we can build a contingency table using the `crosstab` method: ``` pd.crosstab(df['sulphates'], df['quality']).head() pd.crosstab(df['sulphates'], df['quality'], normalize=True).head() ``` ## First attempt on predicting wine quality Let's see how wine quality is related to the alcohol content in it. 
We’ll do this using a crosstab contingency table and also through visual analysis with Seaborn (however, visual analysis will be covered more thoroughly in the next article). ``` pd.crosstab(df['pH'], df['quality'], margins=True).head() sns.countplot(x='density', hue='quality', data=df); ``` ### Histogram ``` # create histogram bin_edges = np.arange(0, df['residual sugar'].max() + 1, 1) fig = plt.hist(df['residual sugar'], bins=bin_edges) # add plot labels plt.xlabel('count') plt.ylabel('residual sugar') plt.show() ``` ### Scatterplot for continuous variables ``` # create scatterplot fig = plt.scatter(df['pH'], df['residual sugar']) # add plot labels plt.xlabel('pH') plt.ylabel('residual sugar') plt.show() ``` ### Scatterplot Matrix ``` # show columns df.columns # create scatterplot matrix fig = sns.pairplot(data=df[['alcohol', 'pH', 'residual sugar', 'quality']], hue='quality') # add plot labels plt.xlabel('pH') plt.ylabel('residual sugar') plt.show() ``` ### Boxplots - Distribution of data in terms of median and percentiles (median is the 50th percentile) ##### manual approach ``` percentiles = np.percentile(df['alcohol'], q=[25, 50, 75]) percentiles for p in percentiles: plt.axhline(p, color='black', linestyle='-') plt.scatter(np.zeros(df.shape[0]) + 0.5, df['alcohol']) iqr = percentiles[-1] - percentiles[0] upper_whisker = min(df['alcohol'].max(), percentiles[-1] + iqr * 1.5) lower_whisker = max(df['alcohol'].min(), percentiles[0] - iqr * 1.5) plt.axhline(upper_whisker, color='black', linestyle='--') plt.axhline(lower_whisker, color='black', linestyle='--') plt.ylim([8, 16]) plt.ylabel('alcohol') fig = plt.gca() fig.axes.get_xaxis().set_ticks([]) plt.show() ``` #### using matplotlib.pyplot.boxplot approach ``` plt.boxplot(df['alcohol']) plt.ylim([8, 16]) plt.ylabel('alcohol') fig = plt.gca() fig.axes.get_xaxis().set_ticks([]) plt.show() # Assume density is the target variable #descriptive statistics summary df['density'].describe() #histogram 
sns.distplot(df['density']); #skewness and kurtosis print("Skewness: %f" % df['density'].skew()) print("Kurtosis: %f" % df['density'].kurt()) ``` ### Relationship with other continuous variables ``` # other variables are fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar', 'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density', 'pH', 'sulphates', 'alcohol', var = 'pH' data = pd.concat([df['density'], df[var]], axis=1) data.plot.scatter(x=var, y='density'); ### Relationship with categorical variable var = 'quality' data = pd.concat([df['density'], df[var]], axis=1) f, ax = plt.subplots(figsize=(8, 6)) fig = sns.boxplot(x=var, y="density", data=data) ``` #### Correlation matrix (heatmap style) ``` #correlation matrix corrmat = df.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(corrmat, vmax=.8, square=True); ``` #### `density` correlation matrix (zoomed heatmap style) ``` k = 10 #number of variables for heatmap cols = corrmat.nlargest(k, 'density')['density'].index cm = np.corrcoef(df[cols].values.T) sns.set(font_scale=1.25) hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values) plt.show() ``` From the above heatmap plot we can see that variable `density` is highly correlated to `fixed acidity`, `citric acid`, `total sulphur dioxide`, and `free sulphur dioxide` ``` df.columns #scatterplot sns.set() cols = ['fixed acidity', 'citric acid', 'total sulfur dioxide', 'free sulfur dioxide'] sns.pairplot(df[cols], height = 2.5) plt.show(); ``` ### Missing data Important questions when thinking about missing data: - How prevalent is the missing data? - Is missing data random or does it have a pattern? 
``` #missing data total = df.isnull().sum().sort_values(ascending=False) percent = (df.isnull().sum()/df.isnull().count()).sort_values(ascending=False) missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent']) missing_data.head(20) ``` ### Detailed Statistical Analysis According to Hair et al. (2013), four assumptions should be tested: **Normality** - When we talk about normality what we mean is that the data should look like a normal distribution. This is important because several statistic tests rely on this (e.g. t-statistics). In this exercise we'll just check univariate normality for 'density' (which is a limited approach). Remember that univariate normality doesn't ensure multivariate normality (which is what we would like to have), but it helps. Another detail to take into account is that in big samples (>200 observations) normality is not such an issue. However, if we solve normality, we avoid a lot of other problems (e.g. heteroscedacity) so that's the main reason why we are doing this analysis. **Homoscedasticity** - I just hope I wrote it right. Homoscedasticity refers to the 'assumption that dependent variable(s) exhibit equal levels of variance across the range of predictor variable(s)' (Hair et al., 2013). Homoscedasticity is desirable because we want the error term to be the same across all values of the independent variables. **Linearity**- The most common way to assess linearity is to examine scatter plots and search for linear patterns. If patterns are not linear, it would be worthwhile to explore data transformations. However, we'll not get into this because most of the scatter plots we've seen appear to have linear relationships. **Absence of correlated errors** - Correlated errors, like the definition suggests, happen when one error is correlated to another. For instance, if one positive error makes a negative error systematically, it means that there's a relationship between these variables. 
This occurs often in time series, where some patterns are time related. We'll also not get into this. However, if you detect something, try to add a variable that can explain the effect you're getting. That's the most common solution for correlated errors. **Normality** - Histogram - Kurtosis and skewness. - Normal probability plot - Data distribution should closely follow the diagonal that represents the normal distribution. ``` #histogram and normal probability plot sns.set_style('darkgrid') sns.distplot(df['density']); # Add labels plt.title('Histogram of Density') plt.xlabel('Density') plt.ylabel('Count') sns.distplot(df['density'], hist= True, kde=False) help(sns.distplot) ```
github_jupyter
You can also $add_{math}$ and
``` thisvariable = "none of this should show up in the textbook" fig, ax = plt.subplots() x = np.random.randn(100) y = np.random.randn(100) ax.scatter(x, y, s=np.abs(x*100), c=x, cmap=plt.cm.coolwarm) ax.text(0, .5, thisvariable, fontsize=20, transform=ax.transAxes) ax.set_axis_off() ``` You can also **remove only the code** so that images and other output still show up. Below we'll *only* display an image. It was generated with Python code in a cell, which you can [see in the original notebook](https://github.com/jupyter/jupyter-book/blob/master/jupyter_book/minimal/content/features/notebooks.ipynb) ``` # NO CODE thisvariable = "this plot *will* show up in the textbook." fig, ax = plt.subplots() x = np.random.randn(100) y = np.random.randn(100) ax.scatter(x, y, s=np.abs(x*100), c=x, cmap=plt.cm.coolwarm) ax.text(0, .5, thisvariable, fontsize=20, transform=ax.transAxes) ax.set_axis_off() ``` And here we'll *only* display a Pandas DataFrame. Again, this was generated with Python code from [this original notebook](https://github.com/jupyter/jupyter-book/blob/master/jupyter_book/minimal/content/features/notebooks.ipynb). ``` # NO CODE import pandas as pd pd.DataFrame([['hi', 'there'], ['this', 'is'], ['a', 'DataFrame']], columns=['Word A', 'Word B']) ``` You can configure the text that *Textbooks with Jupyter* uses for this by modifying your book's `_config.yml` file. ## Interactive outputs We can even do the same for *interactive* material. Below we'll display a map using `ipyleaflet`. When the notebook is converted to Markdown, the code for creating the interactive map is retained. **Note that this will only work for some packages.** They need to be able to output standalone HTML/Javascript, and not depend on an underlying Python kernel to work. ``` import folium m = folium.Map( location=[45.372, -121.6972], zoom_start=12, tiles='Stamen Terrain' ) folium.Marker( location=[45.3288, -121.6625], popup='Mt. 
Hood Meadows', icon=folium.Icon(icon='cloud') ).add_to(m) folium.Marker( location=[45.3311, -121.7113], popup='Timberline Lodge', icon=folium.Icon(color='green') ).add_to(m) folium.Marker( location=[45.3300, -121.6823], popup='Some Other Location', icon=folium.Icon(color='red', icon='info-sign') ).add_to(m) m ```
github_jupyter
Train new models with the provided training dataset
single model
The generated result will show up at result/single-comparison
github_jupyter
[@LorenaABarba](https://twitter.com/LorenaABarba) 12 steps to Navier–Stokes ====== *** This Jupyter notebook continues the presentation of the **12 steps to Navier–Stokes**, the practical module taught in the interactive CFD class of [Prof. Lorena Barba](http://lorenabarba.com). You should have completed [Step 1](./01_Step_1.ipynb) before continuing, having written your own Python script or notebook and having experimented with varying the parameters of the discretization and observing what happens. Step 2: Nonlinear Convection ----- *** Now we're going to implement nonlinear convection using the same methods as in step 1. The 1D convection equation is: $$\frac{\partial u}{\partial t} + u \frac{\partial u}{\partial x} = 0$$ Instead of a constant factor $c$ multiplying the second term, now we have the solution $u$ multiplying it. Thus, the second term of the equation is now *nonlinear*. We're going to use the same discretization as in Step 1 — forward difference in time and backward difference in space. Here is the discretized equation. $$\frac{u_i^{n+1}-u_i^n}{\Delta t} + u_i^n \frac{u_i^n-u_{i-1}^n}{\Delta x} = 0$$ Solving for the only unknown term, $u_i^{n+1}$, yields: $$u_i^{n+1} = u_i^n - u_i^n \frac{\Delta t}{\Delta x} (u_i^n - u_{i-1}^n)$$ As before, the Python code starts by loading the necessary libraries. Then, we declare some variables that determine the discretization in space and time (you should experiment by changing these parameters to see what happens). Then, we create the initial condition $u_0$ by initializing the array for the solution using $u = 2\ @\ 0.5 \leq x \leq 1$ and $u = 1$ everywhere else in $(0,2)$ (i.e., a hat function). 
``` import numpy # we're importing numpy from matplotlib import pyplot # and our 2D plotting library %matplotlib inline nx = 41 dx = 2 / (nx - 1) nt = 20 #nt is the number of timesteps we want to calculate dt = .025 #dt is the amount of time each timestep covers (delta t) u = numpy.ones(nx) #as before, we initialize u with every value equal to 1. u[int(.5 / dx) : int(1 / dx + 1)] = 2 #then set u = 2 between 0.5 and 1 as per our I.C.s un = numpy.ones(nx) #initialize our placeholder array un, to hold the time-stepped solution ``` The code snippet below is *unfinished*. We have copied over the line from [Step 1](./01_Step_1.ipynb) that executes the time-stepping update. Can you edit this code to execute the nonlinear convection instead? ``` for n in range(nt): #iterate through time un = u.copy() ##copy the existing values of u into un for i in range(1, nx): ##now we'll iterate through the u array u[i] = un[i]*(1 - (dt/dx)*(un[i]-un[i-1])) ###This is the line from Step 1, copied exactly. Edit it for our new equation. ###then uncomment it and run the cell to evaluate Step 2 ###u[i] = un[i] - c * dt / dx * (un[i] - un[i-1]) pyplot.plot(numpy.linspace(0, 2, nx), u) ##Plot the results ``` What do you observe about the evolution of the hat function under the nonlinear convection equation? What happens when you change the numerical parameters and run again? ## Learn More For a careful walk-through of the discretization of the convection equation with finite differences (and all steps from 1 to 4), watch **Video Lesson 4** by Prof. Barba on YouTube. ``` from IPython.display import YouTubeVideo YouTubeVideo('y2WaK7_iMRI') from IPython.core.display import HTML def css_styling(): styles = open("../styles/custom.css", "r").read() return HTML(styles) css_styling() ``` > (The cell above executes the style for this notebook.)
github_jupyter
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import warnings warnings.filterwarnings("ignore") ##### Functions # 1st function: to graph time series based on TransactionDT vs the variable selected def scatter(column): fr,no_fr = (train[train['isFraud'] == 1], train[train['isFraud'] == 0]) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,3)) ax1.title.set_text('Histogram ' + column + ' when isFraud == 0') ax1.set_ylim(train[column].min() - 1,train[column].max() + 1) ax1.scatter(x = no_fr['TransactionDT'], y = no_fr[column], color = 'blue', marker='o') ax2.title.set_text('Histogram ' + column + ' when isFraud == 1') ax2.set_ylim(train[column].min() - 1,train[column].max() + 1) ax2.scatter(x = fr['TransactionDT'], y = fr[column], color = 'red', marker='o') plt.show() # 2nd function: to show a ranking of pearson correlation with the variable selected def corr(data,column): print('Correlation with ' + column) print(train[data].corrwith(train[column]).abs().sort_values(ascending = False)[1:]) # 3rd function: to reduce the groups based on Nans agroupation and pearson correlation def reduce(groups): result = list() for values in groups: maxval = 0 val = values[0] for value in values: unique_values = train[value].nunique() if unique_values > maxval: maxval = unique_values val = value result.append(value) return result # 4th function: to sort each column in ascending order based on its number def order_finalcolumns(final_Xcolumns): return sorted(final_Xcolumns, key=lambda x: int("".join([i for i in x if i.isdigit()]))) ##### Download of files. print('Downloading datasets...') print(' ') train = pd.read_pickle('/kaggle/input/1-fraud-detection-memory-reduction/train_mred.pkl') print('Train has been downloaded... (1/2)') test = pd.read_pickle('/kaggle/input/1-fraud-detection-memory-reduction/test_mred.pkl') print('Test has been downloaded... 
(2/2)') print(' ') print('All files are downloaded') ##### All the columns of train dataset. print(list(train)) ``` # NaNs Exploration We will search all the columns to determine which columns are related by the number of NANs present. After grouping them, we decide to keep the columns of each group with major amount of unique values (its supposed to be the most explanatory variable) ## Transaction columns ``` # These columns are the first ones in transaction dataset. columns= list(train.columns[:17]) columns for col in columns: print(f'{col} NaNs: {train[col].isna().sum()} | {train[col].isna().sum()/train.shape[0]:.2%}') # If we look closely to % NaNs data, most of them have low number of missing information. We are keeping all the columns where % NaNs < 0.7 final_transactioncolumns = list() for col in columns: if train[col].isna().sum()/train.shape[0] < 0.7: final_transactioncolumns.append(col) print('Final Transaction columns:',final_transactioncolumns) ``` ## C columns ``` ##### Group the C columns to determine which columns are related by the number of NANs present and analyze its groups independently. 
columns = ['C' + str(i) for i in range(1,15)] df_nan = train.isna() dict_nans = dict() for column in columns: number_nans = df_nan[column].sum() try: dict_nans[number_nans].append(column) except: dict_nans[number_nans] = [column] group_number = 1 for key,values in dict_nans.items(): print('Group {}'.format(group_number),'| Number of NANs =',key) print(values) print(' ') group_number += 1 ``` ### Group 1 (single group) ``` ##### Time series graph based on TransactionDT # There is no column that does not have NaNs values so we get all the columns in the same group group_list = ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['C1','C11','C2','C6','C8','C4','C10','C14','C12','C7','C13'], ['C3'], ['C5','C9']] result = reduce(reduce_groups) print('Final C columns:',result) final_ccolumns = result ``` ## D columns ``` ##### Group the D columns + Dachr columns to determine which columns are related by the number of NANs present and analyze its groups independently. columns = ['D' + str(i) for i in range(1,16)] columns.extend(['D1achr','D2achr','D4achr','D6achr','D10achr','D11achr','D12achr','D13achr','D14achr','D15achr']) df_nan = train.isna() dict_nans = dict() for column in columns: number_nans = df_nan[column].sum() try: dict_nans[number_nans].append(column) except: dict_nans[number_nans] = [column] group_number = 1 for key,values in dict_nans.items(): print('Group {}'.format(group_number),'| Number of NANs =',key) print(values) print(' ') group_number += 1 ``` ### Group 1 (single group) ``` ##### Time series graph based on TransactionDT. 
# Despite having different number of NaNs, we are analyzing it as a single group. But due to NaNs low number in D1, we keep it as a final column. group_list = ['D1achr', 'D2achr', 'D3', 'D4achr', 'D5', 'D6achr', 'D7', 'D8', 'D9', 'D10achr', 'D11achr', 'D12achr', 'D13achr', 'D14achr', 'D15achr'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 # On the first group, D1achr vs D2achr --> we keep D1achr due to the low number of NaNs. reduce_groups = [['D3','D7','D5'],['D4achr','D12achr','D6achr','D15achr','D10achr', 'D11achr'], ['D8'], ['D9'], ['D13achr'],['D14achr']] result = reduce(reduce_groups) result.append('D1achr') print('Final D columns:',result) final_dcolumns = result ``` ## M columns ``` ##### Group the M columns to determine which columns are related by the number of NANs present and analyze its groups independently. columns = ['M' + str(i) for i in range(1,10)] df_nan = train.isna() dict_nans = dict() for column in columns: number_nans = df_nan[column].sum() try: dict_nans[number_nans].append(column) except: dict_nans[number_nans] = [column] group_number = 1 for key,values in dict_nans.items(): print('Group {}'.format(group_number),'| Number of NANs =',key) print(values) print(' ') group_number += 1 ``` ### Group 1 (single group) ``` # To analize M columns, we need to transform strings to numbers. Instead of using Label Encoder, we use a dictionary. 
T_F_num = dict({'F': 0, 'T': 1, 'M0': 0, 'M1': 1, 'M2': 2}) for column in ['M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8', 'M9']: print(f'{column}:', train[column].unique()) print('Transforming strings to numbers...') train[column] = train[column].replace(T_F_num) print(f'{column}:', train[column].unique()) print('') ##### Time series graph based on TransactionDT. # Despite having different number of NaNs, we are analyzing it as a single group. group_list = ['M1', 'M2', 'M3', 'M4', 'M5', 'M6', 'M7', 'M8', 'M9'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() #### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, We grouped together the columns with corr > 0.7 but in this case, no correlation is bigger than 0.7 # That's why, in this particular case we grouped together the columns with corr > 0.5 reduce_groups = ['M1'], ['M2','M3'], ['M4'], ['M5'], ['M6'], ['M7', 'M8'], ['M9'] result = reduce(reduce_groups) print('Final M columns:',result) final_mcolumns = result ``` ## V columns ``` ##### Group the V columns to determine which columns are related by the number of NANs present and analyze its groups independently. columns = ['V' + str(i) for i in range(1,340)] df_nan = train.isna() dict_nans = dict() for column in columns: number_nans = df_nan[column].sum() try: dict_nans[number_nans].append(column) except: dict_nans[number_nans] = [column] group_number = 1 for key,values in dict_nans.items(): print('Group {}'.format(group_number),'| Number of NANs =',key) print(values) print(' ') group_number += 1 final_vcolumns = list() ``` ### Group 1 ``` ##### Time series graph based on TransactionDT. 
group_list = ['V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9', 'V10', 'V11'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = ['V1'], ['V2','V3'], ['V4','V5'], ['V6','V7'], ['V8','V9'] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group1 columns:',result) ``` ### Group 2 ``` ##### Time series graph based on TransactionDT. group_list = ['V12', 'V13', 'V14', 'V15', 'V16', 'V17', 'V18', 'V19', 'V20', 'V21', 'V22', 'V23', 'V24', 'V25', 'V26', 'V27', 'V28', 'V29', 'V30', 'V31', 'V32', 'V33', 'V34'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['V12','V13'], ['V14'], ['V15','V16','V33','V34','V31','V32','V21','V22','V17','V18'], ['V19','V20'],['V23','V24'],['V25','V26'],['V27','V28'],['V29','V30']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group2 columns:',result) ``` ### Group 3 ``` ##### Time series graph based on TransactionDT. group_list = ['V35', 'V36', 'V37', 'V38', 'V39', 'V40', 'V41', 'V42', 'V43', 'V44', 'V45', 'V46', 'V47', 'V48', 'V49', 'V50', 'V51', 'V52'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. 
for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['V35','V36'], ['V37','V38'], ['V39','V40','V42','V43','V50','V51','V52'], ['V41'], ['V44','V45'],['V46','V47'],['V48','V49']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group3 columns:',result) ``` ### Group 4 ``` ##### Time series graph based on TransactionDT. group_list = ['V53', 'V54', 'V55', 'V56', 'V57', 'V58', 'V59', 'V60', 'V61', 'V62', 'V63', 'V64', 'V65', 'V66', 'V67', 'V68', 'V69', 'V70', 'V71', 'V72', 'V73', 'V74'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['V53','V54'], ['V55','V56'], ['V57','V58','V71','V73','V72','V74','V63','V59','V64','V60'],['V61','V62'],['V65'], ['V66','V67'],['V68'], ['V69','V70']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group4 columns:',result) ``` ### Group 5 ``` ##### Time series graph based on TransactionDT. group_list = ['V75', 'V76', 'V77', 'V78', 'V79', 'V80', 'V81', 'V82', 'V83', 'V84', 'V85', 'V86', 'V87', 'V88', 'V89', 'V90', 'V91', 'V92', 'V93', 'V94'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. 
# We omit V107 since there is no info about corr with other columns and its only value is 1.
group_list = ['V138', 'V139', 'V140', 'V141', 'V142', 'V143', 'V144', 'V145', 'V146', 'V147', 'V148', 'V149', 'V150', 'V151', 'V152', 'V153', 'V154', 'V155', 'V156', 'V157', 'V158', 'V159', 'V160', 'V161', 'V162', 'V163', 'V164', 'V165', 'V166'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['V138'],['V139','V140'],['V141','V142'],['V143','V159','V150','V151','V165','V144','V145','V160','V152','V164','V166'],['V146','V147'], ['V148','V155','V149','V153','V154','V156','V157','V158'],['V161','V163','V162']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group7 columns:',result) ``` ### Group 8 ``` ##### Time series graph based on TransactionDT. group_list = ['V167', 'V168', 'V172', 'V173', 'V176', 'V177', 'V178', 'V179', 'V181', 'V182', 'V183', 'V186', 'V187', 'V190', 'V191', 'V192', 'V193', 'V196', 'V199', 'V202', 'V203', 'V204', 'V205', 'V206', 'V207', 'V211', 'V212', 'V213', 'V214', 'V215', 'V216'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. 
for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = ['V167','V176','V199','V179','V190','V177','V186','V168','V172','V178','V196','V191','V204','V213','V207','V173'],['V181','V183','V182', 'V187','V192','V203','V215','V178','V193','V212','V204'],['V202','V216','V204','V214'] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group8 columns:',result) ``` ### Group 9 ``` ##### Time series graph based on TransactionDT. group_list = ['V169', 'V170', 'V171', 'V174', 'V175', 'V180', 'V184', 'V185', 'V188', 'V189', 'V194', 'V195', 'V197', 'V198', 'V200', 'V201', 'V208', 'V209', 'V210'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['V169'],['V170','V171','V200','V201'],['V174','V175'],['V180'],['V184','V185'],['V188','V189'],['V194','V197','V195','V198'], ['V208','V210','V209']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group9 columns:',result) ``` ### Group 10 ``` ##### Time series graph based on TransactionDT. 
group_list = ['V217', 'V218', 'V219', 'V223', 'V224', 'V225', 'V226', 'V228', 'V229', 'V230', 'V231', 'V232', 'V233', 'V235', 'V236', 'V237','V240', 'V241', 'V242', 'V243', 'V244', 'V246', 'V247', 'V248', 'V249', 'V252', 'V253', 'V254', 'V257', 'V258', 'V260', 'V261', 'V262', 'V263', 'V264', 'V265', 'V266', 'V267', 'V268', 'V269', 'V273', 'V274', 'V275', 'V276', 'V277', 'V278'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['V217','V231','V233','V228','V257','V219','V232','V246'],['V218','V229','V224','V225','V253','V243','V254','V248','V264','V261','V249','V258', 'V267','V274','V230','V236','V247','V262','V223','V252','V260'],['V226','V263','V276','V278'], ['V235','V237'],['V240','V241'],['V242','V244'], ['V265','V275','V277','V268','V273'],['V269','V266']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group10 columns:',result) ``` ### Group 11 ``` ##### Time series graph based on TransactionDT. group_list = ['V220', 'V221', 'V222', 'V227', 'V234', 'V238', 'V239', 'V245', 'V250', 'V251', 'V255', 'V256', 'V259', 'V270', 'V271', 'V272'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. 
for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = ['V220'],['V221','V222','V259','V245','V227','V255','V256'],['V234'],['V238','V239'],['V250','V251'],['V270','V272','V271'] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group11 columns:',result) ``` ### Group 12 ``` ##### Time series graph based on TransactionDT. group_list = ['V279', 'V280', 'V284', 'V285', 'V286', 'V287', 'V290', 'V291', 'V292', 'V293', 'V294', 'V295', 'V297', 'V298', 'V299', 'V302', 'V303', 'V304', 'V305', 'V306', 'V307', 'V308', 'V309', 'V310', 'V311', 'V312', 'V316', 'V317', 'V318', 'V319', 'V320', 'V321'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = [['V279','V293','V290','V280','V295','V294','V292','V291','V317','V307','V318'],['V284'],['V285','V287'],['V286'],['V297','V299','V298'], ['V302','V304','V303'],['V305'],['V306','V308','V316','V319'],['V309','V311','V312','V310'],['V320','V321']] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group12 columns:',result) ``` ### Group 13 ``` ##### Time series graph based on TransactionDT. group_list = ['V281', 'V282', 'V283', 'V288', 'V289', 'V296', 'V300', 'V301', 'V313', 'V314', 'V315'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. 
for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = ['V281','V282','V283'],['V288','V289'],['V296'],['V300','V301'],['V313','V315','V314'] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group13 columns:',result) ``` ### Group 14 ``` ##### Time series graph based on TransactionDT. group_list = ['V322', 'V323', 'V324', 'V325', 'V326', 'V327', 'V328', 'V329', 'V330', 'V331', 'V332', 'V333', 'V334', 'V335', 'V336', 'V337', 'V338', 'V339'] for column in group_list: scatter(column) ##### Heatmap plt.figure(figsize = (15,15)) sns.heatmap(train[group_list].corr(), cmap='RdBu_r', annot=True, center=0.0) plt.show() ##### Ranking of pearson correlation. for column in group_list: corr(group_list,column) print(' ') ##### Based on pearson correlation, we grouped together the columns with corr > 0.7 reduce_groups = ['V322','V324'],['V323','V326','V324','V327','V326'],['V325'],['V328','V330','V329'],['V331','V333','V332','V337'],['V334','V336','V335'] result = reduce(reduce_groups) final_vcolumns.extend(result) print('Final V_Group14 columns:',result) ``` ### Final V columns ``` print('Number of V columns:', len(final_vcolumns)) print(final_vcolumns) ``` # Conclusions Based on previous process, we suggest keeping as final columns the ones describes below: ``` ##### 1st we sort them (ascending order) with a function final_ccolumns = order_finalcolumns(final_ccolumns) final_dcolumns = order_finalcolumns(final_dcolumns) final_mcolumns = order_finalcolumns(final_mcolumns) final_vcolumns = order_finalcolumns(final_vcolumns) ##### Final columns print(f'Final Transaction columns ({len(final_transactioncolumns)}): {final_transactioncolumns}') print(' ') print(f'Final C columns ({len(final_ccolumns)}): {final_ccolumns}') print(' ') print(f'Final D columns ({len(final_dcolumns)}): {final_dcolumns}') print(' ') print(f'Final M columns 
({len(final_mcolumns)}): {final_mcolumns}') print(' ') print(f'Final V columns ({len(final_vcolumns)}): {final_vcolumns}') print(' ') print('#' * 50) final_columns = final_transactioncolumns + final_ccolumns + final_dcolumns + final_mcolumns + final_vcolumns print(' ') print('Final columns:', final_columns) print(' ') print('Lenght of final columns:', len(final_columns)) ```
github_jupyter
# Dropout Dropout [1] is a technique for regularizing neural networks by randomly setting some features to zero during the forward pass. In this exercise you will implement a dropout layer and modify your fully-connected network to optionally use dropout. [1] [Geoffrey E. Hinton et al, "Improving neural networks by preventing co-adaptation of feature detectors", arXiv 2012](https://arxiv.org/abs/1207.0580) ``` # As usual, a bit of setup from __future__ import print_function import time import numpy as np import matplotlib.pyplot as plt from cs231n.classifiers.fc_net import * from cs231n.data_utils import get_CIFAR10_data from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array from cs231n.solver import Solver %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading external modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 def rel_error(x, y): """ returns relative error """ return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) # Load the (preprocessed) CIFAR10 data. data = get_CIFAR10_data() for k, v in data.items(): print('%s: ' % k, v.shape) ``` # Dropout forward pass In the file `cs231n/layers.py`, implement the forward pass for dropout. Since dropout behaves differently during training and testing, make sure to implement the operation for both modes. Once you have done so, run the cell below to test your implementation. 
``` np.random.seed(231) x = np.random.randn(500, 500) + 10 for p in [0.25, 0.4, 0.7]: out, _ = dropout_forward(x, {'mode': 'train', 'p': p}) out_test, _ = dropout_forward(x, {'mode': 'test', 'p': p}) print('Running tests with p = ', p) print('Mean of input: ', x.mean()) print('Mean of train-time output: ', out.mean()) print('Mean of test-time output: ', out_test.mean()) print('Fraction of train-time output set to zero: ', (out == 0).mean()) print('Fraction of test-time output set to zero: ', (out_test == 0).mean()) print() ``` # Dropout backward pass In the file `cs231n/layers.py`, implement the backward pass for dropout. After doing so, run the following cell to numerically gradient-check your implementation. ``` np.random.seed(231) x = np.random.randn(10, 10) + 10 dout = np.random.randn(*x.shape) dropout_param = {'mode': 'train', 'p': 0.2, 'seed': 123} out, cache = dropout_forward(x, dropout_param) dx = dropout_backward(dout, cache) dx_num = eval_numerical_gradient_array(lambda xx: dropout_forward(xx, dropout_param)[0], x, dout) # Error should be around e-10 or less print('dx relative error: ', rel_error(dx, dx_num)) ``` ## Inline Question 1: What happens if we do not divide the values being passed through inverse dropout by `p` in the dropout layer? Why does that happen? ## Answer: # Fully-connected nets with Dropout In the file `cs231n/classifiers/fc_net.py`, modify your implementation to use dropout. Specifically, if the constructor of the net receives a value that is not 1 for the `dropout` parameter, then the net should add dropout immediately after every ReLU nonlinearity. After doing so, run the following to numerically gradient-check your implementation. 
``` np.random.seed(231) N, D, H1, H2, C = 2, 15, 20, 30, 10 X = np.random.randn(N, D) y = np.random.randint(C, size=(N,)) for dropout in [1, 0.75, 0.5]: print('Running check with dropout = ', dropout) model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C, weight_scale=5e-2, dtype=np.float64, dropout=dropout, seed=123) loss, grads = model.loss(X, y) print('Initial loss: ', loss) # Relative errors should be around e-6 or less; Note that it's fine # if for dropout=1 you have W2 error be on the order of e-5. for name in sorted(grads): f = lambda _: model.loss(X, y)[0] grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5) print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))) print() ``` # Regularization experiment As an experiment, we will train a pair of two-layer networks on 500 training examples: one will use no dropout, and one will use a keep probability of 0.25. We will then visualize the training and validation accuracies of the two networks over time. 
``` # Train two identical nets, one with dropout and one without np.random.seed(231) num_train = 500 small_data = { 'X_train': data['X_train'][:num_train], 'y_train': data['y_train'][:num_train], 'X_val': data['X_val'], 'y_val': data['y_val'], } solvers = {} dropout_choices = [1, 0.25] for dropout in dropout_choices: model = FullyConnectedNet([500], dropout=dropout) print(dropout) solver = Solver(model, small_data, num_epochs=25, batch_size=100, update_rule='adam', optim_config={ 'learning_rate': 5e-4, }, verbose=True, print_every=100) solver.train() solvers[dropout] = solver # Plot train and validation accuracies of the two models train_accs = [] val_accs = [] for dropout in dropout_choices: solver = solvers[dropout] train_accs.append(solver.train_acc_history[-1]) val_accs.append(solver.val_acc_history[-1]) plt.subplot(3, 1, 1) for dropout in dropout_choices: plt.plot(solvers[dropout].train_acc_history, 'o', label='%.2f dropout' % dropout) plt.title('Train accuracy') plt.xlabel('Epoch') plt.ylabel('Accuracy') plt.legend(ncol=2, loc='lower right') plt.subplot(3, 1, 2) for dropout in dropout_choices: plt.plot(solvers[dropout].val_acc_history, 'o', label='%.2f dropout' % dropout) plt.title('Val accuracy') plt.xlabel('Epoch') plt.ylabel('Accuracy') plt.legend(ncol=2, loc='lower right') plt.gcf().set_size_inches(15, 15) plt.show() ``` ## Inline Question 2: Compare the validation and training accuracies with and without dropout -- what do your results suggest about dropout as a regularizer? ## Answer: ## Inline Question 3: Suppose we are training a deep fully-connected network for image classification, with dropout after hidden layers (parameterized by keep probability p). How should we modify p, if at all, if we decide to decrease the size of the hidden layers (that is, the number of nodes in each layer)? ## Answer:
github_jupyter
# <font color='firebrick'><center>Idx Stats Report</center></font> ### This report provides information from the output of samtools idxstats tool. It outputs the number of mapped reads per chromosome/contig. <br> ``` from IPython.display import display, Markdown from IPython.display import HTML import IPython.core.display as di import csv import numpy as np import zlib import CGAT.IOTools as IOTools import itertools as ITL import os import string import pandas as pd import sqlite3 import matplotlib as mpl from matplotlib.backends.backend_pdf import PdfPages # noqa: E402 #mpl.use('Agg') # noqa: E402 import matplotlib.pyplot as plt from matplotlib.ticker import FuncFormatter import matplotlib.font_manager as font_manager import matplotlib.lines as mlines from matplotlib.colors import ListedColormap from matplotlib import cm from matplotlib import rc, font_manager import CGAT.Experiment as E import math from random import shuffle import matplotlib as mpl import datetime import seaborn as sns import nbformat %matplotlib inline ################################################## #Plot customization #plt.ioff() plt.style.use('seaborn-white') #plt.style.use('ggplot') title_font = {'size':'20','color':'darkblue', 'weight':'bold', 'verticalalignment':'bottom'} # Bottom vertical alignment for more space axis_font = {'size':'18', 'weight':'bold'} #For summary page pdf '''To add description page plt.figure() plt.axis('off') plt.text(0.5,0.5,"my title",ha='center',va='center') pdf.savefig() ''' #Panda data frame cutomization pd.options.display.width = 80 pd.set_option('display.max_colwidth', -1) chr_feature=['total_reads','total_mapped_reads', 'chr1','chr2','chr3','chr4', 'chr5','chr6','chr7','chr8', 'chr9','chr10','chr11','chr12', 'chr13','chr14','chr15','chr16', 'chr17','chr18','chr19','chrX', 'chrY','chrM'] chr_index=['Total reads','Total mapped reads', 'chr1','chr2','chr3','chr4', 'chr5','chr6','chr7','chr8', 'chr9','chr10','chr11','chr12', 'chr13','chr14','chr15','chr16', 
'chr17','chr18','chr19','chrX', 'chrY','chrM'] colors_category = ['red','green','darkorange','yellowgreen', 'pink', 'gold', 'lightskyblue', 'orchid','darkgoldenrod','skyblue','b', 'red', 'darkorange','grey','violet','magenta','cyan', 'hotpink','mediumslateblue'] threshold = 5 def hover(hover_color="#ffff99"): return dict(selector="tr:hover", props=[("background-color", "%s" % hover_color)]) def y_fmt(y, pos): decades = [1e9, 1e6, 1e3, 1e0, 1e-3, 1e-6, 1e-9 ] suffix = ["G", "M", "k", "" , "m" , "u", "n" ] if y == 0: return str(0) for i, d in enumerate(decades): if np.abs(y) >=d: val = y/float(d) signf = len(str(val).split(".")[1]) if signf == 0: return '{val:d} {suffix}'.format(val=int(val), suffix=suffix[i]) else: if signf == 1: #print(val, signf) if str(val).split(".")[1] == "0": return '{val:d} {suffix}'.format(val=int(round(val)), suffix=suffix[i]) tx = "{"+"val:.{signf}f".format(signf = signf) +"} {suffix}" return tx.format(val=val, suffix=suffix[i]) #return y return y def getTables(dbname): ''' Retrieves the names of all tables in the database. Groups tables into dictionaries by annotation ''' dbh = sqlite3.connect(dbname) c = dbh.cursor() statement = "SELECT name FROM sqlite_master WHERE type='table'" c.execute(statement) tables = c.fetchall() print(tables) c.close() dbh.close() return def readDBTable(dbname, tablename): ''' Reads the specified table from the specified database. 
Returns a list of tuples representing each row ''' dbh = sqlite3.connect(dbname) c = dbh.cursor() statement = "SELECT * FROM %s" % tablename c.execute(statement) allresults = c.fetchall() c.close() dbh.close() return allresults def getDBColumnNames(dbname, tablename): dbh = sqlite3.connect(dbname) res = pd.read_sql('SELECT * FROM %s' % tablename, dbh) dbh.close() return res.columns def plotBar(df,samplename): fig, ax = plt.subplots() ax.set_frame_on(True) ax.xaxis.set_major_formatter(FuncFormatter(y_fmt)) colors=['yellowgreen','darkorange'] for ii in range(0,df.shape[0]): plt.barh(ii,df['chrX'][ii],color=colors[0], align="center",height=0.6,edgecolor=colors[0]) plt.barh(ii,df['chrY'][ii],color=colors[1], align="center",height=0.6,edgecolor=colors[0]) fig = plt.gcf() fig.set_size_inches(20,14) plt.yticks(fontsize =20,weight='bold') plt.yticks(range(df.shape[0]),df['track']) plt.xticks(fontsize =20,weight='bold') ax.grid(which='major', linestyle='-', linewidth='0.3') plt.ylabel("Sample",labelpad=65,fontsize =25,weight='bold') plt.xlabel("\nMapped reads",fontsize =25,weight='bold') plt.title("Reads mapped to X and Y chromosome\n",fontsize =30,weight='bold',color='darkblue') plt.gca().invert_yaxis() legend_properties = {'weight':'bold','size':'20'} leg = plt.legend(chr_feature[21:23],title="Contigs",prop=legend_properties,bbox_to_anchor=(1.14,0.65),frameon=True) leg.get_frame().set_edgecolor('k') leg.get_frame().set_linewidth(2) leg.get_title().set_fontsize(25) leg.get_title().set_fontweight('bold') plt.tight_layout() #plt.savefig(''.join([samplename,'.png']),bbox_inches='tight',pad_inches=0.6) plt.show() return fig def displayTable(plotdf,name): # Display table styles = [ hover(), dict(selector="th", props=[("font-size", "130%"), ("text-align", "center"), ]), dict(selector="td", props=[("font-size", "120%"), ("text-align", "center"), ]), dict(selector="caption", props=[("caption-side", "top"), ("text-align", "center"), ("font-size", "100%")]) ] df1 = 
(plotdf.style.set_table_styles(styles).set_caption(name)) display(df1) print("\n\n") def plot_idxstats(newdf,df,samplename): fig,ax = plt.subplots() ax.grid(which='major', linestyle='-', linewidth='0.25') ax.yaxis.set_major_formatter(FuncFormatter(y_fmt)) index=list(range(newdf.shape[1])) colors = plt.cm.plasma(np.linspace(0,1,newdf.shape[0])) for ii in range(0,newdf.shape[0]): plt.plot(index,newdf.iloc[ii],linewidth=2,color=colors[ii],linestyle="-",marker='o',fillstyle='full',markersize=8) fig = plt.gcf() fig.set_size_inches(11,8) plt.xticks(index,chr_feature[2:24],fontsize = 14,weight='bold') plt.yticks(fontsize = 14,weight='bold') labels = ax.get_xticklabels() plt.setp(labels, rotation=40) legend_properties = {'weight':'bold','size':'14'} leg = plt.legend(df['track'],title="Sample",prop=legend_properties,bbox_to_anchor=(1.42,1.01),frameon=True) leg.get_frame().set_edgecolor('k') leg.get_frame().set_linewidth(2) leg.get_title().set_fontsize(16) leg.get_title().set_fontweight('bold') plt.xlabel('\nContigs',**axis_font) plt.ylabel('Mapped Reads',**axis_font,labelpad=40) plt.title("Mapped reads per contig", **title_font) plt.tight_layout() #plt.savefig(''.join([samplename,'.png']),bbox_inches='tight',pad_inches=0.6) print("\n\n") plt.show() return fig def idxStatsReport(dbname, tablename): trans = pd.DataFrame(readDBTable(dbname,tablename)) trans.columns = getDBColumnNames(dbname,tablename) df=trans #print(df) #newdf = df[df.columns[0:25]] newdf = df[chr_feature[2:24]] #print(newdf) plotdf = df[chr_feature] plotdf.columns = chr_index plotdf.index = [df['track']] #del plotdf.index.name #pdf=PdfPages("idx_stats_summary.pdf") displayTable(plotdf,"Idx Full Stats") fig = plot_idxstats(newdf,df,"idx_full_stats") #pdf.savefig(fig,bbox_inches='tight',pad_inches=0.6) print("\n\n\n") fig = plotBar(df,"idxStats_X_Y_mapped_reads") #pdf.savefig(fig,bbox_inches='tight',pad_inches=0.6) #pdf.close() #getTables("csvdb") idxStatsReport("../csvdb","idxstats_reads_per_chromosome") ```
github_jupyter
## 1. Meet Dr. Ignaz Semmelweis <p><img style="float: left;margin:5px 20px 5px 1px" src="https://assets.datacamp.com/production/project_20/img/ignaz_semmelweis_1860.jpeg"></p> <!-- <img style="float: left;margin:5px 20px 5px 1px" src="https://assets.datacamp.com/production/project_20/datasets/ignaz_semmelweis_1860.jpeg"> --> <p>This is Dr. Ignaz Semmelweis, a Hungarian physician born in 1818 and active at the Vienna General Hospital. If Dr. Semmelweis looks troubled it's probably because he's thinking about <em>childbed fever</em>: A deadly disease affecting women that just have given birth. He is thinking about it because in the early 1840s at the Vienna General Hospital as many as 10% of the women giving birth die from it. He is thinking about it because he knows the cause of childbed fever: It's the contaminated hands of the doctors delivering the babies. And they won't listen to him and <em>wash their hands</em>!</p> <p>In this notebook, we're going to reanalyze the data that made Semmelweis discover the importance of <em>handwashing</em>. Let's start by looking at the data that made Semmelweis realize that something was wrong with the procedures at Vienna General Hospital.</p> ``` # importing modules # ... YOUR CODE FOR TASK 1 ... import pandas as pd # Read datasets/yearly_deaths_by_clinic.csv into yearly yearly = pd.read_csv("yearly_deaths_by_clinic.csv") # Print out yearly # ... YOUR CODE FOR TASK 1 ... print(yearly) ``` ## 2. The alarming number of deaths <p>The table above shows the number of women giving birth at the two clinics at the Vienna General Hospital for the years 1841 to 1846. You'll notice that giving birth was very dangerous; an <em>alarming</em> number of women died as the result of childbirth, most of them from childbed fever.</p> <p>We see this more clearly if we look at the <em>proportion of deaths</em> out of the number of women giving birth. 
Let's zoom in on the proportion of deaths at Clinic 1.</p> ``` # Calculate proportion of deaths per no. births # ... YOUR CODE FOR TASK 2 ... yearly['proportion_deaths'] = yearly['deaths']/yearly['births'] # Extract clinic 1 data into yearly1 and clinic 2 data into yearly2 yearly1 = yearly[yearly['clinic'] == 'clinic 1'] yearly2 = yearly[yearly['clinic'] == 'clinic 2'] # Print out yearly1 # ... YOUR CODE FOR TASK 2 ... print(yearly1) ``` ## 3. Death at the clinics <p>If we now plot the proportion of deaths at both clinic 1 and clinic 2 we'll see a curious pattern...</p> ``` from matplotlib import pyplot as plt # This makes plots appear in the notebook %matplotlib inline # Plot yearly proportion of deaths at the two clinics # ... YOUR CODE FOR TASK 3 ... ax = yearly1.plot(y="proportion_deaths", x="year", label="Yearly 1") yearly2.plot(y="proportion_deaths", x="year", label= "Yearly 2", ax=ax) ax.set_ylabel("Proportion deaths") ``` ## 4. The handwashing begins <p>Why is the proportion of deaths constantly so much higher in Clinic 1? Semmelweis saw the same pattern and was puzzled and distressed. The only difference between the clinics was that many medical students served at Clinic 1, while mostly midwife students served at Clinic 2. While the midwives only tended to the women giving birth, the medical students also spent time in the autopsy rooms examining corpses. </p> <p>Semmelweis started to suspect that something on the corpses, spread from the hands of the medical students, caused childbed fever. So in a desperate attempt to stop the high mortality rates, he decreed: <em>Wash your hands!</em> This was an unorthodox and controversial request, nobody in Vienna knew about bacteria at this point in time. </p> <p>Let's load in monthly data from Clinic 1 to see if the handwashing had any effect.</p> ``` # Read datasets/monthly_deaths.csv into monthly monthly = pd.read_csv("monthly_deaths.csv" , parse_dates = ["date"]) # Calculate proportion of deaths per no. 
births # ... YOUR CODE FOR TASK 4 ... monthly['proportion_deaths'] = monthly['deaths']/ monthly['births'] # Print out the first rows in monthly # ... YOUR CODE FOR TASK 4 ... print(monthly.head(1)) ``` ## 5. The effect of handwashing <p>With the data loaded we can now look at the proportion of deaths over time. In the plot below we haven't marked where obligatory handwashing started, but it reduced the proportion of deaths to such a degree that you should be able to spot it!</p> ``` # Plot monthly proportion of deaths # ... YOUR CODE FOR TASK 5 ... ax = monthly.plot(y="proportion_deaths", x="date") plt.ylabel("Proportion deaths") ``` ## 6. The effect of handwashing highlighted <p>Starting from the summer of 1847 the proportion of deaths is drastically reduced and, yes, this was when Semmelweis made handwashing obligatory. </p> <p>The effect of handwashing is made even more clear if we highlight this in the graph.</p> ``` # Date when handwashing was made mandatory import pandas as pd handwashing_start = pd.to_datetime('1847-06-01') # Split monthly into before and after handwashing_start before_washing = monthly[ monthly["date"] < handwashing_start] after_washing = monthly[ monthly["date"] >= handwashing_start] # Plot monthly proportion of deaths before and after handwashing # ... YOUR CODE FOR TASK 6 ... ax = before_washing.plot(y="proportion_deaths", x="date", label="before_washing") after_washing.plot(y="proportion_deaths", x="date", label= "after_washing",ax=ax) plt.ylabel("Proportion deaths") ``` ## 7. More handwashing, fewer deaths? <p>Again, the graph shows that handwashing had a huge effect. How much did it reduce the monthly proportion of deaths on average?</p> ``` # Difference in mean monthly proportion of deaths due to handwashing before_proportion = before_washing['proportion_deaths'] after_proportion = after_washing['proportion_deaths'] mean_diff = after_proportion.mean() - before_proportion.mean() mean_diff ``` ## 8. 
A Bootstrap analysis of Semmelweis handwashing data <p>It reduced the proportion of deaths by around 8 percentage points! From 10% on average to just 2% (which is still a high number by modern standards). </p> <p>To get a feeling for the uncertainty around how much handwashing reduces mortalities we could look at a confidence interval (here calculated using the bootstrap method).</p> ``` # A bootstrap analysis of the reduction of deaths due to handwashing boot_mean_diff = [] for i in range(3000): boot_before = before_proportion.sample(frac=1, replace= True) boot_after = after_proportion.sample(frac=1, replace= True) boot_mean_diff.append(boot_after.mean() - boot_before.mean()) # Calculating a 95% confidence interval from boot_mean_diff confidence_interval = pd.Series(boot_mean_diff).quantile([0.025, 0.975]) confidence_interval ``` ## 9. The fate of Dr. Semmelweis <p>So handwashing reduced the proportion of deaths by between 6.7 and 10 percentage points, according to a 95% confidence interval. All in all, it would seem that Semmelweis had solid evidence that handwashing was a simple but highly effective procedure that could save many lives.</p> <p>The tragedy is that, despite the evidence, Semmelweis' theory — that childbed fever was caused by some "substance" (what we today know as <em>bacteria</em>) from autopsy room corpses — was ridiculed by contemporary scientists. The medical community largely rejected his discovery and in 1849 he was forced to leave the Vienna General Hospital for good.</p> <p>One reason for this was that statistics and statistical arguments were uncommon in medical science in the 1800s. Semmelweis only published his data as long tables of raw data, but he didn't show any graphs nor confidence intervals. 
If he would have had access to the analysis we've just put together he might have been more successful in getting the Viennese doctors to wash their hands.</p> ``` # The data Semmelweis collected points to that: doctors_should_wash_their_hands = True ```
github_jupyter
## Import the scripts ``` %run Implied_Volatility.ipynb %run Option_Greeks.ipynb ``` ## Draw the Impied Volatility Graph of 0130 and 0131 ``` date_ = '0130' S = todayStockPrice(date = date_) list_StockPrices = moneyness_list(S, gapType = "month", gapNum = 3) # only 3 OTM price on monthly basis # Split the df to df_call and df_put df = df_generate(product = 'TXO', date = date_ ) df_put = df[df.買賣權 == 'Put'].reset_index(drop=True) df_call = df[df.買賣權 == 'Call'].reset_index(drop=True) # append the IV df_put = Flag_Moneyness(df_put, S = S, code = 'Put') list_IV_put, categoryNAPut = IV_List(df = df_put, Stock = S, d = date_ , flag = 'Put') df_put["IV"] = list_IV_put df_call = Flag_Moneyness(df_call, S = S, code = 'Call') list_IV_call, categoryNACall = IV_List(df = df_call, Stock = S, d = date_ , flag = 'Call') df_call["IV"] = list_IV_call # Combine call and put with only OTM df_OTM = combine_OTM(df_call, df_put, list_StockPrices) plot_IV(df = df_OTM, S = S, date = date_) df_OTM ``` ## Call- Put Implied Volatility Spread ``` CPIV = [] for ivc, ivp in zip(list_IV_call,list_IV_put): if type(ivc) == str or type(ivp) == str: CPIV.append("NA") else: CPIV.append(ivc - ivp) df_CPIV = pd.DataFrame({'CPIV': CPIV}) K_cut = list(df_call["履約價"][10:25]) CPIV_cut = CPIV[10:25] plt.style.use('ggplot') plt.figure(figsize=(10,5)) plt.plot(K_cut, CPIV_cut, marker='o') plt.axvline(x = S, linestyle = 'dashed', color = 'black') for a, b in zip(K_cut, CPIV_cut): plt.text(a, b, str(round(b, 3))) title_name = "CPIV Spread_" + date_ plt.title(title_name) plt.xlabel('Excercise Price', fontsize=14) plt.ylabel('Implied Volatility Spread', fontsize=14) plt.savefig(os.path.join(work_dir, 'Graph', title_name)) plt.show() date_ = '0130' S = todayStockPrice(date = date_) list_StockPrices = moneyness_list(S, gapType = "month", gapNum = 3) # only 3 OTM price on monthly basis # Split the df to df_call and df_put df = df_generate(product = 'TXO', date = date_ ) df_put = df[df.買賣權 == 
'Put'].reset_index(drop=True) df_call = df[df.買賣權 == 'Call'].reset_index(drop=True) # append the IV df_put = Flag_Moneyness(df_put, S = S, code = 'Put') list_IV_put, categoryNAPut = IV_List(df = df_put, Stock = S, d = date_ , flag = 'Put') df_put["IV"] = list_IV_put df_call = Flag_Moneyness(df_call, S = S, code = 'Call') list_IV_call, categoryNACall = IV_List(df = df_call, Stock = S, d = date_ , flag = 'Call') df_call["IV"] = list_IV_call # Combine call and put with only OTM df_OTM = combine_OTM(df_call, df_put, list_StockPrices) plot_IV(df = df_OTM, S = S, date = date_) date_ = '0131' S = todayStockPrice(date = date_) list_StockPrices = moneyness_list(S, gapType = "month", gapNum = 3) # only 3 OTM price on monthly basis # Split the df to df_call and df_put df = df_generate(product = 'TXO', date = date_ ) df_put = df[df.買賣權 == 'Put'].reset_index(drop=True) df_call = df[df.買賣權 == 'Call'].reset_index(drop=True) # append the IV df_put = Flag_Moneyness(df_put, S = S, code = 'Put') list_IV_put, categoryNAPut = IV_List(df = df_put, Stock = S, d = date_ , flag = 'Put') df_put["IV"] = list_IV_put df_call = Flag_Moneyness(df_call, S = S, code = 'Call') list_IV_call, categoryNACall = IV_List(df = df_call, Stock = S, d = date_ , flag = 'Call') df_call["IV"] = list_IV_call # Combine call and put with only OTM df_OTM_2 = combine_OTM(df_call, df_put, list_StockPrices) plot_IV(df = df_OTM_2, S = S, date = date_) K1 = list(df_OTM["履約價"]) IV1 = list(df_OTM["IV"]) K2 = list(df_OTM_2["履約價"]) IV2 = list(df_OTM_2["IV"]) S1 = todayStockPrice(date = '0130') S2 = todayStockPrice(date = '0131') plt.style.use('ggplot') meanIV1 = mean(IV1) meanIV2 = mean(IV2) plt.figure(figsize=(10,5)) plt.plot(K1, IV1, marker='o') plt.plot(K2, IV2, marker='o') plt.axvline(x = S1, linestyle = 'dashed', color = 'grey') for a, b in zip(K1, IV1): plt.text(a, b, str(round(b, 3))) plt.axvline(x = S2, linestyle = 'dashed', color = 'black') for a, b in zip(K2, IV2): plt.text(a, b, str(round(b, 3))) title_name = 
"Implied Volatility Comparison" plt.title(title_name) plt.xlabel('Excercise Price', fontsize=14) plt.ylabel('Implied Volatility', fontsize=14) plt.savefig(os.path.join(work_dir, 'Graph', title_name)) plt.show() ## Des. Statistics of IV def describe_list(list_iv, list_na): list_a = [] for ele in list_iv: if not isinstance(ele, str): #NA value of Imp Vol is "-" list_a.append(ele) print("mean: " , np.average(list_a)) print("max: ", max(list_a)) print("min: ", min(list_a)) print("NA: ", sum(categoryNAPut)) describe_list(list_IV_put, categoryNAPut) describe_list(list_IV_call, categoryNACall) ``` ## Calculate the Individual Option Greeks ``` s = 11368 exp_date = '20200219' eval_date = '20200130' rf = 0.0 div = 0.0 # Build up a delta table list_k = list(df_call["履約價"]) delta_C = [] delta_P = [] for k in list_k: opt_C = Option(s=s, k=k, eval_date=eval_date, exp_date=exp_date, rf=rf, vol=vol, right='Call', div = div) delta_C.append(opt_C.get_greeks()[0]) opt_P = Option(s=s, k=k, eval_date=eval_date, exp_date=exp_date, rf=rf, vol=vol, right='Put', div = div) delta_P.append(opt_P.get_greeks()[0]) df_delta = pd.DataFrame({'Excercise Price': list_k, 'Delta_C': delta_C, 'Delta_P': delta_P}) df_delta[10:25] # Search for certain area ``` # Construct the Option Strategies ### We expect the vega decrease, then we can construct the strategies with approximate delta neutral ### 1. 
Short Strangle: Sell 1 Call and Sell 1 Put ``` df_call[df_call.履約價 == 11300] # delta_Call: 0.56 df_put[df_put.履約價 == 11500] # delta_Put: -0.58 d_option1 = {'現貨價格': '11386', '到期日': '20200219', '買賣權': 'Call', '履約價': '11300', '今日': '20200130', '部位': '-1', '結算價': '245.0', 'IV': '0.196352'} d_option2 = {'現貨價格': '11386', '到期日': '20200219', '買賣權': 'Put', '履約價': '11500', '今日': '20200130', '部位': '-1', '結算價': '265.0', 'IV': '0.179389'} df_options_strangle = pd.DataFrame([d_option1, d_option2]) opt_strat1 = Options_strategy(df_options_strangle) greeks_strat1 = opt_strat1.get_greeks() opt_strat1.describe_portfolio() print("max Payoff: " , opt_strat1.get_maxPayoff()) opt_strat1.portfolio_payoff(fileName = "Short Strangle") ``` ### 2. Long Condor: Short Strangle & Long Strangle ``` df_call[df_call.履約價 == 11100] # delta_Call: 0.27 df_put[df_put.履約價 == 11700] # delta_Put: -0.29 d_option3 = {'現貨價格': '11386', '到期日': '20200219', '買賣權': 'Call', '履約價': '11100', '今日': '20200130', '部位': '1', '結算價': '384.0', 'IV': '0.210976'} d_option4 = {'現貨價格': '11386', '到期日': '20200219', '買賣權': 'Put', '履約價': '11700', '今日': '20200130', '部位': '1', '結算價': '388.0', 'IV': '0.166417'} df_options_condor = pd.DataFrame([d_option1, d_option2, d_option3, d_option4]) opt_strat2 = Options_strategy(df_options_condor) greeks_strat2 = opt_strat2.get_greeks() opt_strat2.describe_portfolio() print("max Payoff: " , opt_strat2.get_maxPayoff()) opt_strat2.portfolio_payoff(fileName = "Long Condor") ``` ### 3. 
Bull Spread: Buy 1 Call (at lower K) and Sell 1 Call (at higher K) ``` df_call[df_call.履約價 == 11300] df_call[df_call.履約價 == 11400] d_option1 = {'現貨價格': '11386', '到期日': '20200219', '買賣權': 'Call', '履約價': '11300', '今日': '20200130', '部位': '1', '結算價': '245.0', 'IV': '0.196352'} d_option2 = {'現貨價格': '11386', '到期日': '20200219', '買賣權': 'Call', '履約價': '11400', '今日': '20200130', '部位': '-1', '結算價': '186.0', 'IV': '0.187406'} df_options_strangle = pd.DataFrame([d_option1, d_option2]) opt_strat3 = Options_strategy(df_options_strangle) greeks_strat3 = opt_strat3.get_greeks() opt_strat3.describe_portfolio() print("max Payoff: " , opt_strat3.get_maxPayoff()) opt_strat3.portfolio_payoff(fileName = "Bull Spread") ``` ### if we offset the stratrgy tomorrow ## 1. Short Strangle ``` # 0130 cost_ss0130 = get_optionPrice(date = "0130", right = "Call", k = 11300) + get_optionPrice(date = "0130", right = "Put", k = 11500) cost_ss0130 # 0131 cost_ss0131 = get_optionPrice(date = "0131", right = "Call", k = 11300) + get_optionPrice(date = "0131", right = "Put", k = 11500) cost_ss0131 # Short 1 call and 1 put and Buy back tomorrow Pnl_ss = cost_ss0130 - cost_ss0131 Pnl_ss ``` ## 2. 
Long Condor ``` cost_lc0130 = cost_ss0130 - get_optionPrice(date = "0130", right = "Call", k = 11100) - get_optionPrice(date = "0130", right = "Put", k = 11700) cost_lc0130 cost_lc0131 = cost_ss0131 - get_optionPrice(date = "0131", right = "Call", k = 11100) - get_optionPrice(date = "0131", right = "Put", k = 11700) cost_lc0131 # Long Condor and Sell back tomorrow Pnl_lc = cost_lc0130 - cost_lc0131 Pnl_lc call_price = 180 Stock = 10125 K = 10100 t = 5/252 r = 0.0 q = 0 call_iv = iv(price = call_price, flag = 'c', S = Stock, K = K, t = t, r = r, q = q) call_iv d_option7 = {'現貨價格': '10125', '到期日': '20200415', '買賣權': 'Call', '履約價': '10100', '今日': '20200408', '部位': '1', '結算價': '180.0', 'IV': '0.29425'} # d_option4 = {'現貨價格': '11386', '到期日': '20200219', '買賣權': 'Put', '履約價': '11700', # '今日': '20200130', '部位': '1', '結算價': '388.0', 'IV': '0.166417'} df_options_condor = pd.DataFrame([d_option7]) opt_strat7 = Options_strategy(df_options_condor) greeks_strat7 = opt_strat7.get_greeks() opt_strat7.describe_portfolio() ```
github_jupyter
# Neural Network for binary classification using finite difference approximation to update the weights, Leaky ReLu in between the layers and sigmoid for the output ``` import numpy as np import matplotlib.pyplot as plt import copy import matplotlib as mpl global_dpi = 120 mpl.rcParams['figure.dpi']= global_dpi ``` # Generate dummy data ``` _input = [] output = [] input_dims = 5 std_dev = 0.15 number_of_points = 200 for i in range(number_of_points): intie = np.random.randint(0,2) if intie == 1: output.append(intie) data = np.ones(input_dims) data = np.asarray([np.random.normal(intie,std_dev) for x in data]) _input.append(copy.deepcopy(data)) else: output.append(intie) data = np.ones(input_dims) data = np.asarray([np.random.normal(intie,std_dev) for x in data]) _input.append(copy.deepcopy(data)) _input = np.asarray(_input) output = np.asarray(output) ``` # Normalize input per variable (column) ``` _input = _input.T for i in _input: mini = np.min(i) maxi = np.max(i) for k in range(len(i)): i[k] = (i[k]-mini)/(maxi-mini) _input = _input.T ``` # Split dummy data ``` fraction = 0.7 index = number_of_points*0.7 index = int(np.floor(index)) train_x = _input[:index,:] train_y = output[:index] test_x = _input[index:,:] test_y = output[index:] ``` # Neural Network for binary classification ``` class Neural_Network: def __init__(self,layers): self.layers = layers ################## Outputs self.losses = [] self.accuracies = [] self.test_accuracies = [] self.test_losses = [] ################################# ###################### Initialize weights randomly weights = [] for i in range(len(layers)-1): cols =layers[i] rows = layers[i+1] w = np.random.uniform(-1,1,size = (rows,cols)) weights.append(copy.deepcopy(w)) self.weights = weights ################################# def leaky_relu(self,x): result = [] for i in x: result.append(max(0.01*i,i)) return np.asarray(result) def sigmoid(self,x): return 1/(1+np.exp(-x)) def loss(self,p,y): for i in range(len(p)): ##tolerance p[i] = 
min(p[i],0.9995) p[i] = max(0.0005,p[i]) ### return -(y*np.log(p)+(1-y)*np.log(1-p)) def predict(self,x,weights = None, custom = False): if not custom: weights = self.weights predictions = [] if len(x.shape) == 2: for b in x: cache = b counter = 0 for i in weights: cache = np.dot(i,cache) if counter < (len(weights)-1): cache = self.leaky_relu(cache) else: cache = self.sigmoid(cache) counter = counter + 1 predictions.append(cache[0]) elif len(x.shape) == 1: cache = x counter = 0 for i in weights: cache = np.dot(i,cache) if counter < (len(weights)-1): cache = self.leaky_relu(cache) else: cache = self.sigmoid(cache) counter = counter + 1 predictions.append(cache[0]) else: raise Exception('Unsupported input dimensions: ' + str(x.shape)) return np.asarray(predictions) def gradients(self,x,y,derriv_step_size = 0.00001): ## calculate current loss predictions = self.predict(x) ##### Train losses current_loss = self.loss(predictions,y) current_loss = np.sum(current_loss) ##### Train accuracies current_acc = self.accuracy(predictions,y) ##### init_gradients = [] for i in self.weights: init_gradients.append(np.zeros((i.shape))) for i in range(len(self.weights)): for k in range(self.weights[i].shape[0]): for j in range(self.weights[i].shape[1]): weights = copy.deepcopy(self.weights) weights[i][k,j] += derriv_step_size predictions = self.predict(x,weights = weights,custom = True) new_loss = np.sum(self.loss(predictions,y)) gradient = (new_loss-current_loss)/derriv_step_size init_gradients[i][k,j] = gradient return init_gradients,current_loss,current_acc def train(self,x,y,step_size = 0.005,iters =10,derriv_step_size = 0.00001, calc_test = False, x_test = None, y_test = None): for i in range(iters): ################ add all to lists if calc_test: predictions = self.predict(x_test) self.test_losses.append(np.sum(self.loss(predictions,y_test))) self.test_accuracies.append(self.accuracy(predictions,y_test) ) grads, loss, acc = self.gradients(x,y,derriv_step_size = derriv_step_size) 
self.losses.append(loss) self.accuracies.append(acc) ################################# #### Update weights for w,g in zip(self.weights,grads): w += - step_size*g ##### ###### final outputs here predictions = self.predict(x) self.losses.append(np.sum(self.loss(predictions,y))) self.accuracies.append(self.accuracy(predictions,y)) if calc_test: predictions = self.predict(x_test) self.test_losses.append(np.sum(self.loss(predictions,y_test))) self.test_accuracies.append(self.accuracy(predictions,y_test)) ############### def accuracy(self,p,y): cache = copy.deepcopy(p) for i in range(len(cache)): if cache[i] >= 0.5: cache[i] = 1 else: cache[i] = 0 cache = [int(i) for i in cache] total = 0 correct = 0 for i,j in zip(cache,y): if i == j: correct += 1 total += 1 return correct/total ``` # Test Neural Network ``` ### First layer of input dimension and last layer of dimension 1 (sigmoid) NN = Neural_Network([input_dims,3,1]) #### NN.train(train_x,train_y,iters = 1000,step_size = 0.005,derriv_step_size = 0.00001, calc_test = True, x_test = test_x, y_test = test_y) plt.plot(NN.losses) plt.plot(NN.test_losses) plt.xlabel('Iterations') plt.ylabel('Loss') plt.legend(('Train','Validation')) plt.show() plt.plot(NN.accuracies) plt.plot(NN.test_accuracies) plt.xlabel('Iterations') plt.ylabel('Accuracy') plt.legend(('Train','Validation')) plt.show() ``` # Weights ``` print(NN.weights) ```
github_jupyter
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt star_wars = pd.read_csv("star_wars.csv", encoding="ISO-8859-1") star_wars.head(3) ``` Remove all rows where the `RespondentID` column is not null (NaN). ``` star_wars = star_wars[pd.notnull(star_wars["RespondentID"])] star_wars.head() ``` Convert column string values from "Yes"/"No" to corresponding booleans by mapping a dictionary to each value of the Series: ``` yes_no = { "Yes": True, "No": False } star_wars["Have you seen any of the 6 films in the Star Wars franchise?"] = \ star_wars["Have you seen any of the 6 films in the Star Wars franchise?"].map(yes_no) star_wars["Do you consider yourself to be a fan of the Star Wars film franchise?"] = \ star_wars["Do you consider yourself to be a fan of the Star Wars film franchise?"].map(yes_no) star_wars.head() star_wars.info() ``` Convert column string values from the name of the movie to True or Nan to False. Use a mapping dictionary for this whcih we'll create from the column name (if the column name is the value as well then it corresponds to True, otherwise it's NaN and corresponds to False): ``` print("BEFORE MAPPING") star_wars[star_wars.columns[3:9]] def t_or_f(value): if value is np.NaN: return False else: return True for col in star_wars.columns[3:9]: # mapper = {col : True, np.NaN: False} star_wars[col] = star_wars[col].map(t_or_f) star_wars[star_wars.columns[3:9]] star_wars = star_wars.rename(columns={ "Which of the following Star Wars films have you seen? 
Please select all that apply.": "seen1", "Unnamed: 4": "seen2", "Unnamed: 5": "seen3", "Unnamed: 6": "seen4", "Unnamed: 7": "seen5", "Unnamed: 8": "seen6", "Please rank the Star Wars films in order of preference with 1 being your favorite film in the franchise and 6 being your least favorite film.": "favorite1", "Unnamed: 10": "favorite2", "Unnamed: 11": "favorite3", "Unnamed: 12": "favorite4", "Unnamed: 13": "favorite5", "Unnamed: 14": "favorite6" }) star_wars.head(3) star_wars[star_wars.columns[9:15]] = star_wars[star_wars.columns[9:15]].astype(float) means = star_wars[star_wars.columns[9:15]].mean(axis=0) %matplotlib inline import seaborn as sns sns.set_style("whitegrid") ax = sns.barplot(x=star_wars.columns[9:15], y=means) seens = star_wars[star_wars.columns[3:9]].sum() ax = sns.barplot(x=star_wars.columns[3:9], y=seens) males = star_wars[star_wars["Gender"] == "Male"] females = star_wars[star_wars["Gender"] == "Female"] means_female = females[females.columns[9:15]].mean(axis=0) means_male = males[males.columns[9:15]].mean(axis=0) seens_female = females[females.columns[3:9]].sum() seens_male = males[males.columns[3:9]].sum() fig = plt.figure(figsize=(12, 9)) ax1 = fig.add_subplot(221) ax1.set_title("Male Ranking") ax2 = fig.add_subplot(223) ax2.set_title("Male Totals") ax3 = fig.add_subplot(222) ax3.set_title("Female Ranking") ax4 = fig.add_subplot(224) ax4.set_title("Female Totals") sns.barplot(x=star_wars.columns[9:15], y=means_male, ax=ax1) sns.barplot(x=star_wars.columns[9:15], y=means_female, ax=ax3) sns.barplot(x=star_wars.columns[3:9], y=seens_male, ax=ax2) sns.barplot(x=star_wars.columns[3:9], y=seens_female, ax=ax4) ```
github_jupyter
<a href="https://colab.research.google.com/github/linesn/xmen/blob/main/covariance_analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` import pandas as pd import numpy as np import seaborn as sn import matplotlib.pyplot as plt from sklearn.model_selection import cross_val_score from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.datasets import make_classification pd.set_option('display.max_columns', None) pd.set_option('display.max_rows', None) # load the data characters = pd.read_csv('characters.csv') cv = pd.read_csv('character_visualization.csv') cb = pd.read_csv('comic_bechdel.csv') covers = pd.read_csv('covers.csv') ic = pd.read_csv('issue_collaborators.csv') locations = pd.read_csv('locations.csv') xmen_bechdel = pd.read_csv('xmen_bechdel.csv') characters.head() # create x and Y matrices to store the data x = characters[['rendered_unconcious']].to_numpy() y = characters[['captured']].to_numpy() # convert the target variable (X-Man character names) to integer representations character_name = characters['character'].unique() character_name_dict = dict(zip(set(character_name), range(len(character_name)))) characters['character'] = characters['character'].apply(lambda x: character_name_dict[x]) # take a subset of the features my_cols = ['character', 'rendered_unconcious', 'captured', 'declared_dead', 'redressed', 'depowered', 'clothing_torn', 'subject_to_torture', 'quits_team', 'surrenders', 'number_of_kills_humans', 'number_of_kills_non_humans','shower_number_of_panels_shower_lasts', 'bath_number_of_panels_bath_lasts', 'depicted_eating_food', 'visible_tears_number_of_panels', 'visible_tears_number_of_intances'] # subset the dataframe on the columns we selected characters_subset = characters[my_cols] # store the correlation matrix corrMatrix = characters_subset.corr() # display the correlation matrix corrMatrix # view a 
heatmap of the correlations fig, ax = plt.subplots(figsize=(15,15)) sn.heatmap(corrMatrix, annot=True) # store teh X and y variables X = characters[my_cols] y = characters[['character']] # perform 75/25 test train split X_train, X_test, y_train, y_test = train_test_split(X, y) clf = RandomForestClassifier(max_depth=10, random_state=0) clf.fit(X, np.ravel(y)) clf.score(X_test, y_test) # perform 10 fold cross validation scores = cross_val_score(clf, X, np.ravel(y), cv=10) # view the scores for the 10 folds scores # view the average scores for the 10 folds np.average(scores) ```
github_jupyter
``` # Import lib # =========================================================== import csv import pandas as pd import numpy as np import random import time import collections import math import sys from tqdm import tqdm from time import sleep import matplotlib.pyplot as plt # %matplotlib inline plt.style.use('fivethirtyeight') from datascience import * from scipy import stats import statsmodels.formula.api as smf import statsmodels.api as sm # from statsmodels.genmod.families.links import logit from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_auc_score, roc_curve # Initialize useful data # =========================================================== df = pd.read_csv('clinvar_conflicting_clean.csv', low_memory=False) df = df.fillna(value=0) # resample to get a balanced dataset df_zero = df.loc[df['CLASS'] == 0] df_zero = df_zero.sample(n=1000) df_one = df.loc[df['CLASS'] == 1] df_one = df_one.sample(n=1000) # concatenate and reallocate all data df = pd.concat([df_zero, df_one]) df = df.sample(n = df.shape[0]) all_rows = df.values.tolist() row_num = len(all_rows) df.head() # Divide whole dataset into Input and Output # =========================================================== # Features - all columns except 'CLASS' # Target label - 'CLASS' column X = df.drop('CLASS', axis=1) y = df['CLASS'] # One hot encoding X = pd.get_dummies(X, drop_first=True) y = pd.get_dummies(y, drop_first=True) # Train/Test split train_X, test_X, train_y, test_y = train_test_split(X, y) # Normalize using StandardScaler scaler = StandardScaler() train_X = scaler.fit_transform(train_X) test_X = scaler.transform(test_X) # Train Model # =========================================================== model = LogisticRegression() start = time.time() model.fit(train_X, train_y) pred_y = model.predict(test_X) 
score = accuracy_score(test_y, pred_y) end = time.time() print("Logistic Regression Model Trained! Time: %.03fs" % (end - start)) # Compare Actual label and Predicted label # =========================================================== pred_score = model.predict_proba(test_X) fpr, tpr, thresholds = roc_curve(test_y, pred_score[:,1]) final = Table().with_column('IDX', [i for i in range(len(pred_score))]) final = final.with_columns('ACT_CLASS', test_y.transpose().values.tolist()[0], 'PRE_CLASS', pred_score[:, 1]) final.show(5) # Compute TN, TP, FN, FP, etc. # =========================================================== ROC = Table(make_array('CUTOFF', 'TN', 'FN', 'FP', 'TP', 'ACC')) step_size = 0.05 for cutoff in np.arange(0, 1 + step_size, step_size): temp_final = final.with_column('INDICATE', final.apply(lambda x, y: (int(x >= cutoff) << 1) + y, 'PRE_CLASS', 'ACT_CLASS')) # 00(0) -> TN # 01(1) -> FN # 10(2) -> FP # 11(3) -> TP group = temp_final.group('INDICATE') indicator = group.column(0) counts = group.column(1) # print(indicator, counts) output = [cutoff] idx = 0 for i in range(4): # print(counts[idx]) if i in indicator: output.append(counts[idx]) idx += 1 else: output.append(0) acc = (output[1] + output[4]) / sum(output[1:]) output.append(acc) ROC = ROC.with_row(output) ROC = ROC.with_columns('SENSITIVITY', ROC.apply(lambda TP, FN: TP / (TP + FN + 0.00000001), 'TP', 'FN')) ROC = ROC.with_columns('FPR', ROC.apply(lambda TN, FP: FP / (TN + FP + 0.00000001), 'TN', 'FP')) ROC = ROC.with_column('FMEAS', ROC.apply(lambda TP, FP, FN: 2 * (TP / (TP + FN)) * (TP / (TP + FP)) / (TP / (TP + FN) + TP / (TP + FP)), 'TP', 'FP', 'FN')) ROC.show() # Acc Curve by cutoff # =========================================================== fig = plt.figure() plt.xlabel('Cutoff') plt.ylabel('Accuracy') plt.title('Accuracy - Cutoff of Logistic Regression') plt.plot(np.arange(0, 1.1, 0.1), [0.5 for i in np.arange(0, 1.1, 0.1)], color='black') plt.plot(ROC.column('CUTOFF'), 
ROC.column('ACC'), color='orange') plt.axis([0, 1, 0, 1.1]) plt.show() fig.savefig('Logistic ACC.png', bbox_inches='tight') # ROC_CURVE # =========================================================== fig = plt.figure() plt.xlabel('False Positive Rate') plt.ylabel('Sensitivity') plt.title('ROC - Curve of Logistic Regression') plt.plot(np.arange(0, 1.1, 0.1), np.arange(0, 1.1, 0.1), color='black') plt.plot(ROC.column('FPR'), ROC.column('SENSITIVITY'), color='orange') plt.legend(['Null', 'Logistic']) plt.axis([0, 1, 0, 1.1]) plt.show() fig.savefig('Logistic ROC.png', bbox_inches='tight') # Compute AUC # =========================================================== length = len(ROC.column('FPR')) auc = 0 for i in range(length - 1): auc += 0.5 * abs(ROC.column('FPR')[i + 1] - ROC.column('FPR')[i]) * (ROC.column('SENSITIVITY')[i] + ROC.column('SENSITIVITY')[i + 1]) print("auc = %.03f" %auc) acc, tpr, fpr = ROC.column('ACC'), ROC.column('SENSITIVITY'), ROC.column('FPR') acc tpr fpr ```
github_jupyter
# NumPy 入門 本章では、Python で数値計算を高速に行うためのライブラリ([注釈1](#note1))である NumPy の使い方を学びます。 本章の目標は、[単回帰分析と重回帰分析](https://tutorials.chainer.org/ja/07_Regression_Analysis.html)の章で学んだ重回帰分析を行うアルゴリズムを**NumPy を用いて実装すること**です。 NumPy による**多次元配列(multidimensional array)**の扱い方を知ることは、他の様々なライブラリを利用する際に役立ちます。 例えば、様々な機械学習手法を統一的なインターフェースで利用できる **scikit-learn** や、ニューラルネットワークの記述・学習を行うためのフレームワークである **Chainer** は、NumPy に慣れておくことでとても使いやすくなります。 それでは、まず NumPy の基礎的な使用方法を説明します。 ## NumPy を使う準備 NumPy は Google Colaboratory(以下 Colab)上のノートブックにはデフォルトでインストールされているため、ここではインストールの方法は説明しません。自分のコンピュータに NumPy をインストールしたい場合は、こちらを参照してください。:[Installing packages](https://scipy.org/install.html) Colab 上ではインストール作業は必要ないものの、ノートブックを開いた時点ではまだ `numpy` モジュールが読み込まれていません。 ライブラリの機能を利用するには、そのライブラリが提供するモジュールを読み込む必要があります。 例えば `A` というモジュールを読み込みたいとき、一番シンプルな記述方法は `import A` です。 ただ、もし `A` というモジュール名が長い場合は、`import A as B` のようにして別名を付けることができます。 `as` を使って別名が与えられると、以降そのモジュールはその別名を用いて利用することができます。 `import A as B` と書くと、`A` というモジュールは `B` という名前で利用することができます。 これは Python の機能なので NumPy 以外のモジュールを読み込みたい場合にも使用可能です。 慣習的に、`numpy` にはしばしば `np` という別名が与えられます。 コード中で頻繁に使用するモジュールには、短い別名をつけて定義することがよく行われます。 それでは、`numpy` を `np` という名前で `import` してみましょう。 ``` import numpy as np ``` ## 多次元配列を定義する ベクトル・行列・テンソルなどは、プログラミング上は多次元配列により表現でき、NumPy では ndarray というクラスで多次元配列を表現します([注釈2](#note2))。早速、これを用いてベクトルを定義してみましょう。 ``` # ベクトルの定義 a = np.array([1, 2, 3]) a ``` このように、Python リスト `[1, 2, 3]` を `np.array()` に渡すことで、$[1, 2, 3]$ というベクトルを表す ndarray オブジェクトを作ることができます。 ndarray オブジェクトは `shape` という**属性 (attribute)** を持っており、その多次元配列の**形 (shape)** が保存されています。 上で定義した `a` という ndarray オブジェクトの形を調べてみましょう。 ``` a.shape ``` `(3,)` という要素数が 1 の Python のタプルが表示されています。 ndarray の形は、要素が整数のタプルで表され、要素数はその多次元配列の**次元数 (dimensionality, number of dimensions)** を表します。 形は、その多次元配列の各次元の大きさを順に並べた整数のタプルになっています。 次元数は、ndarray の `ndim` という属性に保存されています。 ``` a.ndim ``` これは、`len(a.shape)` と同じ値になります。 今、`a` という ndarray は 1 次元配列なので、`a.shape` は要素数が 1 のタプルで、`ndim` の値は 1 でした([注釈3](#note3))。 では次に、$3 \times 3$ 行列を定義してみましょう。 ``` # 行列の定義 b = 
np.array( [[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) b ``` 形と次元数を調べます。 ``` print('Shape:', b.shape) print('Rank:', b.ndim) ``` ここで、`size` という属性も見てみましょう。 ``` b.size ``` これは、`b` という ndarray が持つ要素の数を表しています。 `b` は $3 \times 3$ 行列なので、要素数は 9 です。 **「形」「次元数」「サイズ」という言葉がそれぞれ意味するものの違いを確認してください。** NumPy の ndarray の作成方法には、`np.array()` を用いて Python のリストから多次元配列を作る方法以外にも、色々な方法があります。 以下に代表的な例をいくつか紹介します。 ``` # 形を指定して、要素が全て 0 で埋められた ndarray を作る a = np.zeros((3, 3)) a # 形を指定して、要素が全て 1 で埋められた ndarray を作る b = np.ones((2, 3)) b # 形と値を指定して、要素が指定した値で埋められた ndarray を作る c = np.full((3, 2), 9) c # 指定された大きさの単位行列を表す ndarray を作る d = np.eye(5) d # 形を指定して、 0 ~ 1 の間の乱数で要素を埋めた ndarray を作る e = np.random.random((4, 5)) e # 3 から始まり 10 になるまで 1 ずつ増加する数列を作る(10 は含まない) f = np.arange(3, 10, 1) f ``` ## 多次元配列の要素を選択する 前節では NumPy を使って多次元配列を定義するいくつかの方法を紹介しました。 本節では、作成した ndarray のうちの特定の要素を選択して、値を取り出す方法を紹介します。 最もよく行われる方法は `[]` を使った**添字表記 (subscription)** による要素の選択です。 ### 整数による要素の選択 例えば、上で作成した `e` という $4 \times 5$ 行列を表す多次元配列から、1 行 2 列目の値を取り出すには、以下のようにします。 ``` val = e[0, 1] val ``` 「1 行 2 列目」を指定するのに、インデックスは `[0, 1]` でした。 これは、NumPy の ndarray の要素は Python リストと同じく、添字が 0 から始まる**ゼロベースインデックス (zero-based index)** が採用されているためです。 つまり、この行列の i 行 j 列目の値は、`[i - 1, j - 1]` で取り出すことができます。 ### スライスによる要素の選択 NumPy の ndarray に対しても、Python のリストと同様に**スライス表記 (slicing)** を用いて選択したい要素を範囲指定することができます。 ndarray はさらに、カンマ区切りで複数の次元に対するスライスを指定できます。 ``` # 4 x 5 行列 e の真ん中の 2 x 3 = 6 個の値を取り出す center = e[1:3, 1:4] center ``` 前節最後にある `e` の出力を見返すと、ちょうど真ん中の部分の $2 \times 3$ 個の数字が取り出せていることが分かります。 ここで、`e` の中から `[1, 1]` の要素を起点として 2 行 3 列を取り出して作られた `center` の形を、`e` の形と比較してみましょう。 ``` print('Shape of e:', e.shape) print('Shape of center:', center.shape) ``` また、インデックスを指定したり、スライスを用いて取り出した ndarray の一部に対し、値を代入することもできます。 ``` # 先程の真ん中の 6 個の値を 0 にする e[1:3, 1:4] = 0 e ``` ### 整数配列による要素の選択 ndarray の `[]` には、整数やスライスの他に、整数配列を渡すこともできます。 整数配列とは、ここでは整数を要素とする Python リストまたは ndarray のことを指しています。 具体例を示します。 まず、$3 \times 3$ 行列を表す `a` という ndarray を定義します。 ``` a = np.array( [[1, 2, 3], [4, 5, 6], 
[7, 8, 9]] ) a ``` この ndarray から、 1. 1 行 2 列目:`a[0, 1]` 2. 3 行 2 列目:`a[2, 1]` 3. 2 行 1 列目:`a[1, 0]` の 3 つの要素を選択して並べ、形が `(3,)` であるような ndarray を作りたいとします。 これは、以下のように、順に対象の要素を指定して並べて新しい ndarray にすることでももちろん実現できます。 ``` np.array([a[0, 1], a[2, 1], a[1, 0]]) ``` しかし、同じことが**選択したい行、選択したい列を、順にそれぞれリストとして与える**ことでも行えます。 ``` a[[0, 2, 1], [1, 1, 0]] ``` **選択したい 3 つの値がどの行にあるか**だけに着目すると、それぞれ 1 行目、3 行目、2 行目にある要素です。 ゼロベースインデックスでは、それぞれ 0, 2, 1 行目です。 これが `a` の `[]` に与えられた 1 つ目のリスト `[0, 2, 1]` の意味です。 同様に、**列に着目**すると、ゼロベースインデックスでそれぞれ 1, 1, 0 列目の要素です。 これが `a` の `[]` に与えられた 2 つ目のリスト `[1, 1, 0]` の意味です。 ## ndarray のデータ型 1 つの ndarray の要素は、全て同じ型を持ちます。 NumPy では様々なデータ型を使うことができますが、ここでは一部だけを紹介します。 NumPy は Python リストを渡して ndarray を作る際などには、その値からデータ型を推測します。 ndarray のデータ型は、`dtype` という属性に保存されています。 ``` # 整数(Python の int 型)の要素をもつリストを与えた場合 x = np.array([1, 2, 3]) x.dtype # 浮動小数点数(Python の float 型)の要素をもつリストを与えた場合 x = np.array([1., 2., 3.]) x.dtype ``` 以上のように、**Python の int 型は自動的に NumPy の int64 型**になりました。 また、**Python の float 型は自動的に NumPy の float64 型**になりました。 Python の int 型は NumPy の int_ 型に対応づけられており、Python の float 型は NumPy の float_ 型に対応づけられています。 この int_ 型はプラットフォームによって int64 型と同じ場合と int32 型と同じ場合があります。 float_ 型についても同様で、プラットフォームによって float64 型と同じ場合と float32 型と同じ場合があります。 特定の型を指定して ndarray を作成するには、以下のようにします。 ``` x = np.array([1, 2, 3], dtype=np.float32) x.dtype ``` このように、`dtype` という引数に NumPy の dtype オブジェクトを渡します。 これは 32 ビット浮動小数点数型を指定する例です。 同じことが、文字列で指定することによっても行えます。 ``` x = np.array([1, 2, 3], dtype='float32') x.dtype ``` これはさらに、以下のように短く書くこともできます。 ``` x = np.array([1, 2, 3], dtype='f') x.dtype ``` 一度あるデータ型で定義した配列のデータ型を別のものに変更するには、`astype` を用いて変換を行います。 ``` x = x.astype(np.float64) x.dtype ``` ## 多次元配列を用いた計算 ndarray を使って行列やベクトルを定義して、それらを用いていくつかの計算を行ってみましょう。 ndarray として定義されたベクトルや行列同士の**要素ごとの加減乗除**は、Python の数値同士の四則演算に用いられる `+`、`-`、`*`、`/` という記号を使って行えます。 それでは、同じ形の行列を 2 つ定義し、それらの**要素ごとの**加減乗除を実行してみましょう。 ``` # 同じ形 (3 x 3) の行列を 2 つ定義する a = np.array([ [0, 1, 2], [3, 4, 5], [6, 7, 8] ]) b = np.array([ [1, 2, 3], [4, 5, 6], [7, 
8, 9] ]) # 足し算 c = a + b c # 引き算 c = a - b c # 掛け算 c = a * b c # 割り算 c = a / b c ``` NumPy では、与えられた多次元配列に対して要素ごとに計算を行う関数が色々と用意されています。 以下にいくつかの例を示します。 ``` # 要素ごとに平方根を計算する c = np.sqrt(b) c # 要素ごとに値を n 乗する n = 2 c = np.power(b, n) c ``` 要素ごとに値を n 乗する計算は、以下のようにしても書くことができます。 ``` c ** n ``` はじめに紹介した四則演算は、**同じ大きさの** 2 つの行列同士で行っていました。 ここで、$3 \times 3$ 行列 `a` と 3 次元ベクトル `b` という大きさのことなる配列を定義して、それらを足してみましょう。 ``` a = np.array([ [0, 1, 2], [3, 4, 5], [6, 7, 8] ]) b = np.array([1, 2, 3]) c = a + b c ``` 形が同じ行列同士の場合と同様に計算することができました。 これは NumPy が自動的に**ブロードキャスト(broadcast)**と呼ばれる操作を行っているためです。 これについて次節で説明します。 ## ブロードキャスト 行列同士の要素ごとの四則演算は、通常は行列の形が同じでなければ定義できません。 しかし、前節の最後では $3 \times 3$ 行列に 3 次元ベクトルを足す計算が実行できました。 これが要素ごとの計算と同じように実行できる理由は、NumPy が自動的に 3 次元ベクトル `b` を 3 つ並べてできる $3 \times 3$ 行列を想定し、`a` と同じ形に揃える操作を暗黙に行っているからです。 この操作を、**ブロードキャスト**と呼びます。 算術演算を異なる形の配列同士で行う場合、NumPy は自動的に小さい方の配列を**ブロードキャスト**し、大きい方の配列と形を合わせます。 ただし、この自動的に行われるブロードキャストでは、行いたい算術演算が、大きい方の配列の一部に対して**繰り返し行われる**ことで実現されるため、実際に小さい方の配列のデータをコピーして大きい配列をメモリ上に作成することは可能な限り避けられます。 また、この繰り返しの計算は NumPy の内部の C 言語によって実装されたループで行われるため、高速です。 よりシンプルな例で考えてみましょう。 以下のような配列 `a` があり、この全ての要素を 2 倍にしたいとします。 ``` a = np.array([1, 2, 3]) a ``` このとき、一つの方法は以下のように同じ形で要素が全て 2 である別の配列を定義し、これと要素ごとの積を計算するやり方です。 ``` b = np.array([2, 2, 2]) c = a * b c ``` しかし、スカラの 2 をただ `a` に掛けるだけでも同じ結果が得られます。 ``` c = a * 2 c ``` `* 2` という計算が、`c` の 3 つの要素の**どの要素に対する計算なのか**が明示されていないため、NumPy はこれを**全ての要素に対して行うという意味**だと解釈して、スカラの 2 を `a` の要素数 3 だけ引き伸ばしてから掛けてくれます。 **形の異なる配列同士の計算がブロードキャストによって可能になるためにはルールがあります。** それは、**「2 つの配列の各次元が同じ大きさになっているか、どちらかが 1 であること」**です。 このルールを満たさない場合、NumPy は "ValueError: operands could not be broadcast together with shapes (1 つ目の配列の形) (2 つ目の配列の形)" というエラーを出します。 ブロードキャストされた配列の各次元のサイズ([注釈4](#note4))は、入力された配列のその次元のサイズの中で最大の値と同じになっています。 入力された配列は、各次元のサイズが入力のうち大きい方のサイズと同じになるようブロードキャストされ、その拡張されたサイズで計算されます。 もう少し具体例を見てみましょう。 以下のような 2 つの配列 `a` と `b` を定義し、足します。 ``` # 0 ~ 9 の範囲の値をランダムに用いて埋められた (2, 1, 3) と (3, 1) という大きさの配列を作る a = np.random.randint(0, 10, (2, 1, 3)) b = 
np.random.randint(0, 10, (3, 1)) print('a:\n', a) print('\na.shape:', a.shape) print('\nb:\n', b) print('\nb.shape:', b.shape) # 加算 c = a + b print('\na + b:\n', c) print('\n(a + b).shape:', c.shape) ``` `a` の形は `(2, 1, 3)` で、`b` の形は `(3, 1)` でした。 この 2 つの配列の**末尾次元 (trailing dimension)**([注釈5](#note5)) はそれぞれ 3 と 1 なので、ルールにあった「次元が同じサイズであるか、どちらかが 1 であること」を満たしています。 次に、各配列の第 2 次元に注目してみましょう。 それぞれ 1 と 3 です。 これもルールを満たしています。 ここで、`a` は 3 次元配列ですが、`b` は 2 次元配列です。 つまり、次元数が異なっています。 このような場合は、`b` は**一番上の次元にサイズが 1 の次元が追加された形** `(1, 3, 1)` として扱われます。 そして 2 つの配列の各次元ごとのサイズの最大値をとった形 `(2, 3, 3)` にブロードキャストされ、足し算が行われます。 このように、もし 2 つの配列のランクが異なる場合は、次元数が小さい方の配列が大きい方と同じ次元数になるまでその形の先頭に新たな次元が追加されます。 サイズが 1 の次元がいくつ追加されても、要素の数は変わらないことに注意してください。 要素数(`size` 属性で取得できる値)は、各次元のサイズの掛け算になるので、1 を何度かけても値は変わらないことから、これが成り立つことが分かります。 NumPy がブロードキャストのために自動的に行う新しい次元の挿入は、`[]` を使った以下の表な表記を用いることで**手動で行うこともできます。** ``` print('Original shape:', b.shape) b_expanded = b[np.newaxis, :, :] print('Added new axis to the top:', b_expanded.shape) b_expanded2 = b[:, np.newaxis, :] print('Added new axis to the middle:', b_expanded2.shape) ``` `np.newaxis` が指定された位置に、新しい次元が挿入されます。 配列が持つ数値の数は変わっていません。 そのため、挿入された次元のサイズは必ず 1 になります。 ``` b b_expanded b_expanded2 ``` NumPy のブロードキャストは慣れるまで直感に反するように感じる場合があるかもしれません。 しかし、使いこなすと同じ計算が Python のループを使って行うよりも高速に行えるため、ブロードキャストを理解することは非常に重要です。 一つ具体例を見てみます。 $5 \times 5$ 行列 `a` に、3 次元ベクトル `b` を足します。 まず、`a`、`b` および結果を格納する配列 `c` を定義します。 ``` a = np.array([ [0, 1, 2, 1, 0], [3, 4, 5, 4, 3], [6, 7, 8, 7, 6], [3, 4, 5, 4, 4], [0, 1, 2, 1, 0] ]) b = np.array([1, 2, 3, 4, 5]) # 結果を格納する配列を先に作る c = np.empty((5, 5)) ``` `%%timeit` という Jupyter Notebook で使用できるそのセルの実行時間を計測するためのマジックを使って、`a` の各行(1 次元目)に `b` の値を足していく計算を Python のループを使って 1 行ずつ処理していくコードの実行時間を測ってみます。 ``` %%timeit for i in range(a.shape[0]): c[i, :] = a[i, :] + b c ``` 次に、NumPy のブロードキャストを活用した方法で同じ計算を行ってみます。 ``` %%timeit c = a + b c ``` 計算結果は当然同じになります。 しかし、実行時間が数倍短くなっています。 このように、ブロードキャストを理解して活用することで、記述が簡単になるだけでなく、実行速度という点においても有利になります。 ## 行列積 行列の要素ごとの積は 
`*` を用いて計算できました。 一方、通常の行列同士の積(行列積)の計算は、`*` ではなく、別の方法で行います。 方法は 2 種類あります。 1つは、`np.dot()` 関数を用いる方法です。 `np.dot()` は 2 つの引数をとり、それらの行列積を計算して返す関数です。 今、`A` という行列と `B` という行列があり、行列積 `AB` を計算したいとします。 これは `np.dot(A, B)` と書くことで計算できます。 もし `BA` を計算したい場合は、`np.dot(B, A)` と書きます。 もう 1 つは、ndarray オブジェクトが持つ `dot()` メソッドを使う方法です。 これを用いると、同じ計算が `A.dot(B)` と書くことによって行えます。 ``` # 行列 A の定義 A = np.array([ [0, 1, 2], [3, 4, 5], [6, 7, 8] ]) # 行列 B の定義 B = np.array([ [1, 2, 3], [4, 5, 6], [7, 8, 9] ]) ``` 実際にこの $3 \times 3$ の 2 つの行列の行列積を計算してみましょう。 ``` # 行列積の計算 (1) C = np.dot(A, B) C ``` 同じ計算をもう一つの記述方法で行ってみます。 ``` C = A.dot(B) C # データ型の確認(整数値) a.dtype ``` ## 基本的な統計量の求め方 本節では、多次元配列に含まれる値の平均・分散・標準偏差・最大値・最小値といった統計値を計算する方法を紹介します。 $8 \times 10$ の行列を作成し、この中に含まれる値全体に渡るこれらの統計値を計算してみましょう。 ``` x = np.random.randint(0, 10, (8, 10)) x # 平均値 x.mean() # 分散 x.var() # 標準偏差 x.std() # 最大値 x.max() # 最小値 x.min() ``` ここで、`x` は 2 次元配列なので、各次元に沿ったこれらの統計値の計算も行えます。 例えば、最後の次元内だけで平均をとると、8 個の平均値が得られるはずです。 平均を計算したい軸(何次元目に沿って計算するか)を `axis` という引数に指定します。 ``` x.mean(axis=1) ``` これは、以下のように 1 次元目の値の平均を計算していったものを並べているのと同じことです。 (ゼロベースインデックスで考えています。`x` の形は `(8, 10)` なので、0 次元目のサイズが 8、1 次元目のサイズが 10 です。) ``` np.array([ x[0, :].mean(), x[1, :].mean(), x[2, :].mean(), x[3, :].mean(), x[4, :].mean(), x[5, :].mean(), x[6, :].mean(), x[7, :].mean(), ]) ``` ## NumPy を用いた重回帰分析 [単回帰分析と重回帰分析](https://tutorials.chainer.org/ja/07_Regression_Analysis.html)の章で説明した重回帰分析を NumPy を用いて行いましょう。 4 つのデータをまとめた、以下のようなデザイン行列が与えられたとします。 ``` # Xの定義 X = np.array([ [2, 3], [2, 5], [3, 4], [5, 9], ]) X ``` 4 章の解説と同様に、切片を重みベクトルに含めて扱うため、デザイン行列の 0 列目に 1 という値を付け加えます。 ``` # データ数(X.shape[0]) と同じ数だけ 1 が並んだ配列 ones = np.ones((X.shape[0], 1)) # concatenate を使い、1 次元目に 1 を付け加える X = np.concatenate((ones, X), axis=1) # 先頭に 1 が付け加わったデザイン行列 X ``` また、目標値が以下で与えられたとします。 ``` # t の定義 t = np.array([1, 5, 6, 8]) t ``` 重回帰分析は、正規方程式を解くことで最適な 1 次方程式の重みを決定することができました。 正規方程式の解は以下のようなものでした。 $$ {\bf w} = ({\bf X}^{{\rm T}}{\bf X})^{\rm -1}{\bf X}^{\rm T}{\bf t} $$ これを、4 つのステップに分けて計算していきます。 
まずは、${\bf X}^{\rm T}{\bf X}$ の計算です。ndarrayに対して `.T` で転置した配列を得られます。 ``` # Step 1 xx = np.dot(X.T, X) xx ``` 次に、この逆行列を計算します。 ``` # Step 2 xx_inv = np.linalg.inv(xx) xx_inv ``` 逆行列の計算は `np.linalg.inv()` で行うことができます。 次に、${\bf X}^{\rm T}{\bf t}$ の計算をします。 ``` # Step 3 xt = np.dot(X.T, t) xt ``` 最後に、求めた `xx_inv` と `xt` を掛け合わせます。 ``` # Step 4 w = np.dot(xx_inv, xt) w ``` **以上の計算は、以下のように 1 行で行うこともできます。** ``` w_ = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(t) w_ ``` 実際には逆行列を陽に求めることは稀で、連立一次方程式を解く、すなわち逆行列を計算してベクトルに掛けるのに等しい計算をひとまとめに行う関数 `numpy.linalg.solve` を呼ぶ方が速度面でも精度面でも有利です。 ``` w_ = np.linalg.solve(X.T.dot(X), X.T.dot(t)) w_ ``` 数式を NumPy による配列の計算に落とし込むことに慣れていくには少し時間がかかりますが、慣れると少ない量のコードで記述できるだけでなく、高速に計算が行なえるため、大きな恩恵があります。 <hr /> <div class="alert alert-info"> **注釈 1** ライブラリとは、汎用性の高い複数の関数やクラスなどを再利用可能な形でひとまとまりにしたもので、Python の世界では**パッケージ**とも呼ばれます。また、Python で関数やクラスの定義、文などが書かれたファイルのことを**モジュール**と呼び、パッケージはモジュールが集まったものです。 [▲上へ戻る](#ref_note1) </div> <div class="alert alert-info"> **注釈 2** NumPy には matrix というクラスも存在しますが、本チュートリアルでは基本的に多次元配列を表す ndarray をベクトルや行列を表すために用います。 [▲上へ戻る](#ref_note2) </div> <div class="alert alert-info"> **注釈 3** これは、その多次元配列が表すテンソルの**階数(rank、以下ランク)**と対応します。 [▲上へ戻る](#ref_note3) </div> <div class="alert alert-info"> **注釈 4** 「次元のサイズ」と言った場合はその次元の大きさを意味し、配列の `size` 属性とは異なるものを指しています。 [▲上へ戻る](#ref_note4) </div> <div class="alert alert-info"> **注釈 5** 末尾次元(trailing dimension)とは、その配列の形を表すタプルの一番最後の値のことを指します。 [▲上へ戻る](#ref_note5) </div>
github_jupyter
# HuberRegressorw with StandardScaler This Code template is for the regression analysis using a Huber Regression and the feature rescaling technique StandardScaler in a pipeline. ### Required Packages ``` import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as se from sklearn.linear_model import HuberRegressor from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error warnings.filterwarnings('ignore') ``` ### Initialization Filepath of CSV file ``` #filepath file_path= "" ``` List of features which are required for model training . ``` #x_values features= [] ``` Target feature for prediction. ``` #y_value target='' ``` ### Data Fetching Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry. ``` df=pd.read_csv(file_path) df.head() ``` ### Feature Selections It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model. We will assign all the required input features to X and target/outcome to Y. ``` X=df[features] Y=df[target] ``` ### Data Preprocessing Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes. 
``` def NullClearner(df): if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])): df.fillna(df.mean(),inplace=True) return df elif(isinstance(df, pd.Series)): df.fillna(df.mode()[0],inplace=True) return df else:return df def EncodeX(df): return pd.get_dummies(df) ``` Calling preprocessing functions on the feature and target set. ``` x=X.columns.to_list() for i in x: X[i]=NullClearner(X[i]) X=EncodeX(X) Y=NullClearner(Y) X.head() ``` #### Correlation Map In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns. ``` f,ax = plt.subplots(figsize=(18, 18)) matrix = np.triu(X.corr()) se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix) plt.show() ``` ### Data Splitting The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data. ``` x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123) ``` ### Model Linear regression model that is robust to outliers. The Huber Regressor optimizes the squared loss for the samples where |(y - X'w) / sigma| < epsilon and the absolute loss for the samples where |(y - X'w) / sigma| > epsilon, where w and sigma are parameters to be optimized. The parameter sigma makes sure that if y is scaled up or down by a certain factor, one does not need to rescale epsilon to achieve the same robustness. Note that this does not take into account the fact that the different features of X may be of different scales. This makes sure that the loss function is not heavily influenced by the outliers while not completely ignoring their effect. 
#### Data Scaling Used sklearn.preprocessing.StandardScaler Standardize features by removing the mean and scaling to unit variance. The standard score of a sample x is calculated as: z = (x - u) / s Where u is the mean of the training samples or zero if with_mean=False, and s is the standard deviation of the training samples or one if with_std=False. Read more at [scikit-learn.org](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html) ``` Input=[("standard",StandardScaler()),("model",HuberRegressor())] model = Pipeline(Input) model.fit(x_train,y_train) ``` #### Model Accuracy We will use the trained model to make a prediction on the test set. Then we use the predicted values for measuring the accuracy of our model. > **score**: The **score** function returns the coefficient of determination <code>R<sup>2</sup></code> of the prediction. ``` print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100)) ``` > **r2_score**: The **r2_score** function computes the percentage of variability explained by our model, either the fraction or the count of correct predictions. > **mae**: The **mean absolute error** function calculates the amount of total error (the absolute average distance between the real data and the predicted data) by our model. > **mse**: The **mean squared error** function squares the error (penalizing the model for large errors) by our model. ``` y_pred=model.predict(x_test) print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100)) print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred))) print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred))) ``` #### Prediction Plot First, we make use of a plot to plot the actual observations, with x_train on the x-axis and y_train on the y-axis. For the regression line, we will use x_train on the x-axis and then the predictions of the x_train observations on the y-axis. 
``` plt.figure(figsize=(14,10)) plt.plot(range(20),y_test[0:20], color = "green") plt.plot(range(20),model.predict(x_test[0:20]), color = "red") plt.legend(["Actual","prediction"]) plt.title("Predicted vs True Value") plt.xlabel("Record number") plt.ylabel(target) plt.show() ``` #### Creator: Snehaan Bhawal , Github: [Profile](https://github.com/Sbhawal)
github_jupyter
``` import glob import numpy as np from scipy.interpolate import interp2d import astropy.units as au import astropy.time as at import astropy.coordinates as ac import h5py import os import pylab as plt from RadioArray import RadioArray from UVWFrame import UVW from PointingFrame import Pointing def getDatumIdx(antIdx,timeIdx,dirIdx,numAnt,numTimes): '''standarizes indexing''' idx = antIdx + numAnt*(timeIdx + numTimes*dirIdx) return idx def getDatum(datumIdx,numAnt,numTimes): antIdx = datumIdx % numAnt timeIdx = (datumIdx - antIdx)/numAnt % numTimes dirIdx = (datumIdx - antIdx - numAnt*timeIdx)/numAnt/numTimes return antIdx,timeIdx,dirIdx class DataPack(object): '''dataDict = {'radioArray':radioArray,'antennas':outAntennas,'antennaLabels':outAntennaLabels, 'times':outTimes,'timestamps':outTimestamps, 'directions':outDirections,'patchNames':outPatchNames,'dtec':outDtec} ''' def __init__(self,dataDict=None,filename=None): '''get the astropy object defining rays and then also the dtec data''' if dataDict is not None: self.addDataDict(**dataDict) else: if filename is not None: self.load(filename) return self.refAnt = None print("Loaded {0} antennas, {1} times, {2} directions".format(self.Na,self.Nt,self.Nd)) def __repr__(self): return "DataPack: numAntennas = {}, numTimes = {}, numDirections = {}\nReference Antenna = {}".format(self.Na,self.Nt,self.Nd,self.refAnt) def clone(self): dataPack = DataPack({'radioArray':self.radioArray, 'antennas':self.antennas, 'antennaLabels':self.antennaLabels, 'times':self.times, 'timestamps':self.timestamps, 'directions':self.directions, 'patchNames' : self.patchNames, 'dtec':self.dtec}) dataPack.setReferenceAntenna(self.refAnt) return dataPack def save(self,filename): dt = h5py.special_dtype(vlen=str) f = h5py.File(filename,'w') antennaLabels = f.create_dataset("datapack/antennas/labels",(self.Na,),dtype=dt) f["datapack/antennas"].attrs['frequency'] = self.radioArray.frequency antennas = 
f.create_dataset("datapack/antennas/locs",(self.Na,3),dtype=np.double) antennaLabels[...] = self.antennaLabels antennas[:,:] = self.antennas.cartesian.xyz.to(au.m).value.transpose()#to Nax3 in m patchNames = f.create_dataset("datapack/directions/patchnames",(self.Nd,),dtype=dt) ra = f.create_dataset("datapack/directions/ra",(self.Nd,),dtype=np.double) dec = f.create_dataset("datapack/directions/dec",(self.Nd,),dtype=np.double) patchNames[...] = self.patchNames ra[...] = self.directions.ra.deg dec[...] = self.directions.dec.deg timestamps = f.create_dataset("datapack/times/timestamps",(self.Nt,),dtype=dt) gps = f.create_dataset("datapack/times/gps",(self.Nt,),dtype=np.double) timestamps[...] = self.timestamps gps[...] = self.times.gps dtec = f.create_dataset("datapack/dtec",(self.Na,self.Nt,self.Nd),dtype=np.double) dtec[:,:,:] = self.dtec dtec.attrs['refAnt'] = str(self.refAnt) f.close() def load(self,filename): f = h5py.File(filename,'r') self.antennaLabels = f["datapack/antennas/labels"][:].astype(str) antennas = f["datapack/antennas/locs"][:,:] frequency = f["datapack/antennas"].attrs['frequency'] self.radioArray = RadioArray(antennaPos = antennas,frequency = frequency) self.antennas = ac.SkyCoord(antennas[:,0]*au.m,antennas[:,1]*au.m,antennas[:,2]*au.m,frame='itrs') self.patchNames = f["datapack/directions/patchnames"][:].astype(str) ra = f["datapack/directions/ra"][:] dec = f["datapack/directions/dec"][:] self.directions = ac.SkyCoord(ra*au.deg,dec*au.deg,frame='icrs') self.timestamps = f["datapack/times/timestamps"][:].astype(str) self.times = at.Time(self.timestamps,format='isot',scale='tai') self.dtec = f["datapack/dtec"][:,:,:] self.refAnt = np.array(f["datapack/dtec"].attrs['refAnt']).astype(str).item(0) self.Na = len(self.antennas) self.Nt = len(self.times) self.Nd = len(self.directions) self.setReferenceAntenna(self.refAnt) f.close() def addDataDict(self,**args): '''Set up variables here that will hold references throughout''' for attr in args.keys(): 
try: setattr(self,attr,args[attr]) except: print("Failed to set {0} to {1}".format(attr,args[attr])) self.Na = len(self.antennas) self.Nt = len(self.times) self.Nd = len(self.directions) def set_dtec(self,dtec,antIdx=[],timeIdx=[], dirIdx=[],refAnt=None): '''Set the specified dtec solutions corresponding to the requested indices. value of -1 means all.''' if antIdx is -1: antIdx = np.arange(self.Na) if timeIdx is -1: timeIdx = np.arange(self.Nt) if dirIdx is -1: dirIdx = np.arange(self.Nd) antIdx = np.sort(antIdx) timeIdx = np.sort(timeIdx) dirIdx = np.sort(dirIdx) Na = len(antIdx) Nt = len(timeIdx) Nd = len(dirIdx) i = 0 while i < Na: j = 0 while j < Nt: k = 0 while k < Nd: self.dtec[antIdx[i],timeIdx[j],dirIdx[k]] = dtec[i,j,k] k += 1 j += 1 i += 1 if refAnt is not None: self.setReferenceAntenna(refAnt) else: if self.refAnt is not None: self.setReferenceAntenna(self.refAnt) def get_dtec(self,antIdx=[],timeIdx=[], dirIdx=[]): '''Retrieve the specified dtec solutions corresponding to the requested indices. 
value of -1 means all.''' if antIdx is -1: antIdx = np.arange(self.Na) if timeIdx is -1: timeIdx = np.arange(self.Nt) if dirIdx is -1: dirIdx = np.arange(self.Nd) antIdx = np.sort(antIdx) timeIdx = np.sort(timeIdx) dirIdx = np.sort(dirIdx) Na = len(antIdx) Nt = len(timeIdx) Nd = len(dirIdx) output = np.zeros([Na,Nt,Nd],dtype=np.double) i = 0 while i < Na: j = 0 while j < Nt: k = 0 while k < Nd: output[i,j,k] = self.dtec[antIdx[i],timeIdx[j],dirIdx[k]] k += 1 j += 1 i += 1 return output def get_antennas(self,antIdx=[]): '''Get the list of antenna locations in itrs''' if antIdx is -1: antIdx = np.arange(self.Na) antIdx = np.sort(antIdx) output = self.antennas[antIdx] Na = len(antIdx) outputLabels = [] i = 0 while i < Na: outputLabels.append(self.antennaLabels[antIdx[i]]) i += 1 return output, outputLabels def get_times(self,timeIdx=[]): '''Get the gps times''' if timeIdx is -1: timeIdx = np.arange(self.Nt) timeIdx = np.sort(timeIdx) output = self.times[timeIdx] Nt = len(timeIdx) outputLabels = [] j = 0 while j < Nt: outputLabels.append(self.timestamps[timeIdx[j]]) j += 1 return output, outputLabels def get_directions(self, dirIdx=[]): '''Get the array of directions in itrs''' if dirIdx is -1: dirIdx = np.arange(self.Nd) dirIdx = np.sort(dirIdx) output = self.directions[dirIdx] Nd = len(dirIdx) outputLabels = [] k = 0 while k < Nd: outputLabels.append(self.patchNames[dirIdx[k]]) k += 1 return output, outputLabels def setReferenceAntenna(self,refAnt): if refAnt is None: return refAntIdx = None i = 0 while i < self.Na: if self.antennaLabels[i] == refAnt: refAntIdx = i break i += 1 assert refAntIdx is not None, "{} is not a valid antenna. 
Choose from {}".format(refAnt,self.antennaLabels) #print("Setting refAnt: {}".format(refAnt)) self.refAnt = refAnt self.dtec = self.dtec - self.dtec[refAntIdx,:,:] def getCenterDirection(self): raMean = np.mean(self.directions.transform_to('icrs').ra) decMean = np.mean(self.directions.transform_to('icrs').dec) phase = ac.SkyCoord(raMean,decMean,frame='icrs') return phase def findFlaggedAntennas(self): '''Determine which antennas are flagged''' assert self.refAnt is not None, "Set a refAnt before finding flagged antennas" mask = np.sum(np.sum(self.dtec,axis=2),axis=1) == 0 i = 0 while i < self.Na: if self.antennaLabels[i] == self.refAnt: refAntIdx = i break i += 1 mask[refAntIdx] = False return list(self.antennaLabels[mask]) def flagAntennas(self,antennaLabels): '''remove data corresponding to the given antenna names if it exists''' assert type(antennaLabels) == type([]), "{} is not a list of station names. Choose from {}".format(antennaLabels,self.antennaLabels) mask = np.ones(len(self.antennaLabels), dtype=bool) antennasFound = 0 i = 0 while i < self.Na: if self.antennaLabels[i] in antennaLabels: antennasFound += 1 mask[i] = False i += 1 #some flags may have not existed in data self.antennaLabels = self.antennaLabels[mask] self.antennas = self.antennas[mask] self.dtec = self.dtec[mask,:,:] self.Na = len(self.antennas) def flagPatches(self,patchNames): '''remove data corresponding to the given antenna names if it exists''' assert type(patchNames) == type([]), "{} is not a list of patch names. 
Choose from {}".format(antennaLabels,self.antennaLabels) mask = np.ones(len(self.patchNames), dtype=bool) patchesFound = 0 i = 0 while i < self.Nd: if self.patchNames[i] in patchNames: patchesFound += 1 mask[i] = False i += 1 #some flags may have not existed in data self.patchNames = self.patchNames[mask] self.directions = self.directions[mask] self.dtec = self.dtec[:,:,mask] self.Nd = len(self.directions) def transferPatchData(infoFile, dataFolder, hdf5Out): '''transfer old numpy format to hdf5. Only run with python 2.7''' assert os.path.isdir(dataFolder), "{0} is not a directory".format(dataFolder) dt = h5py.special_dtype(vlen=str) f = h5py.File(hdf5Out,"w") info = np.load(infoFile) #these define the direction order patches = info['patches']#names radec = info['directions']#astrpy.icrs Nd = len(patches) print("Loading {} patches".format(Nd)) namesds = f.create_dataset("dtecObservations/patchNames",(Nd,),dtype=dt) #rads = f.create_dataset("dtecObservations/patches/ra",(Nd,),dtype=np.double) #dec = f.create_dataset("dtecObservations/patches/dec",(Nd,),dtype=np.double) dset = f['dtecObservations'] dset.attrs['frequency'] = 150e6 namesds[...] = patches #rads[...] = radec.ra.deg #decds[...] = radec.dec.deg patchIdx = 0 while patchIdx < Nd: patch = patches[patchIdx] #find the appropriate file (this will be standardized later) files = glob.glob("{0}/*_{1}_*.npz".format(dataFolder,patch)) if len(files) == 1: patchFile = files[0] else: print('Too many files found. Could not find patch: {0}'.format(patch)) patchIdx += 1 continue try: d = np.load(patchFile) print("Loading data file: {0}".format(patchFile)) except: print("Failed loading data file: {0}".format(patchFile)) return if "dtecObservations/antennaLabels" not in f: antennaLabels = d['antennas']#labels Na = len(antennaLabels) antennaLabelsds = f.create_dataset("dtecObservations/antennaLabels",(Na,),dtype=dt) antennaLabelsds[...] 
= antennaLabels if "dtecObservations/timestamps" not in f: times = d['times']#gps tai timestamps = at.Time(times,format='gps',scale='tai').isot Nt = len(times) print(len(timestamps[0])) timeds = f.create_dataset("dtecObservations/timestamps",(Nt,),dtype=dt) timeds[...] = timestamps patchds = f.create_dataset("dtecObservations/patches/{}".format(patch),(Nt,Na),dtype=np.double) patchds[...] = d['data'] patchds.attrs['ra'] = radec[patchIdx].ra.deg patchds.attrs['dec'] = radec[patchIdx].dec.deg patchIdx += 1 f.close() def prepareDataPack(hdf5Datafile,timeStart=0,timeEnd=-1,arrayFile='arrays/lofar.hba.antenna.cfg'): '''Grab real data from soltions products. Stores in a DataPack object.''' f = h5py.File(hdf5Datafile,'r') dset = f['dtecObservations'] frequency = dset.attrs['frequency'] print("Using radio array file: {}".format(arrayFile)) #get array stations (they must be in the array file to be considered for data packaging) radioArray = RadioArray(arrayFile,frequency=frequency)#set frequency from solutions todo print("Created {}".format(radioArray)) patchNames = f["dtecObservations/patchNames"][:].astype(str) Nd = len(patchNames) ra = np.zeros(Nd,dtype= np.double) dec = np.zeros(Nd,dtype=np.double) antennaLabels = f["dtecObservations/antennaLabels"][:].astype(str) Na = len(antennaLabels) antennas = np.zeros([3,Na],dtype=np.double) antIdx = 0#index in solution table while antIdx < Na: ant = antennaLabels[antIdx] labelIdx = radioArray.getAntennaIdx(ant) if labelIdx is None: print("failed to find {} in {}".format(ant,radioArray.labels)) return #ITRS WGS84 stationLoc = radioArray.locs[labelIdx] antennas[:,antIdx] = stationLoc.cartesian.xyz.to(au.km).value.flatten() antIdx += 1 antennas = ac.SkyCoord(antennas[0,:]*au.km,antennas[1,:]*au.km, antennas[2,:]*au.km,frame='itrs') timestamps = f["dtecObservations/timestamps"][:].astype(str) times = at.Time(timestamps,format="isot",scale='tai') Nt = len(timestamps) dtec = np.zeros([Na,Nt,Nd],dtype=np.double) patchIdx = 0 while 
patchIdx < Nd: patchName = patchNames[patchIdx] patchds = f["dtecObservations/patches/{}".format(patchName)] ra[patchIdx] = patchds.attrs['ra'] dec[patchIdx] = patchds.attrs['dec'] dtec[:,:,patchIdx] = patchds[:,:].transpose()#from NtxNa to NaxNt patchIdx += 1 f.close() directions = ac.SkyCoord(ra*au.deg,dec*au.deg,frame='icrs') dataDict = {'radioArray':radioArray,'antennas':antennas,'antennaLabels':antennaLabels, 'times':times,'timestamps':timestamps, 'directions':directions,'patchNames':patchNames,'dtec':dtec} return DataPack(dataDict) def interpNearest(x,y,z,x_,y_): dx = np.subtract.outer(x_,x) dy = np.subtract.outer(y_,y) r = dx**2 dy *= dy r += dy np.sqrt(r,out=r) arg = np.argmin(r,axis=1) z_ = z[arg] return z_ def plotDataPack(datapack,antIdx=-1,timeIdx=[0], dirIdx=-1,figname=None,vmin=None,vmax=None): assert datapack.refAnt is not None, "set DataPack refAnt first" directions, patchNames = datapack.get_directions(dirIdx=dirIdx) antennas, antLabels = datapack.get_antennas(antIdx=antIdx) times,timestamps = datapack.get_times(timeIdx=timeIdx) dtec = np.stack([np.mean(datapack.get_dtec(antIdx = antIdx,dirIdx=dirIdx,timeIdx=timeIdx),axis=1)],axis=1) Na = len(antennas) Nt = len(times) Nd = len(directions) refAntIdx = None for i in range(Na): if antLabels[i] == datapack.refAnt: refAntIdx = i fixtime = times[Nt>>1] phase = datapack.getCenterDirection() arrayCenter = datapack.radioArray.getCenter() uvw = UVW(location = arrayCenter.earth_location,obstime = fixtime,phase = phase) ants_uvw = antennas.transform_to(uvw) dtec = np.stack([np.mean(dtec,axis=1)],axis=1) #make plots, M by 4 M = (Na>>2) + 1 + 1 fig = plt.figure(figsize=(11.,11./4.*M)) #use direction average as phase tracking direction if vmax is None: vmax = np.percentile(dtec.flatten(),99) #vmax=np.max(dtec) if vmin is None: vmin = np.percentile(dtec.flatten(),1) #vmin=np.min(dtec) N = 25 dirs_uvw = directions.transform_to(uvw) factor300 = 300./dirs_uvw.w.value U,V = 
np.meshgrid(np.linspace(np.min(dirs_uvw.u.value*factor300),np.max(dirs_uvw.u.value*factor300),N), np.linspace(np.min(dirs_uvw.v.value*factor300),np.max(dirs_uvw.v.value*factor300),N)) i = 0 while i < Na: ax = fig.add_subplot(M,4,i+1) dx = np.sqrt((ants_uvw.u[i] - ants_uvw.u[refAntIdx])**2 + (ants_uvw.v[i] - ants_uvw.v[refAntIdx])**2).to(au.km).value ax.annotate(s="{} : {:.2g} km".format(antLabels[i],dx),xy=(.2,.8),xycoords='axes fraction') if i == 0: #ax.annotate(s="{} : {:.2g} km\n{}".format(antLabels[i],dx,fixtime.isot),xy=(.2,.8),xycoords='axes fraction') #ax.annotate(s=fixtime.isot,xy=(.2,0.05),xycoords='axes fraction') ax.set_title(fixtime.isot) #ax.set_title("Ref. Proj. Dist.: {:.2g} km".format(dx)) ax.set_xlabel("U km") ax.set_ylabel("V km") D = interpNearest(dirs_uvw.u.value*factor300,dirs_uvw.v.value*factor300,dtec[i,0,:],U.flatten(),V.flatten()).reshape(U.shape) im = ax.imshow(D,origin='lower',extent=(np.min(U),np.max(U),np.min(V),np.max(V)),aspect='auto', vmin = vmin, vmax= vmax,cmap=plt.cm.coolwarm,alpha=1.) 
sc1 = ax.scatter(dirs_uvw.u.value*factor300,dirs_uvw.v.value*factor300, c='black', marker='+') i += 1 ax = fig.add_subplot(M,4,Na+1) plt.colorbar(im,cax=ax,orientation='vertical') if figname is not None: plt.savefig("{}.png".format(figname),format='png') else: plt.show() plt.close() def test_plotDataPack(): datapack = DataPack(filename="output/test/datapackObs.hdf5") try: os.makedirs('output/test/plotDataPack') except: pass plotDataPack(datapack,antIdx=-1,timeIdx=[0,1,2,3], dirIdx=-1,figname=None)#'output/test/plotDataPack/fig') def test_prepareDataPack(): dataPack = prepareDataPack('SB120-129/dtecData.hdf5',timeStart=0,timeEnd=-1, arrayFile='arrays/lofar.hba.antenna.cfg') dataPack.flagAntennas(['CS007HBA1','CS007HBA0','CS013HBA0','CS013HBA1']) dataPack.setReferenceAntenna(dataPack.antennaLabels[0]) #'CS501HBA1' dataPack.save("output/test/datapackObs.hdf5") if __name__ == '__main__': #transferPatchData(infoFile='SB120-129/WendysBootes.npz', # dataFolder='SB120-129/', # hdf5Out='SB120-129/dtecData.hdf5') test_plotDataPack() ```
github_jupyter
# Signal Autoencoder ``` import numpy as np import scipy as sp import scipy.stats import itertools import logging import matplotlib.pyplot as plt import pandas as pd import torch.utils.data as utils import math import time import tqdm import torch import torch.optim as optim import torch.nn.functional as F from argparse import ArgumentParser from torch.distributions import MultivariateNormal import torch.nn as nn import torch.nn.init as init import sys sys.path.append("../new_flows") from flows import RealNVP, Planar, MAF from models import NormalizingFlowModel ####MAF class VAE_NF(nn.Module): def __init__(self, K, D): super().__init__() self.dim = D self.K = K self.encoder = nn.Sequential( nn.Linear(12, 50), nn.LeakyReLU(True), nn.Linear(50, 30), nn.LeakyReLU(True), nn.Linear(30, 20), nn.LeakyReLU(True), nn.Linear(20, D * 2) ) self.decoder = nn.Sequential( nn.Linear(D, 20), nn.LeakyReLU(True), nn.Linear(20, 30), nn.LeakyReLU(True), nn.Linear(30, 50), nn.LeakyReLU(True), nn.Linear(50, 12) ) flow_init = MAF(dim=D) flows_init = [flow_init for _ in range(K)] prior = MultivariateNormal(torch.zeros(D).cuda(), torch.eye(D).cuda()) self.flows = NormalizingFlowModel(prior, flows_init) def forward(self, x): # Run Encoder and get NF params enc = self.encoder(x) mu = enc[:, :self.dim] log_var = enc[:, self.dim: self.dim * 2] # Re-parametrize sigma = (log_var * .5).exp() z = mu + sigma * torch.randn_like(sigma) kl_div = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp()) # Construct more expressive posterior with NF z_k, _, sum_ladj = self.flows(z) kl_div = kl_div / x.size(0) - sum_ladj.mean() # mean over batch # Run Decoder x_prime = self.decoder(z_k) return x_prime, kl_div #prong_2 = pd.read_hdf("/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_rnd.h5") #prong_3 = pd.read_hdf("/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_3prong_rnd.h5") rnd_data = pd.read_hdf("/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_rnd.h5") testprior_data 
= pd.read_hdf("/data/t3home000/spark/QUASAR/preprocessing/ThreeProng_5000_500_500.h5") dt = rnd_data.values dt_prior = testprior_data.values correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0) dt = dt[correct] correct = (dt_prior[:,3]>0) &(dt_prior[:,19]>0) & (dt_prior[:,1]>0) & (dt_prior[:,2]>0) dt_prior = dt_prior[correct] for i in range(13,19): dt[:,i] = dt[:,i]/dt[:,3] for i in range(29,35): dt[:,i] = dt[:,i]/(dt[:,19]) for i in range(13,19): dt_prior[:,i] = dt_prior[:,i]/dt_prior[:,3] for i in range(29,35): dt_prior[:,i] = dt_prior[:,i]/(dt_prior[:,19]) #correct = (dt[:,16]>0) & (dt[:,29]>=0) & (dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1) #dt = dt[correct] #Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]] #Y = dt[:,[4,5,6,7,8,11,12,13,14,15,16,17,18,20,21,22,23,24,27,28,29,30,31,32,33,34]] # When no jet 1,2 raw mass included #Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]] #bkg_idx = np.where(idx==0)[0] ##signal_idx = np.where((idx==1)) [0] #dt = dt[signal_idx] correct = (dt[:,0]>=2800) dt = dt[correct] correct = (dt_prior[:,0]>=2800) dt_prior = dt_prior[correct] idx = dt[:,-1] #sig_idx = np.where((dt_prior[:,3]>450) & (dt_prior[:,3]<550) & (dt_prior[:,19]>100) & (dt_prior[:,19]<200) & (dt_prior[:,0]>4200) & (dt_prior[:,0]<4800))[0] sig_idx = np.where((dt_prior[:,3]>200) & (dt_prior[:,19]>200) & (dt_prior[:,0]>4000) & (dt_prior[:,0]<6000))[0] bkg_idx = np.where(idx==0)[0] #bsmlike = np.where(dt[:,16]>0.9)[0] #dt = dt[bsmlike] dt_sig = dt_prior[sig_idx] dt_bkg = dt[bkg_idx] #dt = prong_2.values #correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0) #dt = dt[correct]# #for i in range(13,19): # dt[:,i] = dt[:,i]/dt[:,3]# #for i in range(29,35): # dt[:,i] = dt[:,i]/(dt[:,19])# ##correct = (dt[:,16]>0) & (dt[:,29]>=0) &(dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) 
&(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1) ##dt = dt[correct]# # ##Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]] ##Y = dt[:,[4,5,6,7,8,11,12,13,14,15,16,17,18,20,21,22,23,24,27,28,29,30,31,32,33,34]] # When no jet 1,2 raw mass included ##Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]] #idx = dt[:,-1] #bkg_idx = np.where(idx==0)[0] #sig_idx = np.where((idx==1) & (dt[:,3]>450) & (dt[:,3]<550) & (dt[:,19]>50) & (dt[:,19]<150))[0] ##signal_idx = np.where((idx==1)) [0] #dt_sig = dt[sig_idx] # #sig_refine_range = (dt_sig[:,0]>3400) & (dt_sig[:,0]<3600) #dt_sig = dt_sig[sig_refine_range] dt_sig.shape plt.hist(dt_sig[:,0],bins=np.arange(0,8000,50)); f.columns[[3,4,5,6,11,12,19,20,21,22,27,28]] #Y = dt_sig[:,[4,5,6,7,8,11,12,13,14,15,16,17,18,20,21,22,23,24,27,28,29,30,31,32,33,34]] #[3,4,5,6,11,12,19,20,21,22,27,28] Y = dt_sig[:,[3,4,5,6,11,12,19,20,21,22,27,28]] Y.shape #if nprong == 3: # dt = prong_3.values # correct = (dt[:,3]>20) &(dt[:,19]>20) # dt = dt[correct] # for i in range(13,19): # dt[:,i] = dt[:,i]/dt[:,3] # for i in range(29,35): # dt[:,i] = dt[:,i]/(dt[:,19]) # correct = (dt[:,29]>=0) &(dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1) # dt = dt[correct] # Y = dt[:,[4,5,6,7,8,11,12,13,14,15,16,17,18,20,21,22,23,24,27,28,29,30,31,32,33,34]] # #Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]] # idx = dt[:,-1] # bkg_idx = np.where(idx==0)[0] # signal_idx = np.where((idx==1) & (dt[:,3]>400))[0] # #signal_idx = np.where((idx==1)) [0] # Y = Y[signal_idx] bins = np.linspace(0,1,100) bins.shape column = 3 #print(f_rnd.columns[column]) plt.hist(Y[:,0],bins,alpha=0.5,color='b'); #plt.hist(sigout[:,column],bins,alpha=0.5,color='r'); #plt.hist(out2[:,column],bins,alpha=0.5,color='g'); #plt.axvline(np.mean(Y[:,column])) Y.shape sig_mean = [] sig_std = [] for i in range(12): 
mean = np.mean(Y[:,i]) std = np.std(Y[:,i]) sig_mean.append(mean) sig_std.append(std) Y[:,i] = (Y[:,i]-mean)/std sig_mean sig_std total_sig = torch.tensor(Y) total_sig.shape bins = np.linspace(-3,3,100) bins.shape column = 0 #print(f_rnd.columns[column]) plt.hist(Y[:,11],bins,alpha=0.5,color='b'); #plt.hist(sigout[:,column],bins,alpha=0.5,color='r'); #plt.hist(out2[:,column],bins,alpha=0.5,color='g'); #plt.axvline(np.mean(Y[:,column])) N_EPOCHS = 30 PRINT_INTERVAL = 2000 NUM_WORKERS = 4 LR = 1e-4 #N_FLOWS = 6 #Z_DIM = 8 N_FLOWS = 10 Z_DIM = 8 n_steps = 0 sigmodel = VAE_NF(N_FLOWS, Z_DIM).cuda() bs = 800 sig_train_iterator = utils.DataLoader(total_sig, batch_size=bs, shuffle=True) sig_test_iterator = utils.DataLoader(total_sig, batch_size=bs) sigoptimizer = optim.Adam(sigmodel.parameters(), lr=1e-6) beta = 1 def sigtrain(): global n_steps train_loss = [] sigmodel.train() for batch_idx, x in enumerate(sig_train_iterator): start_time = time.time() x = x.float().cuda() x_tilde, kl_div = sigmodel(x) mseloss = nn.MSELoss(size_average=False) huberloss = nn.SmoothL1Loss(size_average=False) #loss_recons = F.binary_cross_entropy(x_tilde, x, size_average=False) / x.size(0) loss_recons = mseloss(x_tilde,x ) / x.size(0) #loss_recons = huberloss(x_tilde,x ) / x.size(0) loss = loss_recons + beta* kl_div sigoptimizer.zero_grad() loss.backward() sigoptimizer.step() train_loss.append([loss_recons.item(), kl_div.item()]) if (batch_idx + 1) % PRINT_INTERVAL == 0: print('\tIter [{}/{} ({:.0f}%)]\tLoss: {} Time: {:5.3f} ms/batch'.format( batch_idx * len(x), 50000, PRINT_INTERVAL * batch_idx / 50000, np.asarray(train_loss)[-PRINT_INTERVAL:].mean(0), 1000 * (time.time() - start_time) )) n_steps += 1 def sigevaluate(split='valid'): global n_steps start_time = time.time() val_loss = [] sigmodel.eval() with torch.no_grad(): for batch_idx, x in enumerate(sig_test_iterator): x = x.float().cuda() x_tilde, kl_div = sigmodel(x) mseloss = nn.MSELoss(size_average=False) huberloss = 
nn.SmoothL1Loss(size_average=False) #loss_recons = F.binary_cross_entropy(x_tilde, x, size_average=False) / x.size(0) loss_recons = mseloss(x_tilde,x ) / x.size(0) #loss_recons = huberloss(x_tilde,x ) / x.size(0) loss = loss_recons + beta * kl_div val_loss.append(loss.item()) #writer.add_scalar('loss/{}/ELBO'.format(split), loss.item(), n_steps) #writer.add_scalar('loss/{}/reconstruction'.format(split), loss_recons.item(), n_steps) #writer.add_scalar('loss/{}/KL'.format(split), kl_div.item(), n_steps) print('\nEvaluation Completed ({})!\tLoss: {:5.4f} Time: {:5.3f} s'.format( split, np.asarray(val_loss).mean(0), time.time() - start_time )) return np.asarray(val_loss).mean(0) ae_def = { "type":"sig", "trainon":"3prong", "features":"12features", "architecture":"MAF", "selection":"mjj4500_nojetmasscut", "trainloss":"MSELoss", "beta":"beta1", "zdimnflow":"z8f10", } ae_def #from torchsummary import summary sigmodel.load_state_dict(torch.load(f"/data/t3home000/spark/QUASAR/weights/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['architecture']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}.h5")) N_EPOCHS = 10 BEST_LOSS = 0 LAST_SAVED = -1 PATIENCE_COUNT = 0 PATIENCE_LIMIT = 5 for epoch in range(1, 1000): print("Epoch {}:".format(epoch)) sigtrain() cur_loss = sigevaluate() if cur_loss <= BEST_LOSS: PATIENCE_COUNT = 0 BEST_LOSS = cur_loss LAST_SAVED = epoch print("Saving model!") torch.save(sigmodel.state_dict(),f"/data/t3home000/spark/QUASAR/weights/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['architecture']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}.h5") else: PATIENCE_COUNT += 1 print("Not saving model! 
Last saved: {}".format(LAST_SAVED)) if PATIENCE_COUNT > 10: print("Patience Limit Reached") break sigmodel.load_state_dict(torch.load(f"/data/t3home000/spark/QUASAR/weights/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['architecture']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}.h5")) sigout = sigmodel(torch.tensor(Y).float().cuda())[0] sigout = sigout.data.cpu().numpy() bins = np.linspace(-3,3,100) bins.shape column = 3 #print(f_rnd.columns[column] plt.hist(Y[:,column],bins,alpha=0.5,color='b'); plt.hist(sigout[:,column],bins,alpha=0.5,color='r'); #plt.hist(out2[:,column],bins,alpha=0.5,color='g'); plt.axvline(np.mean(Y[:,column])) mjj, j1mass, j2mass = [4000, 150, 150] f = pd.read_hdf(f"/data/t3home000/spark/QUASAR/preprocessing/delphes_output_{mjj}_{j1mass}_{j2mass}.h5") dt = f.values correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0) dt = dt[correct] for i in range(13,19): dt[:,i] = dt[:,i]/dt[:,3] for i in range(29,35): dt[:,i] = dt[:,i]/(dt[:,19]) correct = (dt[:,0]>mjj-300) & (dt[:,0]<mjj+300) dt = dt[correct] correct = (dt[:,3]>j1mass-100) & (dt[:,3]<j1mass+100) & (dt[:,19]>j2mass-100) & (dt[:,19]<j2mass+100) dt = dt[correct] Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]] for i in range(12): Y[:,i] = (Y[:,i]-sig_mean[i])/sig_std[i] sigout = sigmodel(torch.tensor(Y).float().cuda())[0] sigout = sigout.data.cpu().numpy() bins = np.linspace(-3,3,101) bins.shape column = 2 #print(f_rnd.columns[column] #plt.hist(dt[:,column],bins,alpha=0.5,color='b'); plt.hist(sigout[:,column],bins,alpha=0.5,color='r'); plt.hist(Y[:,column],bins,alpha=0.5,color='g'); #plt.axvline(np.mean(Y[:,column])) bins = np.linspace(-3,3,100) bins.shape column = 5 #print(f_rn.columns[column] plt.hist(Y[:,column],bins,alpha=0.5,color='b'); plt.hist(sigout[:,column],bins,alpha=0.5,color='r'); #plt.hist(out2[:,column],bins,alpha=0.5,color='g'); plt.axvline(np.mean(Y[:,column])) varyj1mass_wps = ([4000, 150, 150],[4000, 300, 
150],[4000, 450, 150],[4000, 500, 150],[4000, 650, 150],[4000, 700, 150],[4000, 850, 150],[4000, 900, 150])
# Evaluate the signal autoencoder at every (mjj, j1mass, j2mass) working point
# and store the per-event reconstruction loss to disk.
for mjj, j1mass, j2mass in varyj1mass_wps:
f = pd.read_hdf(f"/data/t3home000/spark/QUASAR/preprocessing/delphes_output_{mjj}_{j1mass}_{j2mass}.h5")
dt = f.values
# Require positive values in the columns used as ratio denominators below.
correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0)
dt = dt[correct]
# Columns 13-18 divided by column 3, columns 29-34 by column 19
# (columns 3/19 are cut around j1mass/j2mass below, i.e. the two jet masses).
for i in range(13,19):
dt[:,i] = dt[:,i]/dt[:,3]
for i in range(29,35):
dt[:,i] = dt[:,i]/(dt[:,19])
# Window cuts around the working point: column 0 vs mjj, columns 3/19 vs jet masses.
correct = (dt[:,0]>mjj-300) & (dt[:,0]<mjj+300)
dt = dt[correct]
correct = (dt[:,3]>j1mass-100) & (dt[:,3]<j1mass+100) & (dt[:,19]>j2mass-100) & (dt[:,19]<j2mass+100)
dt = dt[correct]
# NOTE(review): only 10 features here (mass columns 3 and 19 excluded), while
# other selections in this file feed 12 features -- confirm the model input size.
Y = dt[:,[4,5,6,11,12,20,21,22,27,28]]
#Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]
#Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]
#Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
print(Y.shape)
# Standardise with the signal-training statistics.
for i in range(10):
Y[:,i] = (Y[:,i]-sig_mean[i])/sig_std[i]
total_bb_test = torch.tensor(Y)
#huberloss = nn.SmoothL1Loss(reduction='none')
# Per-event mean squared reconstruction error and its per-event variance;
# their sum is used as the WAIC-style anomaly score.
sigae_bbloss = torch.mean((sigmodel(total_bb_test.float().cuda())[0]- total_bb_test.float().cuda())**2,dim=1).data.cpu().numpy()
bbvar = torch.var((sigmodel(total_bb_test.float().cuda())[0]- total_bb_test.float().cuda())**2,dim=1).data.cpu().numpy()
waic = sigae_bbloss + bbvar
#sigae_bbloss = torch.mean(huberloss(model(total_bb_test.float().cuda())[0],total_bb_test.float().cuda()),dim=1).data.cpu().numpy()
print(waic[0:10])
plt.hist(waic,bins=np.linspace(0,10,1001),density=True);
plt.xlim([0,2])
#np.save(out_file_waic,waic)
np.save(f'sigaetestprior4500500150_wp_{mjj}_{j1mass}_{j2mass}.npy',sigae_bbloss)
# NOTE(review): the loop above writes 'sigaetestprior4500500150_wp_*.npy' but the
# loop below reads 'sigae_wp_*.npy' -- those files must come from an earlier run.
losslist = []
for mjj, j1mass, j2mass in varyj1mass_wps:
a = np.load(f'sigae_wp_{mjj}_{j1mass}_{j2mass}.npy')
losslist.append(a)
losslist[1]
# Overlay the loss distributions of the first working points.
plt.hist(losslist[0],bins = np.arange(0,10,.1),alpha=0.2);
plt.hist(losslist[1],bins = np.arange(0,10,.1),alpha=0.2);
plt.hist(losslist[2],bins = np.arange(0,10,.1),alpha=0.2);
plt.hist(losslist[3],bins = 
np.arange(0,10,.1),alpha=0.2);
plt.hist(losslist[4],bins = np.arange(0,10,.1),alpha=0.2);
# Input HDF5 samples to score.  'conventional_tau_rnd.h5' appears twice: the
# parallel flag lists below select its background subset once and its signal
# subset once (see exist_signalflag / is_signal / nprong).
inputlist = [
'/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB1_rnd.h5',
'/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB2.h5',
'/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB3.h5',
'/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_background.h5',
'/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_rnd.h5',
'/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_rnd.h5',
'/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_3prong_rnd.h5',
'/data/t3home000/spark/QUASAR/preprocessing/delphes_output_4500_500_150.h5'
]
ae_def
# Output paths for the WAIC scores, one per input above.
# NOTE(review): unlike the weight paths, these names omit ae_def['architecture'].
outputlist_waic = [
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_bb1.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_bb2.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_bb3.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_purebkg.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_rndbkg.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_2prong.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_3prong.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_WAICloss_4500.npy"
]
# Output paths for the plain per-event reconstruction loss, same order.
outputlist_justloss = [
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_bb1.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_bb2.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_bb3.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_purebkg.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_rndbkg.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_2prong.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_3prong.npy",
f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_4500.npy"
]
# Per-input flag: does the sample's last column carry a truth (signal) bit?
exist_signalflag = [
False,
False,
False,
False,
True,
True,
True,
False,
]
# When a truth bit exists: keep the signal (True) or background (False) subset.
is_signal = [
False,
False,
False,
False,
False,
True,
True,
True
]
# Extra working-point selection applied in the loop body (None = no extra cut).
nprong = [
None,
None,
None,
None,
None,
'2prong',
'3prong',
'4500'
]
for in_file, out_file_waic, out_file_justloss, sigbit_flag, is_sig, n_prong in 
zip(inputlist,outputlist_waic,outputlist_justloss,exist_signalflag,is_signal, nprong):
# Score each sample with the signal autoencoder; persist WAIC and plain loss.
f_bb = pd.read_hdf(in_file)
dt = f_bb.values
#correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0) &(dt[:,2]>0) & (dt[:,16]>0) & (dt[:,32]>0)
#dt = dt[correct]
# Require positive denominators for the ratio columns formed below.
correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0)
dt = dt[correct]
for i in range(13,19):
dt[:,i] = dt[:,i]/dt[:,3]
for i in range(29,35):
dt[:,i] = dt[:,i]/(dt[:,19])
#correct = (dt[:,16]>0) & (dt[:,29]>=0) &(dt[:,29]<=1)&(dt[:,30]>=0) &(dt[:,30]<=1)&(dt[:,31]>=0) &(dt[:,31]<=1)&(dt[:,32]>=0) &(dt[:,32]<=1)&(dt[:,33]>=0) &(dt[:,33]<=1)&(dt[:,34]>=-0.01) &(dt[:,34]<=1)
#dt = dt[correct]
#correct = (dt[:,3]>100)
#dt = dt[correct]
#correct = (dt[:,19]>20)
#dt = dt[correct]
# Keep events with column 0 (cut around mjj elsewhere in this file) >= 2800.
correct = (dt[:,0]>=2800)
dt = dt[correct]
#bsmlike = np.where(dt[:,16]>0.9)[0]
#dt = dt[bsmlike]
# If this sample carries a truth bit in its last column, keep the requested subset.
if sigbit_flag:
idx = dt[:,-1]
sigidx = (idx == 1)
bkgidx = (idx == 0)
if is_sig:
dt = dt[sigidx]
else:
dt = dt[bkgidx]
# Optional working-point cuts on the two jet-mass columns and column 0.
if n_prong == '2prong':
correct = (dt[:,3]>450) & (dt[:,3]<550) & (dt[:,19]>50) & (dt[:,19]<150) & (dt[:,0]>3400) & (dt[:,0]<3600)
dt = dt[correct]
if n_prong == '3prong':
correct = (dt[:,3]>450) & (dt[:,3]<550) & (dt[:,19]>50) & (dt[:,19]<150) & (dt[:,0]>3400) & (dt[:,0]<3600)
dt = dt[correct]
if n_prong == '4500':
correct = (dt[:,3]>450) & (dt[:,3]<550) & (dt[:,19]>100) & (dt[:,19]<200) & (dt[:,0]>4200) & (dt[:,0]<4800)
dt = dt[correct]
# 12 input features, standardised with the signal-training statistics.
Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
#Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]
#Y = dt[:,[3,4,5,6,11,12,13,14,15,16,17,18,19,20,21,22,27,28,29,30,31,32,33,34]]
#Y = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
print(Y.shape)
for i in range(12):
Y[:,i] = (Y[:,i]-sig_mean[i])/sig_std[i]
total_bb_test = torch.tensor(Y)
#huberloss = nn.SmoothL1Loss(reduction='none')
# Per-event reconstruction MSE plus its per-event variance = WAIC-style score.
sigae_bbloss = torch.mean((sigmodel(total_bb_test.float().cuda())[0]- total_bb_test.float().cuda())**2,dim=1).data.cpu().numpy()
bbvar = torch.var((sigmodel(total_bb_test.float().cuda())[0]- total_bb_test.float().cuda())**2,dim=1).data.cpu().numpy()
waic = sigae_bbloss + bbvar
#sigae_bbloss = torch.mean(huberloss(model(total_bb_test.float().cuda())[0],total_bb_test.float().cuda()),dim=1).data.cpu().numpy()
print(waic[0:10])
plt.hist(waic,bins=np.linspace(0,10,1001),density=True);
plt.xlim([0,2])
np.save(out_file_waic,waic)
np.save(out_file_justloss,sigae_bbloss)
# Reload the per-sample losses written by the loop above and overlay them.
print(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_3prong.npy")
loss_prong3 = np.load(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_3prong.npy")
loss_prong2 = np.load(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_2prong.npy")
loss_purebkg = np.load(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_purebkg.npy")
loss_rndbkg = np.load(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_rndbkg.npy")
loss_4500 = np.load(f"../data_strings/{ae_def['type']}_{ae_def['trainon']}_{ae_def['features']}_{ae_def['selection']}_{ae_def['trainloss']}_{ae_def['beta']}_{ae_def['zdimnflow']}_Justloss_4500.npy")
plt.hist(loss_rndbkg,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='Pure Bkg');
#plt.hist(loss_rndbkg,bins=np.linspace(0,2,100),density=False,alpha=0.3,label='(rnd) bkg');
plt.hist(loss_prong2,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='2prong (rnd)sig');
plt.hist(loss_prong3,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='3prong 
(rnd)sig');
plt.hist(loss_4500,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='2prong 4500');
#plt.yscale('log')
plt.xlabel('Loss (SigAE trained on 2prong sig)')
plt.legend(loc='upper right')
#plt.savefig('sigae_trained_on_2prongsig.png')
# Same comparison without the 4500 working point.
plt.hist(loss_rndbkg,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='Pure Bkg');
#plt.hist(loss_rndbkg,bins=np.linspace(0,2,100),density=False,alpha=0.3,label='(rnd) bkg');
plt.hist(loss_prong2,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='2prong (rnd)sig');
plt.hist(loss_prong3,bins=np.linspace(0,10,100),density=True,alpha=0.3,label='3prong (rnd)sig');
#plt.yscale('log')
plt.xlabel('Loss (SigAE trained on 2prong sig)')
plt.legend(loc='upper right')
#plt.savefig('sigae_trained_on_2prongsig.png')
len(loss_prong2)
outputlist_waic
outputlist_justloss
sigae_bbloss
ae_def
sigae_bbloss
plt.hist(sigae_bbloss,bins=np.linspace(0,10,1001));
np.save('../data_strings/sigae_2prong_loss_bb3.npy',sigae_bbloss)
# Score the background subset of the last-processed sample.
X_bkg = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
X_bkg = X_bkg[bkg_idx]
for i in range(12):
X_bkg[:,i] = (X_bkg[:,i]-sig_mean[i])/sig_std[i]
total_bkg_test = torch.tensor(X_bkg)
sigae_bkgloss = torch.mean((sigmodel(total_bkg_test.float().cuda())[0]- total_bkg_test.float().cuda())**2,dim=1).data.cpu().numpy()
sigae_sigloss = torch.mean((sigmodel(total_sig.float().cuda())[0]- total_sig.float().cuda())**2,dim=1).data.cpu().numpy()
f_3prong = pd.read_hdf("/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_3prong_rnd.h5")
f_bb1 = pd.read_hdf('/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB1_rnd.h5')
dt_bb1 = f_bb1.values
X_bb1 = dt_bb1[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
X_bb1.shape
sig_mean
sig_std
for i in range(12):
X_bb1[:,i] = (X_bb1[:,i]-sig_mean[i])/sig_std[i]
plt.hist(X_bb1[:,0],bins = np.linspace(-2,2,10))
# NOTE(review): leftover scratch expression -- it iterates the `range` builtin
# itself and would fail if consumed; safe to delete.
(torch.tensor(dt[i * chunk_size:(i + 1) * chunk_size]) for i in range )
# First version of get_loss (generator-expression form).
# NOTE(review): shadowed by the second definition below; `total_size` is
# hard-coded to 1,000,000 regardless of len(dt).
def get_loss(dt):
chunk_size=5000
total_size=1000000
i = 0
i_max = total_size // chunk_size
print(i_max)
gen = (torch.tensor(dt[i*chunk_size: (i + 1) * chunk_size]) for i in range(i_max))
with torch.no_grad():
loss = [
n
for total_in_selection in gen
for n in torch.mean((sigmodel(total_in_selection.float().cuda())[0]- total_in_selection.float().cuda())**2,dim=1).data.cpu().numpy()
]
return loss
# Second (effective) version: chunked per-event reconstruction MSE under no_grad.
def get_loss(dt):
def generator(dt, chunk_size=5000, total_size=1000000):
i = 0
i_max = total_size // chunk_size
print(i_max)
for i in range(i_max):
start=i * chunk_size
stop=(i + 1) * chunk_size
yield torch.tensor(dt[start:stop])
loss = []
with torch.no_grad():
for total_in_selection in generator(dt,chunk_size=5000, total_size=1000000):
loss.extend(torch.mean((sigmodel(total_in_selection.float().cuda())[0]- total_in_selection.float().cuda())**2,dim=1).data.cpu().numpy())
return loss
bb1_loss_sig = get_loss(X_bb1)
# NOTE(review): np.float is deprecated (removed in NumPy >= 1.24); use float or np.float64.
bb1_loss_sig = np.array(bb1_loss_sig,dtype=np.float)
print(bb1_loss_sig)
plt.hist(bb1_loss_sig,bins=np.linspace(0,100,1001));
np.save('../data_strings/sigaeloss_bb1.npy',bb1_loss_sig)
# Score the 3-prong sample with both the bkg AE (`model`) and the signal AE.
dt_3prong = f_3prong.values
Z = dt_3prong[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
Z.shape
for i in range(12):
Z[:,i] = (Z[:,i]-sig_mean[i])/sig_std[i]
total_3prong = torch.tensor(Z)
bkgae_bkgloss = torch.mean((model(total_bkg_test.float().cuda())[0]- total_bkg_test.float().cuda())**2,dim=1).data.cpu().numpy()
bkgae_3prongloss = torch.mean((model(total_3prong.float().cuda())[0]- total_3prong.float().cuda())**2,dim=1).data.cpu().numpy()
sigae_3prongloss = torch.mean((sigmodel(total_3prong.float().cuda())[0]- total_3prong.float().cuda())**2,dim=1).data.cpu().numpy()
sigae_3prongloss.shape
bins = np.linspace(0,10,1001)
plt.hist(sigae_sigloss,bins,weights = np.ones(len(signal_idx))*10,alpha=0.4,color='r',label='2 prong signal');
plt.hist(sigae_3prongloss,bins,weights = np.ones(100000)*10,alpha=0.5,color='g',label='3 prong signal');
plt.hist(sigae_bkgloss,bins,alpha=0.4,color='b',label='background');
#plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left')
plt.legend(loc='upper right')
plt.xlabel('Signal AE Loss',fontsize=15) def get_tpr_fpr(sigloss,bkgloss,aetype='sig'): bins = np.linspace(0,50,1001) tpr = [] fpr = [] for cut in bins: if aetype == 'sig': tpr.append(np.where(sigloss<cut)[0].shape[0]/len(sigloss)) fpr.append(np.where(bkgloss<cut)[0].shape[0]/len(bkgloss)) if aetype == 'bkg': tpr.append(np.where(sigloss>cut)[0].shape[0]/len(sigloss)) fpr.append(np.where(bkgloss>cut)[0].shape[0]/len(bkgloss)) return tpr,fpr def get_precision_recall(sigloss,bkgloss,aetype='bkg'): bins = np.linspace(0,100,1001) tpr = [] fpr = [] precision = [] for cut in bins: if aetype == 'sig': tpr.append(np.where(sigloss<cut)[0].shape[0]/len(sigloss)) precision.append((np.where(sigloss<cut)[0].shape[0])/(np.where(bkgloss<cut)[0].shape[0]+np.where(sigloss<cut)[0].shape[0])) if aetype == 'bkg': tpr.append(np.where(sigloss>cut)[0].shape[0]/len(sigloss)) precision.append((np.where(sigloss>cut)[0].shape[0])/(np.where(bkgloss>cut)[0].shape[0]+np.where(sigloss>cut)[0].shape[0])) return precision,tpr tpr_2prong, fpr_2prong = get_tpr_fpr(sigae_sigloss,sigae_bkgloss,'sig') tpr_3prong, fpr_3prong = get_tpr_fpr(sigae_3prongloss,sigae_bkgloss,'sig') plt.plot(fpr_2prong,tpr_2prong,label='signal AE') #plt.plot(VAE_bkg_fpr,VAE_bkg_tpr,label='Bkg VAE-Vanilla') plt.plot(bkg_fpr4,bkg_tpr4,label='Bkg NFlowVAE-Planar') plt.xlabel(r'$1-\epsilon_{bkg}$',fontsize=15) plt.ylabel(r'$\epsilon_{sig}$',fontsize=15) #plt.semilogy() #plt.legend(bbox_to_anchor=(1.05, 1.0), loc='upper left') plt.legend(loc='lower right') plt.xlim([0.0,1.0]) plt.ylim([0.0,1.0]) plt.savefig('ROC_Curve_sigae.png') precision,recall = get_precision_recall(loss_sig,loss_bkg,aetype='bkg') np.save('NFLOWVAE_PlanarNEW_22var_sigloss.npy',loss_sig) np.save('NFLOWVAE_PlanarNEW_22var_bkgloss.npy',loss_bkg) np.save('NFLOWVAE_PlanarNEW_precision.npy',precision) np.save('NFLOWVAE_PlanarNEW_recall.npy',recall) np.save('NFLOWVAE_PlanarNEW_bkgAE_fpr.npy',bkg_fpr) np.save('NFLOWVAE_PlanarNEW_bkgAE_tpr.npy',bkg_tpr) 
np.save('NFLOWVAE_PlanarNEW_sigloss.npy',loss_sig) np.save('NFLOWVAE_PlanarNEW_bkgloss.npy',loss_bkg) plt.plot(recall,precision) flows = [1,2,3,4,5,6] zdim = [1,2,3,4,5] for N_flows in flows: for Z_DIM in zdim: model = VAE_NF(N_FLOWS, Z_DIM).cuda() optimizer = optim.Adam(model.parameters(), lr=LR) BEST_LOSS = 99999 LAST_SAVED = -1 PATIENCE_COUNT = 0 PATIENCE_LIMIT = 5 for epoch in range(1, N_EPOCHS): print("Epoch {}:".format(epoch)) train() cur_loss = evaluate() if cur_loss <= BEST_LOSS: PATIENCE_COUNT = 0 BEST_LOSS = cur_loss LAST_SAVED = epoch print("Saving model!") if mode == 'ROC': torch.save(model.state_dict(),f"/data/t3home000/spark/QUASAR/weights/bkg_vae_NF_planar_RND_22var_z{Z_DIM}_f{N_FLOWS}.h5") else: torch.save(model.state_dict(), f"/data/t3home000/spark/QUASAR/weights/bkg_vae_NF_planar_PureBkg_22var_z{Z_DIM}_f{N_FLOWS}.h5") else: PATIENCE_COUNT += 1 print("Not saving model! Last saved: {}".format(LAST_SAVED)) if PATIENCE_COUNT > 3: print("Patience Limit Reached") break loss_bkg = get_loss(dt_PureBkg[bkg_idx]) loss_sig = get_loss(dt_PureBkg[signal_idx]) np.save(f'NFLOWVAE_PlanarNEW_22var_z{Z_DIM}_f{N_flows}_sigloss.npy',loss_sig) np.save(f'NFLOWVAE_PlanarNEW_22var_z{Z_DIM}_f{N_flows}_bkgloss.npy',loss_bkg) ```
github_jupyter
# Deep Learning Intro
```
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
```
## Shallow and Deep Networks
```
from sklearn.datasets import make_moons

# Two interleaving half-moon clusters; a fixed seed keeps the demo reproducible.
X, y = make_moons(n_samples=1000, noise=0.1, random_state=0)

plt.plot(X[y==0, 0], X[y==0, 1], 'ob', alpha=0.5)
plt.plot(X[y==1, 0], X[y==1, 1], 'xr', alpha=0.5)
plt.legend(['0', '1'])
X.shape
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD, Adam
```
### Shallow Model
```
# Logistic regression as a one-layer network: 2 inputs -> 1 sigmoid unit.
model = Sequential()
model.add(Dense(1, input_shape=(2,), activation='sigmoid'))
model.compile(Adam(learning_rate=0.05), 'binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=200, verbose=0)
results = model.evaluate(X_test, y_test)
results
# NOTE(review): the message says "Train set" but `results` comes from
# model.evaluate(X_test, y_test) above -- this is the test accuracy.
print("The Accuracy score on the Train set is:\t{:0.3f}".format(results[1]))
# Plot the classifier's decision regions over a 101x101 grid spanning the data.
def plot_decision_boundary(model, X, y):
amin, bmin = X.min(axis=0) - 0.1
amax, bmax = X.max(axis=0) + 0.1
hticks = np.linspace(amin, amax, 101)
vticks = np.linspace(bmin, bmax, 101)
aa, bb = np.meshgrid(hticks, vticks)
ab = np.c_[aa.ravel(), bb.ravel()]
c = model.predict(ab)
cc = c.reshape(aa.shape)
plt.figure(figsize=(12, 8))
plt.contourf(aa, bb, cc, cmap='bwr', alpha=0.2)
plt.plot(X[y==0, 0], X[y==0, 1], 'ob', alpha=0.5)
plt.plot(X[y==1, 0], X[y==1, 1], 'xr', alpha=0.5)
plt.legend(['0', '1'])
plot_decision_boundary(model, X, y)
```
### Deep model
```
# Same task with two hidden tanh layers before the sigmoid output.
model = Sequential()
model.add(Dense(4, input_shape=(2,), activation='tanh'))
model.add(Dense(2, activation='tanh'))
model.add(Dense(1, activation='sigmoid'))
model.compile(Adam(learning_rate=0.05), 'binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=100, verbose=0)
model.evaluate(X_test, y_test)
from sklearn.metrics import accuracy_score, confusion_matrix
# NOTE(review): Sequential.predict_classes was removed in TensorFlow >= 2.6;
# newer code uses (model.predict(X) > 0.5).astype(int) for binary outputs.
y_train_pred = model.predict_classes(X_train)
y_test_pred = model.predict_classes(X_test)
print("The Accuracy score on the Train set is:\t{:0.3f}".format(accuracy_score(y_train, y_train_pred)))
print("The Accuracy score on the Test set is:\t{:0.3f}".format(accuracy_score(y_test, y_test_pred)))
plot_decision_boundary(model, X, y)
```
## Multiclass classification
### The Iris dataset
```
df = pd.read_csv('../data/iris.csv')
import seaborn as sns
sns.pairplot(df, hue="species")
df.head()
X = df.drop('species', axis=1)
X.head()
target_names = df['species'].unique()
target_names
# Map each species name to an integer class index.
target_dict = {n:i for i, n in enumerate(target_names)}
target_dict
y= df['species'].map(target_dict)
y.head()
from tensorflow.keras.utils import to_categorical
# One-hot encode the integer labels for categorical_crossentropy.
y_cat = to_categorical(y)
y_cat[:10]
X_train, X_test, y_train, y_test = train_test_split(X.values, y_cat, test_size=0.2)
# Softmax classifier: 4 features -> 3 classes.
model = Sequential()
model.add(Dense(3, input_shape=(4,), activation='softmax'))
model.compile(Adam(learning_rate=0.1), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=20, validation_split=0.1)
y_pred = model.predict(X_test)
y_pred[:5]
# Convert one-hot / probability rows back to class indices for the reports.
y_test_class = np.argmax(y_test, axis=1)
y_pred_class = np.argmax(y_pred, axis=1)
from sklearn.metrics import classification_report
print(classification_report(y_test_class, y_pred_class))
confusion_matrix(y_test_class, y_pred_class)
```
## Exercise 1
The [Pima Indians dataset](https://archive.ics.uci.edu/ml/datasets/diabetes) is a very famous dataset distributed by UCI and originally collected from the National Institute of Diabetes and Digestive and Kidney Diseases. It contains data from clinical exams for women age 21 and above of Pima indian origins. The objective is to predict based on diagnostic measurements whether a patient has diabetes.
It has the following features:

- Pregnancies: Number of times pregnant
- Glucose: Plasma glucose concentration at 2 hours in an oral glucose tolerance test
- BloodPressure: Diastolic blood pressure (mm Hg)
- SkinThickness: Triceps skin fold thickness (mm)
- Insulin: 2-Hour serum insulin (mu U/ml)
- BMI: Body mass index (weight in kg/(height in m)^2)
- DiabetesPedigreeFunction: Diabetes pedigree function
- Age: Age (years)

The last column is the outcome, and it is a binary variable.

In this first exercise we will explore it through the following steps:

1. Load the ../data/diabetes.csv dataset, use pandas to explore the range of each feature
- For each feature draw a histogram. Bonus points if you draw all the histograms in the same figure.
- Explore correlations of features with the outcome column. You can do this in several ways, for example using the `sns.pairplot` we used above or drawing a heatmap of the correlations.
- Do features need standardization? If so what standardization technique will you use? MinMax? Standard?
- Prepare your final `X` and `y` variables to be used by an ML model. Make sure you define your target variable well. Will you need dummy columns?

## Exercise 2

Build a fully connected NN model that predicts diabetes. Follow these steps:

1. Split your data in a train/test with a test size of 20% and a `random_state = 22`
- define a sequential model with at least one inner layer. You will have to make choices for the following things:
- what is the size of the input?
- how many nodes will you use in each layer?
- what is the size of the output?
- what activation functions will you use in the inner layers?
- what activation function will you use at output?
- what loss function will you use?
- what optimizer will you use?
- fit your model on the training set, using a validation_split of 0.1
- test your trained model on the test data from the train/test split
- check the accuracy score, the confusion matrix and the classification report

## Exercise 3

Compare your work with the results presented in [this notebook](https://www.kaggle.com/futurist/d/uciml/pima-indians-diabetes-database/pima-data-visualisation-and-machine-learning). Are your Neural Network results better or worse than the results obtained by traditional Machine Learning techniques?

- Try training a Support Vector Machine or a Random Forest model on the exact same train/test split. Is the performance better or worse?
- Try restricting your features to only 4 features like in the suggested notebook. How does model performance change?

## Exercise 4

[Tensorflow playground](http://playground.tensorflow.org/) is a web-based neural network demo. It is really useful to develop an intuition about what happens when you change architecture, activation function or other parameters. Try playing with it for a few minutes. You don't need to understand the meaning of every knob and button on the page, just get a sense for what happens if you change something.

In the next chapter we'll explore these things in more detail.
github_jupyter