code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.10.0 64-bit
#     language: python
#     name: python3
# ---

# Extract part of an e-mail address with chained string splits.
# BUG FIX: the previous placeholder address ('<EMAIL>') contained no '-',
# so parts[1] raised IndexError; a real hyphenated address restores the demo.
words = 'His e-mail is q-lar@freecodecamp.org'
pieces = words.split()        # split on whitespace -> 4th piece is the address
parts = pieces[3].split('-')  # split the address on '-'
n = parts[1]
print(n)

# ## Dictionary
# A bag of values, each with its own label
#
# ## List
# a linear collection of values that stay in order

# dictionaries
purse = dict()
purse['money'] = 12
purse['candy'] = 3
purse['tissues'] = 75
print(purse)
purse['candy'] = purse['candy'] + 2
print(purse)

## COUNT NUMBER OF SPECIFIC WORDS IN A LIST OF WORDS ##
counts = {'quincy': 1, 'mrugesh': 42, 'beau': 100, '0': 10}
# .get() returns the default (0) for a missing key instead of raising KeyError
print(counts.get('kris', 0))

# +
### MAKING HISTOGRAM ###
"""
Using dictionary you can use it to count the number of words in a list
"""
ccc = dict()
ccc['csev'] = 1
ccc['cwen'] = 1
print(ccc)
ccc['cwen'] = ccc['cwen'] + 1
print(ccc)

'''
The problem with dictionaries - there are rules that you have to respect
- we can use the 'in' operator - to see if a key is in the dictionary
'''

## HISTOGRAM CODE ##
# BUG FIX: the original mixed two dictionaries ('count' and 'counts'), so
# first occurrences went into one dict while repeats incremented (and the
# print read) a stale dict from an earlier cell. One dict is used throughout.
counts = dict()
names = ['csev', 'cwen', 'csev', 'zquian', 'cwen']
for name in names:
    if name not in counts:
        counts[name] = 1
    else:
        counts[name] = counts[name] + 1
print(counts)
# +
## dictionaries and loops ##
## count the most common word present into a lecture

# initializing a counts dictionary
counts = dict()
text = """edge worker on an average is spending more than 60 per cent of the work week engaged in electronic communication and Internet searching, with close to 30 per cent of a worker s time dedicated to reading and answering e-mails. 
<NAME> in his Wall Street journal bestseller book Deep Work: Rules for Focused Success in a Distracted World has a valid reason to state that the reason knowledge workers are losing their familiarity with deep work is well established: network tools It is quite evident that the rise of social media networks combined with ubiquitous access to them through smart phones and networked office computers has fragmented most knowledge workers attention into slivers There is increasing evidence today of the knowledge workers not been involved in cognitively demanding tasks qualified as deep work but rather in more logical style mundane tasks, which the author refers to as shallow work To substantiate with an example say if we set about trying to brainstorm different approaches to a problem at hand that is deep work. If we just answer a reply all in a department, thats shallow work. The author observes that if our nature of work is primarily shallow in nature and does not warrant intellectual abilities, we increasing lose our capacity to perform cognitively challenging work referred to as deep work by the author. Therefore, the proposition of the book is based on the hypothesis that the ability to perform deep work is becoming increas- ingly rare at exactly the same time when it is becoming increasingly valuable in our economy. As a consequence, the few who cultivate this skill, and then make it the core of their working life, will thrive"""
print('Enter a line of text:')
line = input('')
words = line.split()
print('Words:', words)
print('Counting...')
# BUG FIX: the loop originally updated 'count' but printed 'counts';
# the same dictionary is now both updated and printed.
for word in words:
    counts[word] = counts.get(word, 0) + 1
print('Counts', counts)
# -
2022/Python-FreeCodeCamp/chapter_9.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Notebook: builds the (flow depth, flow speed) dislodgement-transition figures
# from a grid of Monte-Carlo model outputs stored under ./data/ddd/.
# NOTE: runs under `%pylab inline`, so array/plotting names (array, sqrt,
# zeros, loadtxt, plot, subplots, ...) come from the pylab namespace.
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
# %pylab inline
import os.path
from os import path
pylab.rcParams['figure.figsize'] = (20.0, 10.0)
matplotlib.rcParams.update({'font.size': 25,'legend.fontsize':25})#,'font.family': 'serif'})
pylab.rc('axes', linewidth=1)
params = {'text.usetex': True, 'text.latex.preamble': [r'\usepackage{cmbright}', r'\usepackage{amsmath}']}
pylab.rcParams.update(params)
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import AutoMinorLocator
import warnings
warnings.simplefilter(action = "ignore", category = RuntimeWarning)

# +
def motion_space_p(fr_l, fd_l, dis_l, ca):
    """Split (fr, fd) sample pairs into 'dislodged' and 'not dislodged' sets.

    A sample i counts as dislodged when max(dis_l[:, i]) reaches threshold
    `ca`; returns four arrays: (fr_dis, fd_dis, fr_ndis, fd_ndis).
    """
    len_mc_l = len(fr_l)
    fr_dis_l = []
    fd_dis_l = []
    fr_ndis_l = []
    fd_ndis_l = []
    for i_mc in range(len_mc_l):
        # column i_mc of dis_l holds the displacement history of sample i_mc
        if max(dis_l[:, i_mc]) >= ca:
            fr_dis_l.append(fr_l[i_mc])
            fd_dis_l.append(fd_l[i_mc])
        if max(dis_l[:, i_mc]) < ca:
            fr_ndis_l.append(fr_l[i_mc])
            fd_ndis_l.append(fd_l[i_mc])
    fr_dis_l = array(fr_dis_l)
    fd_dis_l = array(fd_dis_l)
    fr_ndis_l = array(fr_ndis_l)
    fd_ndis_l = array(fd_ndis_l)
    return fr_dis_l, fd_dis_l, fr_ndis_l, fd_ndis_l

def trans_fr(fr_l, fu_l, froude, d_fd):
    """Return the minimum fu among samples whose fr lies within
    `froude` +/- `d_fd`; falls back to max(fu_l) when none match."""
    n_fdr_l = len(fr_l)
    fu = []
    for i in range(n_fdr_l):
        if fr_l[i] >= froude - d_fd and fr_l[i] <= froude + d_fd:
            fu.append(fu_l[i])
    fu = asarray(fu)
    if len(fu) == 0:
        # no sample in the band: fall back to the overall maximum
        tt = amax(fu_l)
    else:
        tt = amin(fu)
    return(tt)

def make_outputfile(fig, tt='test', res=200):
    """Save `fig` as Fig_<tt>.{eps,png,pdf,svg}; png/pdf/svg use dpi=`res`."""
    fname_eps = 'Fig_{t1}.eps'.format(t1=tt)
    fname_png = 'Fig_{t1}.png'.format(t1=tt)
    fname_pdf = 'Fig_{t1}.pdf'.format(t1=tt)
    fname_svg = 'Fig_{t1}.svg'.format(t1=tt)
    fig.savefig(fname_eps, bbox_inches='tight', transparent=True)
    fig.savefig(fname_png, dpi=res, bbox_inches='tight', transparent=True)
    fig.savefig(fname_pdf, dpi=res, bbox_inches='tight', transparent=True)
    fig.savefig(fname_svg, dpi=res, bbox_inches='tight', transparent=True)
    print("Files '{t1}', '{t3}', '{t2} and {t5}' created, where the latter two have dpi={t4}".format(t1=fname_eps,t2=fname_png,t3=fname_pdf,t5=fname_svg,t4=str(res)))
# -

from src.helpers import det_runname,read_input_sldl,read_model_data,read_input,round_n
help(read_model_data)

def exclude_unph_fr(fd_local1, fu_local1):
    """Mask (set to NaN, in place) depth/speed pairs whose Froude number
    fu/sqrt(g*fd) falls outside [0.5, 2.0].

    NOTE: mutates and returns its input arrays; callers rely on the
    in-place update.
    """
    fr_low = 0.5
    fr_high = 2.0
    dummy = []
    for i in range(len(fd_local1)):
        if fd_local1[i] != 0.0 and fu_local1[i] != 0.0:
            # Froude number of sample i (g = 9.81 m/s^2)
            dummy.append(fu_local1[i]/(sqrt(9.81*fd_local1[i])))
        else:
            dummy.append(0.0)
    dummy = asarray(dummy)
    # numpy converts the string 'NaN' to float nan on assignment
    fd_local1[dummy<fr_low] = 'NaN'
    fu_local1[dummy<fr_low] = 'NaN'
    fd_local1[dummy>fr_high] = 'NaN'
    fu_local1[dummy>fr_high] = 'NaN'
    return fd_local1, fu_local1

# +
def find_fr(fd_local, fu_local, fr_number_local):
    """Among nonzero (fd, fu) samples whose Froude number is within +/-0.01
    of `fr_number_local`, return (min fd, min fu); ('NaN', 'NaN') if none."""
    n_df = 0.01  # half-width of the accepted Froude band
    dummy = []
    nfd_local = []
    nfu_local = []
    # print(max(fd_local))
    # print(max(fu_local))
    for i in range(len(fd_local)):
        if fd_local[i] != 0.0 and fu_local[i] != 0.0:
            dummy_val = fu_local[i]/(sqrt(9.81*fd_local[i]))
            if dummy_val < fr_number_local+n_df and dummy_val > fr_number_local-n_df:
                nfd_local.append(fd_local[i])
                nfu_local.append(fu_local[i])
    #print(len(nfd_local),len(nfu_local))
    if len(nfd_local) != 0 and len(nfu_local) != 0:
        output1 = min(nfd_local)
        output2 = min(nfu_local)
    else:
        # no sample near the requested Froude number
        output1 = 'NaN'
        output2 = 'NaN'
    return output1, output2

def mask_zero(c_fu_dis_l, c_fd_dis_l, c_fu_ndis_l, c_fd_ndis_l):
    """Replace zeros with NaN in all four arrays (in place), then apply the
    Froude-range mask row by row via exclude_unph_fr; returns four new
    arrays (fu_dis, fd_dis, fu_ndis, fd_ndis)."""
    c_fu_dis_l[c_fu_dis_l == 0.0] = 'NaN'
    c_fd_dis_l[c_fd_dis_l == 0.0] = 'NaN'
    c_fu_ndis_l[c_fu_ndis_l == 0.0] = 'NaN'
    c_fd_ndis_l[c_fd_ndis_l == 0.0] = 'NaN'
    print(shape(c_fu_dis_l))
    nc_fu_dis_l = empty_like(c_fu_dis_l)
    # NOTE(review): nc_fu_dis1_l / nc_fu_dis2_l are allocated but never used
    nc_fu_dis1_l = empty_like(c_fu_dis_l)
    nc_fu_dis2_l = empty_like(c_fu_dis_l)
    nc_fu_ndis_l = empty_like(c_fu_ndis_l)
    nc_fd_dis_l = empty_like(c_fd_dis_l)
    nc_fd_ndis_l = empty_like(c_fd_ndis_l)
    for i in range(len(c_fd_dis_l[:, 0])):
        nc_fd_dis_l[i,:],nc_fu_dis_l[i,:] = exclude_unph_fr(c_fd_dis_l[i,:],c_fu_dis_l[i,:])
        nc_fd_ndis_l[i,:],nc_fu_ndis_l[i,:] = exclude_unph_fr(c_fd_ndis_l[i,:],c_fu_ndis_l[i,:])
    return nc_fu_dis_l, nc_fd_dis_l, nc_fu_ndis_l, nc_fd_ndis_l

def froude_number(fd_local, fr_number_l):
    """Return flow speeds fu = Fr * sqrt(g * fd) for every depth in fd_local."""
    fu_local = []
    for i in range(len(fd_local)):
        fu_local.append(fr_number_l*sqrt(9.81*fd_local[i]))
    fu_local = asarray(fu_local)
    return fu_local

def find_trans(sl_change_l, dl_change_l, c_fd_dis_l, c_fu_dis_l, fr_value_l=1.0):
    """For each (slope, roughness) cell, find the minimum (depth, speed)
    at Froude number `fr_value_l` via find_fr.

    Rows of c_*_dis_l are laid out as slope-major blocks of len(dl_change_l)
    entries; index_array recovers the 2-D (slope, roughness) indexing.
    """
    index_array = zeros([len(sl_change_l),len(dl_change_l)],dtype=int)
    for j in range(len(sl_change_l)):
        # print(j*len(dl_change_l),(j)*len(dl_change_l)+len(dl_change_l)-1)
        index_array[j,:] = linspace(j*len(dl_change_l),(j)*len(dl_change_l)+len(dl_change_l)-1,len(dl_change_l),dtype=int)
    x1_d_l = empty_like(index_array, dtype=float)
    y1_d_l = empty_like(index_array, dtype=float)
    # print(shape(c_fd_dis_l))
    for i in range(len(sl_change_l)):
        for j in range(len(dl_change_l)):
            x1_d_l[i,j],y1_d_l[i,j] = find_fr(c_fd_dis_l[index_array[i,j],:],c_fu_dis_l[index_array[i,j],:],fr_value_l)
    return x1_d_l, y1_d_l

# +
def read_data_parameter(inputfile, p_name):
    """Return (as float) the last whitespace-separated value on the last
    line of `inputfile` whose first token equals `p_name`."""
    with open(inputfile) as f:
        lines = f.readlines()
    dummy = []
    for line in lines:
        if line.split()[0] == p_name:
            dummy.append(float(line.split()[-1]))
    dummy = float(dummy[-1])  # keep only the last occurrence
    return dummy

def read_model_data_sum(path_l):
    """Load one model run: boulder axes/density from boulder<NNN>.in plus the
    slope/roughness grids and depth/speed result arrays from .dat files.

    Returns (a, b, c, density, slopes, roughnesses,
             fu_dis, fd_dis, fu_ndis, fd_ndis).
    """
    # from boulder.in file — run number is embedded in the directory name
    boulder_in_name = '{t1}boulder{t2}.in'.format(t1=path_l, t2=path_l[-4:-1])
    dens_l = read_data_parameter(boulder_in_name, 'min_density:')
    aaa_l = read_data_parameter(boulder_in_name, 'a-axis:')
    bbb_l = read_data_parameter(boulder_in_name, 'b-axis:')
    ccc_l = read_data_parameter(boulder_in_name, 'c-axis:')
    sl_change_l = loadtxt(path_l+'slope.dat')
    dl_change_l = loadtxt(path_l+'delta.dat')
    c_fu_dis_l = loadtxt(path_l+'u_dis.dat')
    c_fd_dis_l = loadtxt(path_l+'d_dis.dat')
    # NOTE(review): both "ndis" arrays are loaded from 'd_dis.dat' — looks
    # like a copy-paste slip (expected u_ndis.dat / d_ndis.dat?). Confirm
    # against the data directory before relying on the ndis outputs.
    c_fu_ndis_l = loadtxt(path_l+'d_dis.dat')
    c_fd_ndis_l = loadtxt(path_l+'d_dis.dat')
    # print(path_l+'slope.dat')
    # sl_change_l = array(sl_change_l)
    # dl_change_l = array(dl_change_l)
    # print(nanmax(c_fu_dis_l))
    return aaa_l,bbb_l,ccc_l,dens_l,sl_change_l,dl_change_l,c_fu_dis_l,c_fd_dis_l,c_fu_ndis_l,c_fd_ndis_l

# +
# Probe run 000 to size the result arrays for all nt=25 density runs.
dumm1,dumm1,dumm1,dumm1,sl_change1,dl_change1,c_fu_dis1,dumm1,dumm1,dumm1 = read_model_data_sum('./data/ddd/data_density000/')
print(shape(c_fu_dis1))
nx1 = len(sl_change1)   # number of slopes
nx2 = len(dl_change1)   # number of roughness values
dd, nx3 = shape(c_fu_dis1)
nt = 25                 # number of density runs
a = zeros(nt)
b = zeros(nt)
c = zeros(nt)
dens = zeros(nt)
sl_change = zeros([nt,nx1])
dl_change = zeros([nt,nx2])
x1 = zeros([nt,nx1,nx2])
y1 = zeros([nt,nx1,nx2])
c_fu_dis = zeros([nt,nx1*nx2,nx3])
c_fd_dis = zeros([nt,nx1*nx2,nx3])
c_fu_ndis = zeros([nt,nx1*nx2,nx3])
c_fd_ndis = zeros([nt,nx1*nx2,nx3])
nc_fu_dis = zeros([nt,nx1*nx2,nx3])
nc_fd_dis = zeros([nt,nx1*nx2,nx3])
nc_fu_ndis = zeros([nt,nx1*nx2,nx3])
nc_fd_ndis = zeros([nt,nx1*nx2,nx3])
print(shape(x1))
# -

# Load every density run into the preallocated arrays.
for nt_i in range(nt):
    folder = './data/ddd/data_density{t1:03d}/'.format(t1=nt_i)
    a[nt_i],b[nt_i],c[nt_i],dens[nt_i],sl_change[nt_i,:],dl_change[nt_i,:],c_fu_dis[nt_i,:,:],\
    c_fd_dis[nt_i,:,:],c_fu_ndis[nt_i,:,:],c_fd_ndis[nt_i,:,:] = read_model_data_sum(folder)
    print(a,b,c,dens,dl_change[nt_i,:])

# Mask zeros and unphysical Froude numbers run by run.
for nt_i in range(nt):
    nc_fu_dis[nt_i,:,:],nc_fd_dis[nt_i,:,:],nc_fu_ndis[nt_i,:,:],nc_fd_ndis[nt_i,:,:] = mask_zero(c_fu_dis[nt_i,:,:],c_fd_dis[nt_i,:,:],c_fu_ndis[nt_i,:,:],c_fd_ndis[nt_i,:,:])
print(nanmax(nc_fu_dis[1:,:]))

# Transition (depth, speed) per (slope, roughness) cell at Fr = 1.0.
for nt_i in range(nt):
    x1[nt_i,:,:],y1[nt_i,:,:] = find_trans(sl_change[nt_i,:],dl_change[nt_i,:],nc_fd_dis[nt_i,:,:],nc_fu_dis[nt_i,:,:])

#def calc_average(arr):

# +
# Nondimensionalize the transition flow DEPTH (rho*a*c*depth/mass) for three
# slope indices (0, 2, 4 — presumably -10, 0 and +10 degrees; see annotations).
nt=25
c_xp10=[]
c_xnul=[]
c_xn10=[]
for nt_i in range(nt):
    mass = dens[nt_i] * (c[nt_i] * b[nt_i] * a[nt_i])
    aspect_r = a[nt_i]/c[nt_i]
    c_xn10.append(aspect_r**(0) * (dens[nt_i] * a[nt_i] * c[nt_i] * x1[nt_i,0,:])/mass)
    c_xnul.append(aspect_r**(0) * (dens[nt_i] * a[nt_i] * c[nt_i] * x1[nt_i,2,:])/mass)
    c_xp10.append(aspect_r**(0) * (dens[nt_i] * a[nt_i] * c[nt_i] * x1[nt_i,4,:])/mass)
c_xp10 = array(c_xp10)
c_xnul = array(c_xnul)
c_xn10 = array(c_xn10)
# Average over the nt runs for each roughness value.
average_xm10 = []
average_xnul = []
average_xp10 = []
for nx2_i in range(nx2):
    average_xm10.append(nanmean(c_xn10[:,nx2_i]))
    average_xnul.append(nanmean(c_xnul[:,nx2_i]))
    #average_p10.append(nanmean(c_xp10[:,nx2_i]))
    average_xp10.append(nanmean(c_xp10[:,nx2_i]))
average_xm10 = array(average_xm10)
average_xnul = array(average_xnul)
average_xp10 = array(average_xp10)
# Replace the last point by a linear extrapolation of the previous two.
average_xm10[-1]=average_xm10[-2]+(average_xm10[-2]-average_xm10[-3])
average_xnul[-1]=average_xnul[-2]+(average_xnul[-2]-average_xnul[-3])
average_xp10[-1]=average_xp10[-2]+(average_xp10[-2]-average_xp10[-3])
print(average_xp10)

# +
###This is the cell I need to change
# Same averaging for the transition flow SPEED, nondimensionalized as
# mass*u^2 / (g * a^2 * c^2 * rho).
nt=25
c_yp10=[]
c_ynul=[]
c_yn10=[]
g=9.81
for nt_i in range(nt):
    mass = dens[nt_i] * (c[nt_i] * b[nt_i] * a[nt_i])
    aspect_r = a[nt_i]/c[nt_i]
    c_yn10.append(aspect_r**(0) * mass*y1[nt_i,0,:]**2 /(g*a[nt_i]**2.0 * c[nt_i]**2.0 * dens[nt_i]))
    c_ynul.append(aspect_r**(0) * mass*y1[nt_i,2,:]**2/(g*a[nt_i]**2.0 * c[nt_i]**2.0 * dens[nt_i]))
    c_yp10.append(aspect_r**(0) * mass*y1[nt_i,4,:]**2/(g*a[nt_i]**2.0 * c[nt_i]**2.0 * dens[nt_i]))
# unit scratchpad (author's dimensional-analysis notes, kept as-is):
# KG / M^3
# KG * M^3/KG * S^2/M^2 * 1/M^2 * 1/M^2
# KG M^3 S^2
# KG M^4
# KG S^2
# KG M
# mass * FD/
# g *rho )a
# KG M^2/S^2 M^3/KG *1/M^2 * 1/M^2 S^2/M
# KG M^2 M^3 S^2
# KG S^2 M^2 M^2 M
c_yp10 = array(c_yp10)
c_ynul = array(c_ynul)
c_yn10 = array(c_yn10)
average_ym10 = []
average_ynul = []
average_yp10 = []
for nx2_i in range(nx2):
    average_ym10.append(nanmean(c_yn10[:,nx2_i]))
    average_ynul.append(nanmean(c_ynul[:,nx2_i]))
    #average_yp10.append(nanmean(c_yp10[:,nx2_i]))
    average_yp10.append(nanmean(c_yp10[:,nx2_i]))
average_ym10 = array(average_ym10)
average_ynul = array(average_ynul)
average_yp10 = array(average_yp10)
# Linear extrapolation for the last roughness point, as for the depths.
average_ym10[-1]=average_ym10[-2]+(average_ym10[-2]-average_ym10[-3])
average_ynul[-1]=average_ynul[-2]+(average_ynul[-2]-average_ynul[-3])
average_yp10[-1]=average_yp10[-2]+(average_yp10[-2]-average_yp10[-3])
print(average_yp10)
# -

# Quick-look plot of the averaged speed curves.
plot(dl_change[0,:],average_ym10,'r-',lw=3)
plot(dl_change[0,:],average_ynul,'b-',lw=3)
plot(dl_change[0,:],average_yp10,'g-',lw=3)

# +
# Diagnostic 3-panel figure: raw, c-scaled, and mass-scaled transition depths.
fig, (ax1,ax2,ax3) = subplots(nrows=1,ncols=3,figsize=(20,7.5))
font0 = FontProperties()
font = font0.copy()
font.set_weight('bold')
flow_depth = linspace(0.0,20.0,100)
al=0.1
li=3
#print(sl_change0,sl_change1,sl_change2)
print("flow depth")
ax1.tick_params(which='both', width=3)
ax1.tick_params(which='major', length=12)
ax2.tick_params(which='both', width=3)
ax2.tick_params(which='major', length=12)
ax3.tick_params(which='both', width=3)
ax3.tick_params(which='major', length=12)
index1=-1  # last slope
index2=0   # first slope
#print(mass0,mass1,mass2)
for nt_i in range(nt):
    ax1.plot(dl_change[nt_i,:],(x1[nt_i,index1,:]),'r-')
for nt_i in range(nt):
    ax1.plot(dl_change[nt_i,:],(x1[nt_i,index2,:]),'b-')
ax1.set_ylim(0,20)
#ax1.legend()
#ax1.set_ylim(0,10)
for nt_i in range(nt):
    aspect_r = a[nt_i]/c[nt_i]
    ax2.plot(dl_change[nt_i,:],aspect_r**(-1)*(x1[nt_i,index1,:])/c[nt_i],'r-')
for nt_i in range(nt):
    aspect_r = a[nt_i]/c[nt_i]
    ax2.plot(dl_change[nt_i,:],aspect_r**(-1)*(x1[nt_i,index2,:])/c[nt_i],'b-')
ax2.set_ylim(0,20)
for nt_i in range(nt):
    mass = dens[nt_i] * (c[nt_i] * b[nt_i] * a[nt_i])
    aspect_r = a[nt_i]/c[nt_i]
    ax3.plot(dl_change[nt_i,:],aspect_r**(0) * (dens[nt_i] * a[nt_i] * c[nt_i] * x1[nt_i,index1,:])/mass,'r-',alpha=0.1)
print(shape(x1))
print(nx2)
for nt_i in range(nt):
    mass = dens[nt_i] * (c[nt_i] * b[nt_i] * a[nt_i])
    aspect_r = a[nt_i]/c[nt_i]
#     ax3.plot(dl_change[nt_i,:],aspect_r**(0) * (dens[nt_i] * a[nt_i] * c[nt_i] * x1[nt_i,index2,:])/mass,'b-')
#     ax3.plot(dl_change[nt_i,:],x1[nt_i,:],'b-',alpha=0.1)
ax3.plot(dl_change[0,:],average_xm10,'r-',lw=3)
ax3.plot(dl_change[0,:],average_xnul,'b-',lw=3)
ax3.plot(dl_change[0,:],average_xp10,'g-',lw=3)
ax3.set_ylim(0,10)
#ax3.legend()
#ax3.set_ylim(0,10)
# -

def make_lines(data_x, data_y):
    """Smooth (data_x, data_y) with a degree-4 polynomial fit, evaluated on
    50 evenly spaced x's spanning the data range; returns (x_new, y_new)."""
    z = polyfit(data_x, data_y, 4)
    f = poly1d(z)
    # calculate new x's and y's
    x_new = np.linspace(data_x[0], data_x[-1], 50)
    y_new = f(x_new)
    return x_new, y_new

# +
# Final 2-panel publication figure: (a) scaled depth, (b) scaled speed,
# with the region between the -10 and +10 degree curves shaded.
# x1,y1 = make_lines(dl_change,x2_d[0,:])
# x2,y2 = make_lines(dl_change,x2_d[-1,:])
# x3,y3 = make_lines(dl_change,x2_d[2,:])
color_ar = [(0.54, 0.81, 0.94),(0.13, 0.67, 0.8)]
fig, (ax1,ax2) = subplots(nrows=1,ncols=2,figsize=(20,7.5),constrained_layout=True)#,gridspec_kw = {'wspace':0.3, 'hspace':0})
font0 = FontProperties()
font = font0.copy()
font.set_weight('bold')
flow_depth = linspace(0.0,20.0,100)
al=0.1
li=3
ax1.tick_params(which='both', width=1)
ax1.tick_params(which='major', length=12)
ax2.tick_params(which='both', width=1)
ax2.tick_params(which='major', length=12)
# for i in range(len(sl_change)):
#     ax1.scatter(dl_change,x2_d[i,:],label=str(sl_change[i]))
#ax3.plot(dl_change[0,:],average_m10,'r-',lw=3)
#ax3.plot(dl_change[0,:],average_nul,'b-',lw=3)
#ax3.plot(dl_change[0,:],average_p10,'g-',lw=3)
nx_1,ny_1 = make_lines(dl_change[0,:],average_xm10)
nx_2,ny_2 = make_lines(dl_change[0,:],average_xnul)
nx_3,ny_3 = make_lines(dl_change[0,:],average_xp10)
ax1.fill_between(nx_1,ny_1,ny_2-0.2, color=color_ar[0],lw=0)
ax1.fill_between(nx_1,ny_2-0.2,ny_3, color=color_ar[1],lw=0)
ax1.plot(nx_1,ny_1,'k-')
ax1.plot(nx_2,ny_2-0.2,'k--')
ax1.plot(nx_3,ny_3,'k-')
ax1.set_ylim(0,10)
ax1.annotate( r'-10$^\mathrm{o}$', xy=(nx_1[20], ny_1[20]), xytext=(nx_1[20]+0.1,ny_1[20]-0.75), ha='center', \
             arrowprops={'arrowstyle':'-|>'})
ax1.annotate( r'10$^\mathrm{o}$', xy=(nx_2[30], ny_2[30]), xytext=(nx_2[30]-0.1,ny_2[30]+0.7), ha='center', \
             arrowprops={'arrowstyle':'-|>'})
ax1.annotate( r'0$^\mathrm{o}$', xy=(nx_3[25], ny_3[25]-0.2), xytext=(nx_3[25]+0.075,ny_3[25]-0.5), ha='center', \
             arrowprops={'arrowstyle':'-|>'})
ax1.set_xlim(0.1,0.95)
ax1.set_xlabel(r'$\frac{\mathrm{Roughness}}{\mathrm{c}}$',size=40,fontweight='bold')
#ax1.set_xlabel('Roughness [m]',fontweight='bold')
ax1.set_ylabel(r"$\frac{\rho \cdot\mathrm{a \cdot c \cdot Flow\,Depth}}{\mathrm{Mass}}$",size=40)
#ax1.text(0.12,0.2,'Fr = 1.0 for (a) \& (b)')
ax1.text(0.11,9.5,'(a)',size=25)
# for i in range(len(sl_change)):
#     ax2.scatter(dl_change,y2_d[i,:],label=str(sl_change[i]))
nx_1,ny_1 = make_lines(dl_change[0,:],average_ym10)
nx_2,ny_2 = make_lines(dl_change[0,:],average_ynul)
nx_3,ny_3 = make_lines(dl_change[0,:],average_yp10)
ax2.fill_between(nx_1,ny_1,ny_2-0.2, color=color_ar[0],lw=0)
ax2.fill_between(nx_1,ny_2-0.2,ny_3, color=color_ar[1],lw=0)
ax2.plot(nx_3,ny_3,'k-')
ax2.plot(nx_2,ny_2-0.2,'k--')
ax2.plot(nx_1,ny_1,'k-')
#\definecolor{bondiblue}{rgb}{0.37, 0.62, 0.63}
#\definecolor{camouflagegreen}{rgb}{0.47, 0.53, 0.42}
ax2.annotate('Downhill Dislodgement', xy=(nx_1[23], ny_1[23]+0.45), xytext=(nx_1[23]+0.06,ny_1[23]-.95), ha='center', \
             arrowprops={'arrowstyle':'-|>'})
ax2.annotate('Uphill Dislodgement', xy=(nx_2[5], ny_2[5]-0.125), xytext=(nx_2[5]+0.07,ny_2[5]+0.85), ha='center', \
             arrowprops={'arrowstyle':'-|>'})
ax2.set_xlim(0.1,0.95)
#ax1.legend()
ax2.set_ylim(0,10)
ax2.set_xlabel(r'$\frac{\mathrm{Roughness}}{\mathrm{c}}$',size=40,fontweight='bold')
ax2.set_ylabel(r"$\frac{\mathrm{Mass \cdot Flow\,Speed^2}}{\rho \cdot \mathrm{g \cdot a^2 \cdot c^2}}$",size=40)
#mass*y1[nt_i,0,:]**2 /(g*a[nt_i]**2.0 * c[nt_i]**2.0 * dens[nt_i]))
#ax2.set_ylabel(r'Flow Speed [m s $^{\mathbf{-1}}$]',fontweight='bold')
ax2.text(0.11,9.5,'(b)',size=25)

# +
#make_outputfile(fig,tt='compv2',res=400)
# -
Figures_5.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Notebook: explore a trained gensim author-topic model — t-SNE map of
# authors, per-author similarity tables, and a pyLDAvis view.

# +
from gensim.models import AuthorTopicModel
# load a previously trained author-topic model from disk
model = AuthorTopicModel.load('model.atmodel')
# -

model.show_topics(num_topics=100)

# Map author name -> topic distribution.
aut_top = {}
for key, value in model.id2author.items():
    aut_top[value] = model.get_author_topics(value)
aut_top

# %%time
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0)
smallest_author = 0  # Ignore authors with documents less than this.
authors = [model.author2id[a] for a in model.author2id.keys() if len(model.author2doc[a]) >= smallest_author]
# Project the author-topic matrix (model.state.gamma) to 2-D.
_ = tsne.fit_transform(model.state.gamma[authors, :])  # Result stored in tsne.embedding_

# +
from bokeh.io import output_file
output_file('grafica.html')

# +
# Interactive scatter of the t-SNE embedding; marker radius is proportional
# to the author's document count.
from bokeh.models import HoverTool
from bokeh.plotting import figure, show, ColumnDataSource

x = tsne.embedding_[:, 0]
y = tsne.embedding_[:, 1]
author_names = [model.id2author[a] for a in authors]
scale = 0.01  # radius per document
author_sizes = [len(model.author2doc[a]) for a in author_names]
radii = [size * scale for size in author_sizes]

source = ColumnDataSource(
    data=dict(
        x=x,
        y=y,
        author_names=author_names,
        author_sizes=author_sizes,
        radii=radii,
    )
)

hover = HoverTool(
    tooltips=[
        ("author", "@author_names"),
        ("size", "@author_sizes"),
    ]
)

p = figure(tools=[hover, 'crosshair,pan,wheel_zoom,box_zoom,reset,save,lasso_select'])
p.scatter('x', 'y', radius='radii', source=source, fill_alpha=0.6, line_color=None)
show(p)

# +
from gensim.similarities import MatrixSimilarity
# similarity index over every author's topic vector
index = MatrixSimilarity(model[list(model.id2author.values())])

# +
from gensim import matutils
import pandas as pd

# topic vector per author, in id order
author_vecs = [model.get_author_topics(author) for author in model.id2author.values()]

def similarity(vec1, vec2):
    """Similarity of two sparse topic vectors: 1 / (1 + Hellinger distance),
    so identical distributions score 1.0."""
    dist = matutils.hellinger(matutils.sparse2full(vec1, model.num_topics), \
                              matutils.sparse2full(vec2, model.num_topics))
    sim = 1.0 / (1.0 + dist)
    return sim

def get_sims(vec):
    """Similarity of `vec` against every author's topic vector."""
    sims = [similarity(vec, vec2) for vec2 in author_vecs]
    return sims

def get_table(name, top_n=10, smallest_author=1):
    """Top-`top_n` authors most similar to author `name`, as a DataFrame
    (Author, Score, Size); authors with fewer than `smallest_author`
    documents are skipped."""
    sims = get_sims(model.get_author_topics(name))
    table = []
    for elem in enumerate(sims):
        author_name = model.id2author[elem[0]]
        sim = elem[1]
        author_size = len(model.author2doc[author_name])
        if author_size >= smallest_author:
            table.append((author_name, sim, author_size))
    df = pd.DataFrame(table, columns=['Author', 'Score', 'Size'])
    df = df.sort_values('Score', ascending=False)[:top_n]
    return df
# -

get_table('Pajaropolitico', top_n=136)

import pickle
# NOTE(review): file handle from open() is never closed — fine in a notebook
dictionary = pickle.load(open("dictionary.p", "rb"))

import pyLDAvis.gensim
pyLDAvis.enable_notebook()
pyLDAvis.gensim.prepare(model, model.corpus, dictionary)
modelos_viejos/modelo8/modelo8.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Notebook: cluster London smart-meter average daily load profiles with
# Spark ML K-Means and Bisecting K-Means, and plot the cluster centroids.
from pyspark.sql import SQLContext
from pyspark.ml.feature import MinMaxScaler
from pyspark.sql import functions as sf
from matplotlib import pyplot as plt
from pyspark.sql.functions import col,avg
import pandas as pd
from pyspark.ml.feature import VectorAssembler
import numpy as np
from pyspark.ml.clustering import KMeans
from pyspark.sql import Row
import pyspark

# restart the context against the standalone cluster
sc.stop()
sc = pyspark.SparkContext(master="spark://172.16.27.208:7077",appName="spark")
sc

# ## Load Average Profile csv
base_path = "/home/test5/Desktop/smart-meters-in-london/"
sqlcontext = SQLContext(sc)
#avg_power_df = sqlcontext.read.format("com.databricks.spark.csv").load(base_path+"avg.csv/*")#csv(base_path+"avg.csv/",header=True,inferSchema=True)
avg_power_df = sqlcontext.read.csv(base_path+"avg.csv",header=True,inferSchema=True).cache()
avg_power_df.count()

# ## Adding "`" to properly select OW Analysis exception
#
# ### just to be clear, the reason for this is that the column name has a period in it. Spark is in general a little buggy as far as properly supporting backticks throughout codebase, but in this case they're fine

avg_power_df = avg_power_df.drop("_c0")  # drop the CSV index column
avg_power_df.printSchema()

# +
# Rename the 48 half-hour columns ("00.5".."24.0") to "01".."48", then sum
# consecutive pairs to aggregate half-hourly readings into 24 hourly columns.
old_Feature_cols = []
new_Feature_cols = []
for i in range(1,49):
    if i<20:
        old_Feature_cols.append("0"+str(i*0.5))
    else:
        old_Feature_cols.append(str(i*0.5))
    if i<10:
        new_Feature_cols.append("0"+str(i))
    else:
        new_Feature_cols.append(str(i))
for c,n in zip(old_Feature_cols,new_Feature_cols):
    avg_power_df=avg_power_df.withColumnRenamed(c,n)
# pairwise sum: column 2k+1 absorbs column 2k, which is then dropped
for i in range(1,len(new_Feature_cols),2):
    avg_power_df = avg_power_df.withColumn(new_Feature_cols[i],avg_power_df[new_Feature_cols[i-1]]+avg_power_df[new_Feature_cols[i]])
    avg_power_df = avg_power_df.drop(new_Feature_cols[i-1])
# rename the 24 surviving columns back to "01".."24"
for i in range(24):
    avg_power_df = avg_power_df.withColumnRenamed(new_Feature_cols[2*i+1],new_Feature_cols[i])
avg_power_df.printSchema()
avg_power_df.take(1)
# -

# ###### TODO : Scaling Feature using MinMaxSacler or any other can be done

avg_house_data = avg_power_df
new_Feature_cols = new_Feature_cols[0:24]  # keep only the 24 hourly names
# assemble hourly columns into a single feature vector, then min-max scale
vecAssembler = VectorAssembler(inputCols=new_Feature_cols, outputCol="features")
df_kmeans = vecAssembler.transform(avg_power_df)
df_kmeans.take(1)
scaler = MinMaxScaler(inputCol="features",outputCol="scaled_features")
df_kmeans = scaler.fit(df_kmeans).transform(df_kmeans)

# # K-Means

# ## Chosing K

# Elbow curve: WSSE for k = 2..10.
wsse = np.zeros(11)
for k in range(2,11):
    kmeans = KMeans().setK(k).setSeed(1).setFeaturesCol("scaled_features")
    model = kmeans.fit(df_kmeans)
    wsse[k] = model.computeCost(df_kmeans)
    print(wsse[k])

fig, ax = plt.subplots(1,1, figsize =(8,6))
ax.plot(range(2,11),wsse[2:11])
ax.set_xlabel('Number of cluster(K)')
ax.set_ylabel('Within Sum Of Squared Error(WSSE)')
ax.set_title("Elbow Method:")
fig.savefig(base_path+"/plot/K-means_elbow_k=8")

# ### k = 8 from elbow method

# ### Train Model

# +
k = 8
kmeans = KMeans().setK(k).setSeed(1).setFeaturesCol("scaled_features")
model = kmeans.fit(df_kmeans)
# prepend a "Cluster i" label to each centroid row for plotting
centers = model.clusterCenters()
for i in range(len(centers)):
    centers[i] = centers[i].tolist()
    centers[i] = ["Cluster "+str(i)] + centers[i]
# print("Cluster Centers: ")
# for center in centers:
#     print(center)
# type(centers)
transformed = model.transform(df_kmeans)
print(transformed.printSchema())
transformed = transformed.withColumnRenamed("prediction","cluster_id")
# transformed.select("cluster_id").show()
# persist household -> cluster assignment
transformed.select("LCLid","cluster_id").toPandas().to_csv(base_path+"cluster_info_k_means_k-8.csv", header=True)
# -

# Build a DataFrame of labelled centroids.
cluster_centroid_rdd = sc.parallelize(centers)
cluster_centroid_rdd = cluster_centroid_rdd.flatMap(lambda x: Row(x))
# cluster_centroid_rdd.collect()
df_cluster_center=sqlcontext.createDataFrame(cluster_centroid_rdd,new_Feature_cols)

len(centers)

# Pull the scaled feature vectors back out as plain Python floats.
scaled_feature_list = [list(row) for row in df_kmeans.select("scaled_features").collect()]
scaled_feature_list = [[float(x) for x in list(row[0].values)] for row in scaled_feature_list]

avg_house_data = avg_house_data.drop("LCLid")
scaled_df = sc.parallelize(scaled_feature_list)
scaled_df = scaled_df.flatMap(lambda x : Row(x))
scaled_df = sqlcontext.createDataFrame(scaled_df,avg_house_data.schema)
scaled_df.printSchema()

type(scaled_feature_list[0][0])

# If we don't assign color all plot will be of different color,
# by putting ax=ax we are plotting on same graph,
# if removed new graph below will be plotted.
df_cluster_center = df_cluster_center.withColumnRenamed("01","K-means Cluster")
# ax = avg_house_data.toPandas().set_index("LCLid").T.plot(figsize=(13,8), legend=False, color='black',alpha=0.3)
# household profiles in translucent black, centroids dashed on top
ax = scaled_df.toPandas().T.plot(figsize=(13,8), legend=False, color='black',alpha=0.2)
plot = df_cluster_center.toPandas().set_index("K-means Cluster").T.plot(ax=ax,alpha=2,style='--')
ax.set_xlabel("Hour of the day (H)")
ax.set_ylabel("Scaled average load profile (Kw/h)")
# ax.set_title("K-menas Clu")
plot.get_figure().savefig(base_path+"/plot/cluster-8-means-scaled.png")

# # Bisecting K-means

from pyspark.ml.clustering import BisectingKMeans

# Elbow curve for Bisecting K-Means.
wsse = np.zeros(11)
for k in range(2,11):
    bkm = BisectingKMeans().setK(k).setSeed(5).setFeaturesCol("scaled_features")
    model = bkm.fit(df_kmeans)
    wsse[k] = model.computeCost(df_kmeans)
    print(wsse[k])

fig, ax = plt.subplots(1,1, figsize =(8,6))
ax.plot(range(2,11),wsse[2:11])
ax.set_xlabel('Number of cluster(K)')
ax.set_ylabel('Within Sum Of Squared Error(WSSE)')
ax.set_title("Elbow Method:")
fig.savefig(base_path+"/plot/BK-means_elbow_k=8")

# ## k = 8 from elbow method

k = 8
bkm = BisectingKMeans().setK(8).setSeed(1).setFeaturesCol("scaled_features")
bkm_model = bkm.fit(df_kmeans)
bkm_cluster_center = bkm_model.clusterCenters()
bkm_transformed = bkm_model.transform(df_kmeans)
bkm_transformed = bkm_transformed.withColumnRenamed("prediction","cluster_id")
# transformed.select("cluster_id").show()
bkm_transformed.select("LCLid","cluster_id").toPandas().to_csv(base_path+"cluster_info_b-k_means_k-8.csv", header=True)

print(len(bkm_cluster_center))
# label centroids as for plain K-Means
for i in range(len(bkm_cluster_center)):
    bkm_cluster_center[i] = bkm_cluster_center[i].tolist()
    bkm_cluster_center[i] = ["Cluster "+str(i)] + bkm_cluster_center[i]

bkm_cluster_centroid_rdd = sc.parallelize(bkm_cluster_center)
bkm_cluster_centroid_rdd = bkm_cluster_centroid_rdd.flatMap(lambda x: Row(x))
# cluster_centroid_rdd.collect()
bkm_df_cluster_center=sqlcontext.createDataFrame(bkm_cluster_centroid_rdd,new_Feature_cols)

bkm_df_cluster_center = bkm_df_cluster_center.withColumnRenamed("01","Bisecting K-means Cluster")
# ax = avg_house_data.toPandas().set_index("LCLid").T.plot(figsize=(13,8), legend=False, color='black',alpha=0.3)
ax = scaled_df.toPandas().T.plot(figsize=(13,8), legend=False, color='black',alpha=0.2)
plot = bkm_df_cluster_center.toPandas().set_index("Bisecting K-means Cluster").T.plot(ax=ax,alpha=2,style='--')
ax.set_xlabel("Hour of the day (H)")
ax.set_ylabel("Scaled average load profile (Kw/h)")
# ax.set_title("K-menas Clu")
plot.get_figure().savefig(base_path+"/plot/cluster-8bk-means-scaled.png")
Clustering_load_profile.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import pandas as pd, numpy as np #load list of all circuits and years with races from wikipedia df=pd.read_html('https://en.wikipedia.org/wiki/List_of_World_Rally_Championship_rallies', header=0) df=df[1][['Rally','Headquarters','WRC years','Location']] #geocode circuit names and create list with circuits from pygeocoder import Geocoder circs=[] apikey='<KEY>' for i in df.T.iteritems(): circ={} g=i[1][0] if '[' in g: g=g[:g.find('[')] circ['name']=g g=i[1][2] if '[' in g: g=g[:g.find('[')] circ['races']=g g=i[1][1] if '[' in g: g=g[:g.find('[')] circ['place']=g+', '+i[1][3] if g=='Gap': circ['place']='Gap, France' circ['coord']=Geocoder(apikey).geocode(circ['place']).coordinates circs.append(circ) print circs[-1] calendar={i:[] for i in range(1973,2019)} for i in range(len(circs)): g=circs[i]['races'] if '[' in g: g=g[:g.find('[')] for k in g.replace(u'\u2013', '-').strip().replace(" ", ",").replace(",,", ",").replace(",,", ",").split(","): r=k.find('-') if r==-1: calendar[np.int(k)].append(i) else: for j in range(np.int(k[:r]),np.int(k[r+1:])+1): calendar[j].append(i) #save data import json file('calendar_wrc2018.json','w').write(json.dumps(calendar)) file('circs_wrc2018.json','w').write(json.dumps(circs))
f1 circuits/.ipynb_checkpoints/wrc_2018-checkpoint.ipynb
# Network reconnaissance notebook: nmap-scan the local /24, ping every host
# found, render the ping results into an HTML page with dominate, then open
# an SSH session to one host with paramiko.
# NOTE(review): Python 2 only (`print pls.output[0]`, `base64.decodestring`);
# `pyping` also requires raw-socket (root) privileges.

import pyping
import nmap
import dominate
from dominate.tags import *
import paramiko
import base64

# Discover live hosts on the 192.168.1.0/24 network.
nm = nmap.PortScanner()
allscan = nm.scan('192.168.1.*')
allhosts = nm.all_hosts()

pinglis = []
nplis = []
# Ping each discovered host; keep the full pyping response objects.
for hosts in allhosts:
    print(hosts)
    pinglis.append(pyping.ping(hosts))

# Collect the first output line of every ping response for the report.
for pls in pinglis:
    print pls.output[0]
    nplis.append(pls.output[0])

# Bare expression: displays the collected lines in the notebook output.
nplis

# Unused second document — presumably a leftover; only `doc` below is rendered.
nmapdoc = dominate.document(title='nmap scan')

# Build a minimal HTML page listing the ping results.
doc = dominate.document(title='nmap ping')
with doc.head:
    link(rel='stylesheet', href='style.css')
    script(type='text/javascript', src='script.js')
with doc:
    #with div(id='header').add(ol()):
    #    for i in pinglis:
    #        li(a(i(), href='/%s.html' % i))
    with div():
        attr(cls='body')
        p(nplis)
print doc

# SSH experimentation.
# NOTE(review): 'AAA...' is placeholder key material, so these RSAKey calls
# fail as written; the duplicated line looks like a notebook re-run artifact.
key = paramiko.RSAKey(data=base64.decodestring('AAA...'))
key = paramiko.RSAKey(data=base64.decodestring('AAA...'))
client = paramiko.SSHClient()
# Bare expression: shows the (empty) host-key store in the notebook output.
clientkey = client.get_host_keys()
clientkey
# NOTE(review): no missing-host-key policy is set, so connect() raises
# SSHException for an unknown host unless a policy is configured first.
client.connect('192.168.1.6', username='deb', password='<PASSWORD>')
wcping.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import os
import cv2
import numpy as np
import pandas as pd
import string
import tensorflow as tf

# Hyper-parameters used throughout this module (trainDir, testDir, height,
# width, channel, n_len, n_class, characters, batch_size) are defined in the
# project-local params module.
from params import *

# ## Dataset


def getPath(path):
    """Collect the full paths of the (non-hidden) files under ``path``.

    Parameters
    ----------
    path : str
        Directory to scan.

    Returns
    -------
    Array-like of str
        The file names of the last directory visited by ``os.walk``, each
        joined onto ``path``.
    """
    # Initialised so an empty/non-existent directory returns [] instead of
    # raising NameError when os.walk yields nothing.
    files = []
    for root, dirs, files in os.walk(path):
        # Drop macOS Finder metadata files.
        if '.DS_Store' in files:
            files.remove('.DS_Store')
        # BUGFIX: both os.walk() and os.path.join() were hard-coded to
        # trainDir, silently ignoring the `path` argument — so
        # getDataset(dirPath) always read the training directory.
        files = pd.Series(files).apply(lambda x: os.path.join(path, x)).values
    # NOTE(review): only the files of the last os.walk() directory survive
    # the loop; this assumes `path` has a flat (non-nested) layout — confirm.
    return files


def getLabel(filesPath):
    """One-hot encode the captcha text embedded in each file name.

    File names are expected to look like ``<num>_<content>.<ext>`` where
    ``content`` holds ``n_len`` characters, each drawn from ``characters``.

    Parameters
    ----------
    filesPath : sequence of str
        File paths as produced by :func:`getPath`.

    Returns
    -------
    numpy.ndarray
        uint8 tensor of shape ``(len(filesPath), n_len, n_class)``.
    """
    labels = np.zeros((len(filesPath), n_len, n_class), dtype=np.uint8)
    for i in range(len(filesPath)):
        # Split "<num>_<content>" out of the extension-less name.
        # NOTE(review): this splits the whole path on '_'; a directory name
        # containing '_' would shift the fields — confirm path layout.
        name = os.path.splitext(filesPath[i])[0].split('_')
        num, content = name[0], name[1]
        # Write the one-hot row for every character position.
        for j, ch in enumerate(content):
            labels[i][j, :] = 0
            labels[i][j, characters.find(ch)] = 1
    return labels


def load_and_preprocess_image(filePath):
    """Read a PNG from ``filePath`` as grayscale, resize to (height, width)
    and scale pixel values to [0, 1]. Returns a float tensor."""
    image = tf.io.read_file(filePath)
    # Decode the PNG bytes into a single-channel image tensor.
    image = tf.image.decode_png(image, channels=1)
    image = tf.image.resize(image, [height, width])
    # Normalise 8-bit pixel values into [0, 1].
    image /= 255.0
    return image


def getDataset(dirPath):
    """Build a batched, repeated, prefetching ``tf.data.Dataset`` of
    (image, one-hot label) pairs from the files under ``dirPath``."""
    filesPath = getPath(dirPath)
    labels = getLabel(filesPath)
    # Dataset of file paths, mapped to decoded/normalised images in parallel.
    dataset = tf.data.Dataset.from_tensor_slices(filesPath)
    AUTOTUNE = tf.data.experimental.AUTOTUNE
    image_ds = dataset.map(load_and_preprocess_image, num_parallel_calls=AUTOTUNE)
    # Pair every image with its label tensor.
    label_ds = tf.data.Dataset.from_tensor_slices(labels)
    image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
    # Batch, repeat indefinitely, and overlap loading with training.
    image_label_ds = image_label_ds.batch(batch_size)
    image_label_ds = image_label_ds.repeat()
    image_label_ds = image_label_ds.prefetch(buffer_size=AUTOTUNE)
    return image_label_ds


# ## Data generators


def gen(path=trainDir, batch_size=32):
    """Infinite generator of (X, y) training batches read with OpenCV.

    Parameters
    ----------
    path : str
        Directory containing ``<num>_<content>.<ext>`` captcha images.
    batch_size : int
        Number of images per yielded batch.

    Yields
    ------
    (numpy.ndarray, numpy.ndarray)
        ``X`` of shape (batch_size, height, width, channel), float16 in
        [0, 1]; ``y`` of shape (batch_size, n_len, n_class), uint8 one-hot.
        Note: the same two arrays are re-filled and re-yielded each step.
    """
    X = np.zeros((batch_size, height, width, channel), dtype=np.float16)
    y = np.zeros((batch_size, n_len, n_class), dtype=np.uint8)
    # Walk the directory once; the inner while loop then cycles forever
    # over the file list of that directory.
    for root, dirs, files in os.walk(path):
        if '.DS_Store' in files:
            files.remove('.DS_Store')
        pointer = 0
        while True:
            # Wrap around when the next batch would run past the file list.
            # NOTE(review): '>=' drops the final exact batch when
            # len(files) is a multiple of batch_size — confirm intended.
            if pointer + batch_size >= len(files):
                pointer = 0
            for i in range(batch_size):
                file = files[pointer + i]
                # "<num>_<content>" from the extension-less name.
                name = os.path.splitext(file)[0].split('_')
                num, content = name[0], name[1]
                readPath = os.path.join(path, file)
                # Read as grayscale and resize to the model's input size
                # (cv2.resize takes (width, height) order).
                imgBuffer = cv2.imread(readPath, 0)
                imgBuffer = cv2.resize(imgBuffer, (width, height))
                # t, imgBuffer = cv2.threshold(imgBuffer, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                # Min-max normalise; max(1, ...) guards a constant image
                # against division by zero.
                minX = imgBuffer.min()
                imgBuffer = imgBuffer - minX
                maxX = max(1, imgBuffer.max())
                imgBuffer = np.array(imgBuffer / maxX, np.float16)
                # Add the trailing channel axis expected by the model.
                imgBuffer = np.expand_dims(imgBuffer, axis=2)
                X[i] = imgBuffer
                # One-hot encode the captcha characters.
                for j, ch in enumerate(content):
                    y[i][j, :] = 0
                    y[i][j, characters.find(ch)] = 1
            # Advance to the next batch and emit this one.
            pointer += batch_size
            yield X, y


def test_gen(path=testDir, batch_size=1):
    """Infinite generator of (X, num) test batches (no labels available).

    Yields the image batch plus the id (``num``) of the batch's last file;
    with the default ``batch_size=1`` that is simply the image's own id.
    """
    X = np.zeros((batch_size, height, width, channel), dtype=np.float16)
    for root, dirs, files in os.walk(path):
        if '.DS_Store' in files:
            files.remove('.DS_Store')
        pointer = 0
        while True:
            if pointer + batch_size >= len(files):
                pointer = 0
            for i in range(batch_size):
                file = files[pointer + i]
                # Test files carry only an id, no label text.
                num = os.path.splitext(file)[0]
                readPath = os.path.join(path, file)
                imgBuffer = cv2.imread(readPath, 0)
                imgBuffer = cv2.resize(imgBuffer, (width, height))
                # t, imgBuffer = cv2.threshold(imgBuffer, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                # Same min-max contrast normalisation as in gen().
                minX = imgBuffer.min()
                imgBuffer = imgBuffer - minX
                maxX = max(1, imgBuffer.max())
                imgBuffer = np.array(imgBuffer / maxX, np.float16)
                imgBuffer = np.expand_dims(imgBuffer, axis=2)
                X[i] = imgBuffer
            pointer += batch_size
            yield X, num
getData.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] colab_type="text" id="eM2WI45DjCVp" # # Assignment 1: Sentiment with Deep Neural Networks # # Welcome to the first assignment of course 3. In this assignment you will explore sentiment analysis using deep neural networks. # ## Outline # - [Part 1: Import libraries and try out Trax](#1) # - [Part 2: Importing the data](#2) # - [2.1 Loading in the data](#2.1) # - [2.2 Building the vocabulary](#2.2) # - [2.3 Converting a tweet to a tensor](#2.3) # - [Exercise 01](#ex01) # - [2.4 Creating a batch generator](#2.4) # - [Exercise 02](#ex02) # - [Part 3: Defining classes](#3) # - [3.1 ReLU class](#3.1) # - [Exercise 03](#ex03) # - [3.2 Dense class ](#3.2) # - [Exercise 04](#ex04) # - [3.3 Model](#3.3) # - [Exercise 05](#ex05) # - [Part 4: Training](#4) # - [4.1 Training the model](#4.1) # - [Exercise 06](#ex06) # - [4.2 Practice Making a prediction](#4.2) # - [Part 5: Evaluation ](#5) # - [5.1 Computing the accuracy on a batch](#5.1) # - [Exercise 07](#ex07) # - [5.2 Testing your model on Validation Data](#5.2) # - [Exercise 08](#ex08) # - [Part 6: Testing with your own input](#6) # # + [markdown] colab_type="text" id="8aeCPdILgrga" # In course 1, you implemented Logistic regression and Naive Bayes for sentiment analysis. However if you were to give your old models an example like: # # <center> <span style='color:blue'> <b>This movie was almost good.</b> </span> </center> # # Your model would have predicted a positive sentiment for that review. However, that sentence has a negative sentiment and indicates that the movie was not good. To solve those kinds of misclassifications, you will write a program that uses deep neural networks to identify sentiment on text. 
By completing this assignment you will: # # - Understand how you can build/design a model using layers # - Train a model using a training loop # - Use a binary cross entropy loss function # - Compute the accuracy of your model # - Predict using your own input # # As you can tell, this model follows a similar structure to the one you previously implemented in the second course of this specialization. # - Indeed most of the deep nets you will be implementing will have a similar structure. The only thing that changes is the model architecture, the inputs, and the outputs. Before starting the assignment, we will introduce you to the Google library `trax` that we use for building and training models. # # # Now we will show you how to compute the gradient of a certain function `f` by just using ` .grad(f)`. # # - Trax source code can be found on Github: [Trax](https://github.com/google/trax) # - The Trax code also uses the JAX library: [JAX](https://jax.readthedocs.io/en/latest/index.html) # + [markdown] colab_type="text" id="IOK4n9JEjCVs" # <a name="1"></a> # # Part 1: Import libraries and try out Trax # # - Let's import libraries and look at an example of using the Trax library. # + colab={"base_uri": "https://localhost:8080/", "height": 493} colab_type="code" id="WOTfm2P0jCVt" outputId="d4903011-7268-4bea-ae35-ea3fd0b46311" # Automatic gradient with replaced numpy. 
# #!pip -q install trax==1.3.1 # import relevant libraries import trax # set random seeds to make this notebook easier to replicate trax.supervised.trainer_lib.init_random_number_generators(31) # import trax.fastmath.numpy import trax.fastmath.numpy as np # import trax.layers from trax import layers as tl # import Layer from the utils.py file from utils import Layer, load_tweets, process_tweet import os # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="EyMnUt38jCVw" outputId="6981112a-3bf8-48a9-bf7d-6fc6d64150b1" # Create an array using trax.fastmath.numpy a = np.array(5.0) # View the returned array display(a) print(type(a)) # + [markdown] colab_type="text" id="WtEWAFUIjCVz" # Notice that trax.fastmath.numpy returns a DeviceArray from the jax library. # + colab={} colab_type="code" id="J2RUtDtrjCV0" # Define a function that will use the trax.fastmath.numpy array def f(x): # f = x^2 return (x**2) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="qvUd-xzqjCV4" outputId="cdda13b4-b56b-4d10-e6be-abbfcb523e78" # Call the function print(f"f(a) for a={a} is {f(a)}") # + [markdown] colab_type="text" id="yGKhSeasjCV7" # The gradient (derivative) of function `f` with respect to its input `x` is the derivative of $x^2$. # - The derivative of $x^2$ is $2x$. # - When x is 5, then $2x=10$. # # You can calculate the gradient of a function by using `trax.fastmath.grad(fun=)` and passing in the name of the function. # - In this case the function you want to take the gradient of is `f`. # - The object returned (saved in `grad_f` in this example) is a function that can calculate the gradient of f for a given trax.fastmath.numpy array. 
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="2Im5Hkc9jCV8" outputId="4c963756-9b0d-46b8-b05d-a8e4de5d0740" # Directly use trax.fastmath.grad to calculate the gradient (derivative) of the function grad_f = trax.fastmath.grad(fun=f) # df / dx - Gradient of function f(x) with respect to x # View the type of the retuned object (it's a function) type(grad_f) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="0lDIVvx3jCV_" outputId="16ad5e34-f634-4d38-ccd8-96786e1d412d" # Call the newly created function and pass in a value for x (the DeviceArray stored in 'a') grad_calculation = grad_f(a) # View the result of calling the grad_f function display(grad_calculation) # + [markdown] colab_type="text" id="41l3WDBkjCWD" # The function returned by trax.fastmath.grad returns takes in x=5 and calculates the gradient of f, which is 2*x, which is 10. The value is also stored as a DeviceArray from the jax library. # + [markdown] colab_type="text" id="CZ8RUynQsktn" # <a name="2"></a> # # Part 2: Importing the data # # <a name="2.1"></a> # ## 2.1 Loading in the data # # Import the data set. # - You may recognize this from earlier assignments in the specialization. # - Details of process_tweet function is available in utils.py file # + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="h5ClwIOSuLJh" outputId="dee50364-c476-405f-8cdd-63d067b848d6" ## DO NOT EDIT THIS CELL # Import functions from the utils.py file import numpy as np # Load positive and negative tweets all_positive_tweets, all_negative_tweets = load_tweets() # View the total number of positive and negative tweets. 
print(f"The number of positive tweets: {len(all_positive_tweets)}") print(f"The number of negative tweets: {len(all_negative_tweets)}") # Split positive set into validation and training val_pos = all_positive_tweets[4000:] # generating validation set for positive tweets train_pos = all_positive_tweets[:4000]# generating training set for positive tweets # Split negative set into validation and training val_neg = all_negative_tweets[4000:] # generating validation set for negative tweets train_neg = all_negative_tweets[:4000] # generating training set for nagative tweets # Combine training data into one set train_x = train_pos + train_neg # Combine validation data into one set val_x = val_pos + val_neg # Set the labels for the training set (1 for positive, 0 for negative) train_y = np.append(np.ones(len(train_pos)), np.zeros(len(train_neg))) # Set the labels for the validation set (1 for positive, 0 for negative) val_y = np.append(np.ones(len(val_pos)), np.zeros(len(val_neg))) print(f"length of train_x {len(train_x)}") print(f"length of val_x {len(val_x)}") # + [markdown] colab_type="text" id="dNQq4LmbjCWG" # Now import a function that processes tweets (we've provided this in the utils.py file). # - `process_tweets' removes unwanted characters e.g. hashtag, hyperlinks, stock tickers from tweet. # - It also returns a list of words (it tokenizes the original string). # + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="2bRX6aPDjCWH" outputId="09497362-bf73-4d63-e99f-460027e91eb5" # Import a function that processes the tweets # from utils import process_tweet # Try out function that processes tweets print("original tweet at training position 0") print(train_pos[0]) print("Tweet at training position 0 after processing:") process_tweet(train_pos[0]) # + [markdown] colab_type="text" id="00XdS3LOjCWK" # Notice that the function `process_tweet` keeps key words, removes the hash # symbol, and ignores usernames (words that begin with '@'). 
It also returns a list of the words. # + [markdown] colab_type="text" id="ac4D5WSUAVub" # <a name="2.2"></a> # ## 2.2 Building the vocabulary # # Now build the vocabulary. # - Map each word in each tweet to an integer (an "index"). # - The following code does this for you, but please read it and understand what it's doing. # - Note that you will build the vocabulary based on the training data. # - To do so, you will assign an index to everyword by iterating over your training set. # # The vocabulary will also include some special tokens # - `__PAD__`: padding # - `</e>`: end of line # - `__UNK__`: a token representing any word that is not in the vocabulary. # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="rQaHKs7kAVuc" outputId="1d360f94-902d-471c-d3c3-0d69c27f5eaf" # Build the vocabulary # Unit Test Note - There is no test set here only train/val # Include special tokens # started with pad, end of line and unk tokens Vocab = {'__PAD__': 0, '__</e>__': 1, '__UNK__': 2} # Note that we build vocab using training data for tweet in train_x: processed_tweet = process_tweet(tweet) for word in processed_tweet: if word not in Vocab: Vocab[word] = len(Vocab) print("Total words in vocab are",len(Vocab)) display(Vocab) # + [markdown] colab_type="text" id="gP6K9qcCAVue" # The dictionary `Vocab` will look like this: # ```CPP # {'__PAD__': 0, # '__</e>__': 1, # '__UNK__': 2, # 'followfriday': 3, # 'top': 4, # 'engag': 5, # ... # ``` # # - Each unique word has a unique integer associated with it. # - The total number of words in Vocab: 9088 # + [markdown] colab_type="text" id="0x8pND8tAVuf" # <a name="2.3"></a> # ## 2.3 Converting a tweet to a tensor # # Write a function that will convert each tweet to a tensor (a list of unique integer IDs representing the processed tweet). 
# - Note, the returned data type will be a **regular Python `list()`** # - You won't use TensorFlow in this function # - You also won't use a numpy array # - You also won't use trax.fastmath.numpy array # - For words in the tweet that are not in the vocabulary, set them to the unique ID for the token `__UNK__`. # # ##### Example # Input a tweet: # ```CPP # '@happypuppy, is Maria happy?' # ``` # # The tweet_to_tensor will first conver the tweet into a list of tokens (including only relevant words) # ```CPP # ['maria', 'happi'] # ``` # # Then it will convert each word into its unique integer # # ```CPP # [2, 56] # ``` # - Notice that the word "maria" is not in the vocabulary, so it is assigned the unique integer associated with the `__UNK__` token, because it is considered "unknown." # # # + [markdown] colab_type="text" id="QtQhtv0kjCWQ" # <a name="ex01"></a> # ### Exercise 01 # **Instructions:** Write a program `tweet_to_tensor` that takes in a tweet and converts it to an array of numbers. You can use the `Vocab` dictionary you just found to help create the tensor. # # - Use the vocab_dict parameter and not a global variable. # - Do not hard code the integer value for the `__UNK__` token. 
# + [markdown] colab_type="text" id="sKSx1SBYjCWR" # <details> # <summary> # <font size="3" color="darkgreen"><b>Hints</b></font> # </summary> # <p> # <ul> # <li>Map each word in tweet to corresponding token in 'Vocab'</li> # <li>Use Python's Dictionary.get(key,value) so that the function returns a default value if the key is not found in the dictionary.</li> # </ul> # </p> # # + colab={} colab_type="code" id="Ft1zNGMaAVuf" # UNQ_C1 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION: tweet_to_tensor def tweet_to_tensor(tweet, vocab_dict, unk_token='__UNK__', verbose=False): ### START CODE HERE (Replace instances of 'None' with your code) ### # Process the tweet into a list of words # where only important words are kept (stop words removed) word_l = None if verbose: print("List of words from the processed tweet:") print(word_l) # Initialize the list that will contain the unique integer IDs of each word tensor_l = [] # Get the unique integer ID of the __UNK__ token unk_ID = None if verbose: print(f"The unique integer ID for the unk_token is {unk_ID}") # for each word in the list: for word in word_l: # Get the unique integer ID. # If the word doesn't exist in the vocab dictionary, # use the unique ID for __UNK__ instead. word_ID = None ### END CODE HERE ### # Append the unique integer ID to the tensor list. 
tensor_l.append(word_ID) return tensor_l # + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="ze0Zx_5UjCWU" outputId="a9427907-b364-4c5f-fed5-bd2502e18bab" print("Actual tweet is\n",val_pos[0]) print("\nTensor of tweet:\n",tweet_to_tensor(val_pos[0], vocab_dict=Vocab)) # + [markdown] colab_type="text" id="s0MbWVeijCWW" # ##### Expected output # # ```CPP # Actual tweet is # Bro:U wan cut hair anot,ur hair long Liao bo # Me:since ord liao,take it easy lor treat as save $ leave it longer :) # Bro:LOL <NAME> # # Tensor of tweet: # [1065, 136, 479, 2351, 745, 8146, 1123, 745, 53, 2, 2672, 791, 2, 2, 349, 601, 2, 3489, 1017, 597, 4559, 9, 1065, 157, 2, 2] # ``` # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="bMmf0pPcjCWX" outputId="7b2a4f49-12df-4dde-fa27-15c55d4c409b" # test tweet_to_tensor def test_tweet_to_tensor(): test_cases = [ { "name":"simple_test_check", "input": [val_pos[1],Vocab], "expected":[444, 2, 304, 567, 56, 9], "error":"The function gives bad output for val_pos[1]. Test failed" }, { "name":"datatype_check", "input":[val_pos[1],Vocab], "expected":type([]), "error":"Datatype mismatch. 
Need only list not np.array" }, { "name":"without_unk_check", "input":[val_pos[1],Vocab], "expected":6, "error":"Unk word check not done- Please check if you included mapping for unknown word" } ] count = 0 for test_case in test_cases: try: if test_case['name'] == "simple_test_check": assert test_case["expected"] == tweet_to_tensor(*test_case['input']) count += 1 if test_case['name'] == "datatype_check": assert isinstance(tweet_to_tensor(*test_case['input']),test_case["expected"]) count += 1 if test_case['name'] == "without_unk_check": assert None not in tweet_to_tensor(*test_case['input']) count += 1 except: print(test_case['error']) if count == 3: print("\033[92m All tests passed") else: print(count," Tests passed out of 3") test_tweet_to_tensor() # + [markdown] colab_type="text" id="rwAZZIYYAVuj" # <a name="2.4"></a> # ## 2.4 Creating a batch generator # # Most of the time in Natural Language Processing, and AI in general we use batches when training our data sets. # - If instead of training with batches of examples, you were to train a model with one example at a time, it would take a very long time to train the model. # - You will now build a data generator that takes in the positive/negative tweets and returns a batch of training examples. It returns the model inputs, the targets (positive or negative labels) and the weight for each target (ex: this allows us to can treat some examples as more important to get right than others, but commonly this will all be 1.0). # # Once you create the generator, you could include it in a for loop # # ```CPP # for batch_inputs, batch_targets, batch_example_weights in data_generator: # ... # ``` # # You can also get a single batch like this: # # ```CPP # batch_inputs, batch_targets, batch_example_weights = next(data_generator) # ``` # The generator returns the next batch each time it's called. # - This generator returns the data in a format (tensors) that you could directly use in your model. 
# - It returns a triple: the inputs, targets, and loss weights: # -- Inputs is a tensor that contains the batch of tweets we put into the model. # -- Targets is the corresponding batch of labels that we train to generate. # -- Loss weights here are just 1s with same shape as targets. Next week, you will use it to mask input padding. # + [markdown] colab_type="text" id="sR-sF_o0jCWa" # <a name="ex02"></a> # ### Exercise 02 # Implement `data_generator`. # + colab={} colab_type="code" id="fPd9HNT7AVuk" # UNQ_C2 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED: Data generator def data_generator(data_pos, data_neg, batch_size, loop, vocab_dict): ''' Input: data_pos - Set of posstive examples data_neg - Set of negative examples batch_size - number of samples per batch loop - True or False Yield: inputs - Subset of positive and negative examples targets - The corresponding labels for the subset example_weights - An array specifying the importance of each example ''' ### START GIVEN CODE ### # make sure the batch size is an even number # to allow an equal number of positive and negative samples assert batch_size % 2 == 0 # Number of positive examples in each batch is half of the batch size # same with number of negative examples in each batch n_to_take = batch_size // 2 # Use pos_index to walk through the data_pos array # same with neg_index and data_neg pos_index = 0 neg_index = 0 # Loop indefinitely while True: # If the positive index plus num of positive examples # goes past the positive dataset, if pos_index + n_to_take > len(data_pos): # If user wants to keep re-using the data, reset the index if loop: pos_index = 0 # otherwise exit the loop else: # exit the loop break ### END GIVEN CODE ### ### START CODE HERE (Replace instances of 'None' with your code) ### # If the positive index plus num of negative examples # goes past the negative dataset, if None: # If user wants to keep re-using the data, reset the index if loop: # Reset the negative index neg_index = None # 
otherwise exit the loop else: # exit the loop None ### END CODE HERE ### ### START GIVEN CODE ### # create a batch with positive examples batch = [] # Start from pos_index and increment i up to n_to_take for i in range(n_to_take): # get the tweet as pos_index + i tweet = data_pos[pos_index + i] # convert the tweet into tensors of integers representing the processed words tensor = tweet_to_tensor(tweet, vocab_dict) # append the tensor to the batch list batch.append(tensor) ### END GIVEN CODE ### ### START CODE HERE (Replace instances of 'None' with your code) ### # Using the same batch list, start from neg_index and increment i up to n_to_take for i in range(None): # get the tweet as pos_index + i tweet = None # convert the tweet into tensors of integers representing the processed words tensor = None # append the tensor to the batch list None ### END CODE HERE ### ### START GIVEN CODE ### # Update the start index for positive data # so that it's n_to_take positions after the current pos_index pos_index += n_to_take # Update the start index for negative data # so that it's n_to_take positions after the current neg_index neg_index += n_to_take # Get the max tweet length (the length of the longest tweet) # (you will pad all shorter tweets to have this length) max_len = max([len(t) for t in batch]) # Initialize the input_l, which will # store the padded versions of the tensors tensor_pad_l = [] # Pad shorter tweets with zeros for tensor in batch: ### END GIVEN CODE ### ### START CODE HERE (Replace instances of 'None' with your code) ### # Get the number of positions to pad for this tensor so that it will be max_len long n_pad = None # Generate a list of zeros, with length n_pad pad_l = None # concatenate the tensor and the list of padded zeros tensor_pad = None # append the padded tensor to the list of padded tensors None # convert the list of padded tensors to a numpy array # and store this as the model inputs inputs = None # Generate the list of targets for the 
positive examples (a list of ones) # The length is the number of positive examples in the batch target_pos = None # Generate the list of targets for the negative examples (a list of ones) # The length is the number of negative examples in the batch target_neg = None # Concatenate the positve and negative targets target_l = None # Convert the target list into a numpy array targets = None # Example weights: Treat all examples equally importantly. example_weights = None ### END CODE HERE ### ### GIVEN CODE ### # note we use yield and not return yield inputs, targets, example_weights # + [markdown] colab_type="text" id="kI9gEdqpjCWd" # Now you can use your data generator to create a data generator for the training data, and another data generator for the validation data. # + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="iIwM4YHtAVum" outputId="d25aa4ae-bad3-41f5-963c-0a091f8fb108" # Create the training data generator def train_generator(batch_size): return data_generator(train_pos, train_neg, batch_size, True, Vocab) # Create the test data generator def val_generator(batch_size): return data_generator(val_pos, val_neg, batch_size, False, Vocab) # Get a batch from the train_generator and inspect. 
inputs, targets, example_weights = next(train_generator(4)) # this will print a list of 4 tensors padded with zeros print(f'Inputs: {inputs}') print(f'Targets: {targets}') print(f'Example Weights: {example_weights}') # + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="mcDOyrx9jCWh" outputId="1adae96d-fb14-4cc6-d39e-7e4d9517c239" # Test the train_generator # Create a data generator for training data, # which produces batches of size 4 (for tensors and their respective targets) tmp_data_gen = train_generator(batch_size = 4) # Call the data generator to get one batch and its targets tmp_inputs, tmp_targets, tmp_example_weights = next(tmp_data_gen) print(f"The inputs shape is {tmp_inputs.shape}") for i,t in enumerate(tmp_inputs): print(f"input tensor: {t}; target {tmp_targets[i]}; example weights {tmp_example_weights[i]}") # + [markdown] colab_type="text" id="cCTl7w1kjCWj" # ##### Expected output # # ```CPP # The inputs shape is (4, 14) # input tensor: [3 4 5 6 7 8 9 0 0 0 0 0 0 0]; target 1; example weights 1 # input tensor: [10 11 12 13 14 15 16 17 18 19 20 9 21 22]; target 1; example weights 1 # input tensor: [5738 2901 3761 0 0 0 0 0 0 0 0 0 0 0]; target 0; example weights 1 # input tensor: [ 858 256 3652 5739 307 4458 567 1230 2767 328 1202 3761 0 0]; target 0; example weights 1 # ``` # + [markdown] colab_type="text" id="J3HrgcJmAVup" # Now that you have your train/val generators, you can just call them and they will return tensors which correspond to your tweets in the first column and their corresponding labels in the second column. Now you can go ahead and start building your neural network. # + [markdown] colab_type="text" id="X591GrH_stXq" # <a name="3"></a> # # Part 3: Defining classes # # In this part, you will write your own library of layers. It will be very similar # to the one used in Trax and also in Keras and PyTorch. 
Writing your own small # framework will help you understand how they all work and use them effectively # in the future. # # Your framework will be based on the following `Layer` class from utils.py. # # ```CPP # class Layer(object): # """ Base class for layers. # """ # # # Constructor # def __init__(self): # # set weights to None # self.weights = None # # # The forward propagation should be implemented # # by subclasses of this Layer class # def forward(self, x): # raise NotImplementedError # # # This function initializes the weights # # based on the input signature and random key, # # should be implemented by subclasses of this Layer class # def init_weights_and_state(self, input_signature, random_key): # pass # # # This initializes and returns the weights, do not override. # def init(self, input_signature, random_key): # self.init_weights_and_state(input_signature, random_key) # return self.weights # # # __call__ allows an object of this class # # to be called like it's a function. # def __call__(self, x): # # When this layer object is called, # # it calls its forward propagation function # return self.forward(x) # ``` # + [markdown] colab_type="text" id="TcWUXFaPzS-m" # <a name="3.1"></a> # ## 3.1 ReLU class # You will now implement the ReLU activation function in a class below. The ReLU function looks as follows: # <img src = "relu.jpg" style="width:300px;height:150px;"/> # # $$ \mathrm{ReLU}(x) = \mathrm{max}(0,x) $$ # # + [markdown] colab_type="text" id="KVVRVuhjjCWl" # <a name="ex03"></a> # ### Exercise 03 # **Instructions:** Implement the ReLU activation function below. Your function should take in a matrix or vector and it should transform all the negative numbers into 0 while keeping all the positive numbers intact. 
# + [markdown] colab_type="text" id="dPH_EVcHjCWl" # <details> # <summary> # <font size="3" color="darkgreen"><b>Hints</b></font> # </summary> # <p> # <ul> # <li>Please use numpy.maximum(A,k) to find the maximum between each element in A and a scalar k</li> # </ul> # </p> # # + colab={} colab_type="code" id="VGE5zZ5mzF9x" # UNQ_C3 (UNIQUE CELL IDENTIFIER, DO NOT EDIT) # GRADED FUNCTION: Relu class Relu(Layer): """Relu activation function implementation""" def forward(self, x): ''' Input: - x (a numpy array): the input Output: - activation (numpy array): all positive or 0 version of x ''' ### START CODE HERE (Replace instances of 'None' with your code) ### activation = None ### END CODE HERE ### return activation # + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="hVQ3YtoZ1uYP" outputId="6565529b-abe1-4b01-f5a8-36f8c7d0d903" # Test your relu function x = np.array([[-2.0, -1.0, 0.0], [0.0, 1.0, 2.0]], dtype=float) relu_layer = Relu() print("Test data is:") print(x) print("Output of Relu is:") print(relu_layer(x)) # + [markdown] colab_type="text" id="niL6mIuBAVuu" # ##### Expected Outout # ```CPP # Test data is: # [[-2. -1. 0.] # [ 0. 1. 2.]] # Output of Relu is: # [[0. 0. 0.] # [0. 1. 2.]] # ``` # + [markdown] colab_type="text" id="XepjDxCQ1G8p" # <a name="3.2"></a> # ## 3.2 Dense class # # ### Exercise # # Implement the forward function of the Dense class. # - The forward function multiplies the input to the layer (`x`) by the weight matrix (`W`) # # $$\mathrm{forward}(\mathbf{x},\mathbf{W}) = \mathbf{xW} $$ # # - You can use `numpy.dot` to perform the matrix multiplication. # # Note that for more efficient code execution, you will use the trax version of `math`, which includes a trax version of `numpy` and also `random`. # # Implement the weight initializer `new_weights` function # - Weights are initialized with a random key. 
# - The second parameter is a tuple for the desired shape of the weights (num_rows, num_cols) # - The num of rows for weights should equal the number of columns in x, because for forward propagation, you will multiply x times weights. # # Please use `trax.fastmath.random.normal(key, shape, dtype=tf.float32)` to generate random values for the weight matrix. The key difference between this function # and the standard `numpy` randomness is the explicit use of random keys, which # need to be passed. While it can look tedious at the first sight to pass the random key everywhere, you will learn in Course 4 why this is very helpful when # implementing some advanced models. # - `key` can be generated by calling `random.get_prng(seed=)` and passing in a number for the `seed`. # - `shape` is a tuple with the desired shape of the weight matrix. # - The number of rows in the weight matrix should equal the number of columns in the variable `x`. Since `x` may have 2 dimensions if it reprsents a single training example (row, col), or three dimensions (batch_size, row, col), get the last dimension from the tuple that holds the dimensions of x. # - The number of columns in the weight matrix is the number of units chosen for that dense layer. Look at the `__init__` function to see which variable stores the number of units. # - `dtype` is the data type of the values in the generated matrix; keep the default of `tf.float32`. In this case, don't explicitly set the dtype (just let it use the default value). # # Set the standard deviation of the random values to 0.1 # - The values generated have a mean of 0 and standard deviation of 1. # - Set the default standard deviation `stdev` to be 0.1 by multiplying the standard deviation to each of the values in the weight matrix. 
# + colab={} colab_type="code" id="cJqiv5KnjCWr"
# use the fastmath module within trax
from trax import fastmath

# use the numpy module from trax
np = fastmath.numpy

# use the fastmath.random module from trax
random = fastmath.random

# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="6reTe6asjCWt" outputId="94480024-f9ec-43d2-f657-f523c9c78504"
# See how the fastmath.trax.random.normal function works
tmp_key = random.get_prng(seed=1)
print("The random seed generated by random.get_prng")
display(tmp_key)

print("choose a matrix with 2 rows and 3 columns")
tmp_shape=(2,3)
display(tmp_shape)

# Generate a weight matrix
# Note that you'll get an error if you try to set dtype to tf.float32, where tf is tensorflow
# Just avoid setting the dtype and allow it to use the default data type
tmp_weight = trax.fastmath.random.normal(key=tmp_key, shape=tmp_shape)

print("Weight matrix generated with a normal distribution with mean 0 and stdev of 1")
display(tmp_weight)

# + [markdown] colab_type="text" id="IpiJ87L9jCWw"
# <a name="ex04"></a>
# ### Exercise 04
#
# Implement the `Dense` class.

# + colab={} colab_type="code" id="783FfWt70660"
# UNQ_C4 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: Dense

class Dense(Layer):
    """
    A dense (fully-connected) layer.
    """

    # __init__ is implemented for you
    def __init__(self, n_units, init_stdev=0.1):

        # Set the number of units in this layer
        self._n_units = n_units
        # Standard deviation used when initializing the weight matrix.
        # (Bug fix: this was hard-coded to 0.1, silently ignoring the
        # init_stdev argument; the default value keeps behavior unchanged.)
        self._init_stdev = init_stdev

    # Please implement 'forward()'
    def forward(self, x):

        ### START CODE HERE (Replace instances of 'None' with your code) ###

        # Matrix multiply x and the weight matrix
        dense = np.dot(x, self.weights)

        ### END CODE HERE ###
        return dense

    # init_weights
    def init_weights_and_state(self, input_signature, random_key):

        ### START CODE HERE (Replace instances of 'None' with your code) ###
        # The input_signature has a .shape attribute that gives the shape as a tuple
        input_shape = input_signature.shape

        # Generate the weight matrix from a normal distribution,
        # scaled by the standard deviation 'self._init_stdev'.
        # Rows = last dimension of the input (so x @ W is defined);
        # columns = number of units in this layer.
        w = self._init_stdev * random.normal(
            key=random_key, shape=(input_shape[-1], self._n_units))

        ### END CODE HERE ###

        self.weights = w
        return self.weights


# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="vw-z6n8SAVuy" outputId="60b4a7ca-f8b6-4dce-9a35-b72eba48db1f"
# Testing your Dense layer
dense_layer = Dense(n_units=10)  #sets  number of units in dense layer
random_key = random.get_prng(seed=0)  # sets random seed
z = np.array([[2.0, 7.0, 25.0]]) # input array

dense_layer.init(z, random_key)
print("Weights are\n ",dense_layer.weights) #Returns randomly generated weights
print("Foward function output is ", dense_layer(z)) # Returns multiplied values of units and weights
# -

# ##### Expected Output
# ```CPP
# Weights are
#   [[-0.02837108  0.09368162 -0.10050076  0.14165013  0.10543301  0.09108126
#   -0.04265672  0.0986188  -0.05575325  0.00153249]
#  [-0.20785688  0.0554837   0.09142365  0.05744595  0.07227863  0.01210617
#   -0.03237354  0.16234995  0.02450038 -0.13809784]
#  [-0.06111237  0.01403724  0.08410042 -0.1094358  -0.10775021 -0.11396459
#   -0.05933381 -0.01557652 -0.03832145 -0.11144515]]
# Foward function output is  [[-3.0395496   0.9266802   2.5414743  -2.050473   -1.9769388  -2.582209
#   -1.7952735   0.94427425 -0.8980402  -3.7497487 ]]
# ```

# + [markdown] colab_type="text" id="eZEY8vBCgrgy"
# <a name="3.3"></a>
# ## 3.3 Model
#
# 
Now you will implement a classifier using neural networks. Here is the model architecture you will be implementing. # # <img src = "nn.jpg" style="width:400px;height:250px;"/> # # For the model implementation, you will use the Trax layers library `tl`. # Note that the second character of `tl` is the lowercase of letter `L`, not the number 1. Trax layers are very similar to the ones you implemented above, # but in addition to trainable weights also have a non-trainable state. # State is used in layers like batch normalization and for inference, you will learn more about it in course 4. # # First, look at the code of the Trax Dense layer and compare to your implementation above. # - [tl.Dense](https://github.com/google/trax/blob/master/trax/layers/core.py#L29): Trax Dense layer implementation # # One other important layer that you will use a lot is one that allows to execute one layer after another in sequence. # - [tl.Serial](https://github.com/google/trax/blob/master/trax/layers/combinators.py#L26): Combinator that applies layers serially. # - You can pass in the layers as arguments to `Serial`, separated by commas. # - For example: `tl.Serial(tl.Embeddings(...), tl.Mean(...), tl.Dense(...), tl.LogSoftmax(...))` # # Please use the `help` function to view documentation for each layer. # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="RpbiDzN9jCW2" outputId="b3d40cc7-c133-404b-db86-fef5fa664a48" # View documentation on tl.Dense help(tl.Dense) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="Hrblw_uJ4zmF" outputId="729a5a60-3d38-4457-e2ae-c9abefae7eac" # View documentation on tl.Serial help(tl.Serial) # + [markdown] colab_type="text" id="n6PptsvwjCW3" # - [tl.Embedding](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/core.py#L113): Layer constructor function for an embedding layer. # - `tl.Embedding(vocab_size, d_feature)`. 
# - `vocab_size` is the number of unique words in the given vocabulary. # - `d_feature` is the number of elements in the word embedding (some choices for a word embedding size range from 150 to 300, for example). # - Recall from the previous course 2, week 4, that the embedding is # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="Y5FAphBWjCW4" outputId="de4678bf-0bcd-456b-988d-66497fb12929" # View documentation for tl.Embedding help(tl.Embedding) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="Bi4OhkZbjCW6" outputId="61a46a9c-ef12-42ec-99e7-1ec888937c9a" tmp_embed = tl.Embedding(vocab_size=3, d_feature=2) display(tmp_embed) # + [markdown] colab_type="text" id="OD0XVH5jjCW8" # - [tl.Mean](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/core.py#L276): Calculates means across an axis. In this case, please choose axis = 1 to get an average embedding vector (an embedding vector that is an average of all words in the vocabulary). # - For example, if the embedding matrix is 300 elements and vocab size is 10,000 words, taking the mean of the embedding matrix along axis=1 will yield a vector of 300 elements. 
# + colab={"base_uri": "https://localhost:8080/", "height": 289} colab_type="code" id="CO0uMOOmjCW8" outputId="066c2690-e572-4e0b-8a98-ca6873f8918f" # view the documentation for tl.mean help(tl.Mean) # + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="eSS-_d38jCW-" outputId="eb19ac1d-7f11-4e5c-e7b4-97544a34f7d4" # Pretend the embedding matrix uses # 2 elements for embedding the meaning of a word # and has a vocabulary size of 3 # So it has shape (2,3) tmp_embed = np.array([[1,2,3,], [4,5,6] ]) # take the mean along axis 0 print("The mean along axis 0 creates a vector whose length equals the vocabulary size") display(np.mean(tmp_embed,axis=0)) print("The mean along axis 1 creates a vector whose length equals the number of elements in a word embedding") display(np.mean(tmp_embed,axis=1)) # + [markdown] colab_type="text" id="08G5yUa_jCXE" # - [tl.LogSoftmax](https://github.com/google/trax/blob/1372b903bb66b0daccee19fd0b1fdf44f659330b/trax/layers/core.py#L242): Implements log softmax function # - Here, you don't need to set any parameters for `LogSoftMax()`. # + colab={"base_uri": "https://localhost:8080/", "height": 238} colab_type="code" id="0UsQjFrAjCXF" outputId="4ce93870-33b6-47a3-97d4-4e01564e3893" help(tl.LogSoftmax) # + [markdown] colab_type="text" id="W8ONXnJsjCXH" # <a name="ex05"></a> # ### Exercise 05 # Implement the classifier function. 
# + colab={} colab_type="code" id="Wh33Hk8lgrgz"
# UNQ_C5 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: classifier
def classifier(vocab_size=len(Vocab), embedding_dim=256, output_dim=2, mode='train'):

    ### START CODE HERE (Replace instances of 'None' with your code) ###

    # create embedding layer
    # number of rows is the vocabulary size
    # number of columns is the embedding dimension
    embed_layer = tl.Embedding(
        vocab_size=vocab_size,    # one row of weights per vocabulary word
        d_feature=embedding_dim)  # size of each word-embedding vector

    # Create a mean layer, to create an "average" word embedding
    # (axis=1 averages over the words of each tweet, not over the batch)
    mean_layer = tl.Mean(axis=1)

    # Create a dense layer, one unit for each output
    dense_output_layer = tl.Dense(n_units=output_dim)

    # Create the log softmax layer (no parameters needed)
    log_softmax_layer = tl.LogSoftmax()

    # Use tl.Serial to combine all layers
    # and create the classifier
    # of type trax.layers.combinators.Serial
    model = tl.Serial(
        embed_layer,        # embedding layer
        mean_layer,         # mean layer
        dense_output_layer, # dense output layer
        log_softmax_layer   # log softmax layer
    )
    ### END CODE HERE ###

    # return the model of type
    return model


# + colab={} colab_type="code" id="OwJCu3e9jCXK"
tmp_model = classifier()

# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="ZsMzvK8YjCXM" outputId="dbc365af-2a5a-4423-98f1-371d2ee6bba4"
print(type(tmp_model))
display(tmp_model)

# + [markdown] colab_type="text" id="DV0LEuRVjCXO"
# ##### Expected Output
# ```CPP
# <class 'trax.layers.combinators.Serial'>
# Serial[
#   Embedding_9088_256
#   Mean
#   Dense_2
#   LogSoftmax
# ]
# ```

# + [markdown] colab_type="text" id="1FaugA_7grg6"
# <a name="4"></a>
# # Part 4: Training
#
# To train a model on a task, Trax defines an abstraction `trax.supervised.training.TrainTask` which packages the train data, loss and optimizer (among other things) together into an object.
#
# Similarly to evaluate a model, Trax defines an abstraction `trax.supervised.training.EvalTask` which packages the eval data and metrics (among other things) into another object.
#
# The final piece tying things together is the `trax.supervised.training.Loop` abstraction that is a very simple and flexible way to put everything together and train the model, all the while evaluating it and saving checkpoints.
# Using `Loop` will save you a lot of code compared to always writing the training loop by hand, like you did in courses 1 and 2. More importantly, you are less likely to have a bug in that code that would ruin your training. # + colab={"base_uri": "https://localhost:8080/", "height": 799} colab_type="code" id="UGgKw03jjCXP" outputId="014a4326-53ac-4408-878f-0202f6c4828e" # View documentation for trax.supervised.training.TrainTask help(trax.supervised.training.TrainTask) # + colab={"base_uri": "https://localhost:8080/", "height": 850} colab_type="code" id="Tr2MmdWDn6hV" outputId="daec4adb-694d-407e-f1cc-8eb26628ed05" # View documentation for trax.supervised.training.EvalTask help(trax.supervised.training.EvalTask) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="XkUVMzVXn_8f" outputId="b5bdbd12-ec1c-4a4c-99ac-a122e5534434" # View documentation for trax.supervised.training.Loop help(trax.supervised.training.Loop) # + colab={"base_uri": "https://localhost:8080/", "height": 374} colab_type="code" id="Ooekq1F305bt" outputId="9f60f810-9d6a-47b6-e977-b6a3c478e9ba" # View optimizers that you could choose from help(trax.optimizers) # + [markdown] colab_type="text" id="OmR3BhV41Cxs" # Notice some available optimizers include: # ```CPP # adafactor # adam # momentum # rms_prop # sm3 # ``` # + [markdown] colab_type="text" id="HA01H6K7grg_" # <a name="4.1"></a> # ## 4.1 Training the model # # Now you are going to train your model. # # Let's define the `TrainTask`, `EvalTask` and `Loop` in preparation to train the model. 
# + colab={} colab_type="code" id="ogMtJgHSoiZj" from trax.supervised import training batch_size = 16 train_task = training.TrainTask( labeled_data=train_generator(batch_size=batch_size), loss_layer=tl.CrossEntropyLoss(), optimizer=trax.optimizers.Adam(0.01), n_steps_per_checkpoint=10, ) eval_task = training.EvalTask( labeled_data=val_generator(batch_size=batch_size), metrics=[tl.CrossEntropyLoss(), tl.Accuracy()], ) model = classifier() # + [markdown] colab_type="text" id="R_sw8EGd0Sjk" # This defines a model trained using `tl.CrossEntropyLoss` optimized with the `trax.optimizers.Adam` optimizer, all the while tracking the accuracy using `tl.Accuracy` metric. We also track `tl.CrossEntropyLoss` on the validation set. # + [markdown] colab_type="text" id="yB78IIUerIVG" # Now let's make an output directory and train the model. # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="CNx4LnP9rMsO" outputId="359fab84-7b89-4eea-b64e-5c681f6952c1" output_dir = '~/model/' output_dir_expand = os.path.expanduser(output_dir) print(output_dir_expand) # + [markdown] colab_type="text" id="e4R4EHUcrwqe" # <a name="ex06"></a> # ### Exercise 06 # **Instructions:** Implement `train_model` to train the model (`classifier` that you wrote earlier) for the given number of training steps (`n_steps`) using `TrainTask`, `EvalTask` and `Loop`. 
# + colab={} colab_type="code" id="tolygrj7rpFX"
# UNQ_C6 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: train_model
def train_model(classifier, train_task, eval_task, n_steps, output_dir):
    '''
    Input:
        classifier - the model you are building
        train_task - Training task
        eval_task - Evaluation task
        n_steps - the evaluation steps
        output_dir - folder to save your files
    Output:
        trainer -  trax trainer
    '''
    ### START CODE HERE (Replace instances of 'None' with your code) ###
    # Bundle the model, training task and evaluation task into a Loop;
    # checkpoints are written to output_dir.
    training_loop = training.Loop(
        classifier,            # the learning model
        train_task,            # the training task
        eval_task=eval_task,   # the evaluation task
        output_dir=output_dir) # folder for checkpoints and logs

    # Run the loop for the requested number of training steps.
    training_loop.run(n_steps=n_steps)
    ### END CODE HERE ###

    # Return the training_loop, since it has the model.
    return training_loop


# + colab={"base_uri": "https://localhost:8080/", "height": 578} colab_type="code" id="d-AtiqAYs_rH" outputId="32fd06b6-9d04-4391-fe3a-689a28734b72"
training_loop = train_model(classifier, train_task, eval_task, 100, output_dir_expand)

# + [markdown] colab_type="text" id="Wxn24gyx1Xpd"
# ##### Expected output (Approximately)
# ```CPP
# Step 1: train CrossEntropyLoss | 0.75331926
# Step 1: eval CrossEntropyLoss | 0.88253963
# Step 1: eval Accuracy | 0.50000000
# Step 10: train CrossEntropyLoss | 0.68949085
# Step 10: eval CrossEntropyLoss | 0.39143169
# Step 10: eval Accuracy | 1.00000000
# Step 20: train CrossEntropyLoss | 0.34751052
# Step 20: eval CrossEntropyLoss | 0.28144282
# Step 20: eval Accuracy | 1.00000000
# Step 30: train CrossEntropyLoss | 0.21527445
# Step 30: eval CrossEntropyLoss | 0.14794244
# Step 30: eval Accuracy | 1.00000000
# Step 40: train CrossEntropyLoss | 0.12926930
# Step 40: eval CrossEntropyLoss | 0.13686025
# Step 40: eval Accuracy | 0.93750000
# Step 50: train CrossEntropyLoss | 0.11106913
# Step 50: eval CrossEntropyLoss | 0.08613179
# Step 50: eval Accuracy | 1.00000000
# Step 60: train CrossEntropyLoss | 0.06994272
# Step 60: eval CrossEntropyLoss | 0.05273105
# Step 60: eval Accuracy | 1.00000000
# Step 70: train CrossEntropyLoss | 0.06942032
# Step 70: eval CrossEntropyLoss | 0.08188842
# Step 70: eval Accuracy | 0.93750000
# Step 80: train CrossEntropyLoss | 0.04251108
# Step 80: eval CrossEntropyLoss | 0.04675784
# Step 80: eval Accuracy | 1.00000000
# Step 90: train CrossEntropyLoss | 0.04134055
# Step 90: eval CrossEntropyLoss | 0.09237872
# Step 90: eval Accuracy | 0.93750000
# Step 100: train CrossEntropyLoss | 0.04980525
# Step 100: eval CrossEntropyLoss | 0.05621190
# Step 100: eval Accuracy | 1.00000000
# ```

# + [markdown] colab_type="text" id="KVMcsw2kjCX9"
# <a name="4.2"></a>
# ## 4.2 Practice Making a prediction
#
# Now that you have trained a model, you can access it as `training_loop.model` object. We will actually use `training_loop.eval_model` and in the next weeks you will learn why we sometimes use a different model for evaluation, e.g., one without dropout. For now, make predictions with your model.
#
# Use the training data just to see how the prediction process works.
# - Later, you will use validation data to evaluate your model's performance.
# # + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="WAMgXWY4jCX-" outputId="7d732b79-6528-49cf-a78a-2f3ee4891681" # Create a generator object tmp_train_generator = train_generator(16) # get one batch tmp_batch = next(tmp_train_generator) # Position 0 has the model inputs (tweets as tensors) # position 1 has the targets (the actual labels) tmp_inputs, tmp_targets, tmp_example_weights = tmp_batch print(f"The batch is a tuple of length {len(tmp_batch)} because position 0 contains the tweets, and position 1 contains the targets.") print(f"The shape of the tweet tensors is {tmp_inputs.shape} (num of examples, length of tweet tensors)") print(f"The shape of the labels is {tmp_targets.shape}, which is the batch size.") print(f"The shape of the example_weights is {tmp_example_weights.shape}, which is the same as inputs/targets size.") # + colab={"base_uri": "https://localhost:8080/", "height": 374} colab_type="code" id="5XoxD6u5jCX_" outputId="d857441c-0977-411f-a8de-2037820d8fa4" # feed the tweet tensors into the model to get a prediction tmp_pred = training_loop.eval_model(tmp_inputs) print(f"The prediction shape is {tmp_pred.shape}, num of tensor_tweets as rows") print("Column 0 is the probability of a negative sentiment (class 0)") print("Column 1 is the probability of a positive sentiment (class 1)") print() print("View the prediction array") tmp_pred # + [markdown] colab_type="text" id="0aJpFcyljCYB" # To turn these probabilities into categories (negative or positive sentiment prediction), for each row: # - Compare the probabilities in each column. # - If column 1 has a value greater than column 0, classify that as a positive tweet. # - Otherwise if column 1 is less than or equal to column 0, classify that example as a negative tweet. 
# + colab={"base_uri": "https://localhost:8080/", "height": 289} colab_type="code" id="6wJHv0TNjCYC" outputId="0367db61-6e29-44b5-be45-7534600e6931" # turn probabilites into category predictions tmp_is_positive = tmp_pred[:,1] > tmp_pred[:,0] for i, p in enumerate(tmp_is_positive): print(f"Neg log prob {tmp_pred[i,0]:.4f}\tPos log prob {tmp_pred[i,1]:.4f}\t is positive? {p}\t actual {tmp_targets[i]}") # + [markdown] colab_type="text" id="TywSi02cjCYF" # Notice that since you are making a prediction using a training batch, it's more likely that the model's predictions match the actual targets (labels). # - Every prediction that the tweet is positive is also matching the actual target of 1 (positive sentiment). # - Similarly, all predictions that the sentiment is not positive matches the actual target of 0 (negative sentiment) # + [markdown] colab_type="text" id="N6X_0K_EjCYF" # One more useful thing to know is how to compare if the prediction is matching the actual target (label). # - The result of calculation `is_positive` is a boolean. 
# - The target is a type trax.fastmath.numpy.int32 # - If you expect to be doing division, you may prefer to work with decimal numbers with the data type type trax.fastmath.numpy.int32 # + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="CQgx_ar9jCYG" outputId="16b00f0d-9f1a-4602-f38e-b58ea3ab52a9" # View the array of booleans print("Array of booleans") display(tmp_is_positive) # convert boolean to type int32 # True is converted to 1 # False is converted to 0 tmp_is_positive_int = tmp_is_positive.astype(np.int32) # View the array of integers print("Array of integers") display(tmp_is_positive_int) # convert boolean to type float32 tmp_is_positive_float = tmp_is_positive.astype(np.float32) # View the array of floats print("Array of floats") display(tmp_is_positive_float) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="O_GmTvaTjCYH" outputId="218a3de9-38b4-44b7-bdd4-a7e8306b41d9" tmp_pred.shape # + [markdown] colab_type="text" id="8gJ3n4UljCYJ" # Note that Python usually does type conversion for you when you compare a boolean to an integer # - True compared to 1 is True, otherwise any other integer is False. # - False compared to 0 is True, otherwise any ohter integer is False. # + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="GbKFCf0njCYJ" outputId="c18362fd-4202-47b5-cfb6-a6797863fff7" print(f"True == 1: {True == 1}") print(f"True == 2: {True == 2}") print(f"False == 0: {False == 0}") print(f"False == 2: {False == 2}") # + [markdown] colab_type="text" id="Jh5XfviCjCYK" # However, we recommend that you keep track of the data type of your variables to avoid unexpected outcomes. So it helps to convert the booleans into integers # - Compare 1 to 1 rather than comparing True to 1. # + [markdown] colab_type="text" id="37PNk6IzjCYL" # Hopefully you are now familiar with what kinds of inputs and outputs the model uses when making a prediction. 
# - This will help you implement a function that estimates the accuracy of the model's predictions. # + [markdown] colab_type="text" id="fRRrgOHJgrhI" # <a name="5"></a> # # Part 5: Evaluation # # <a name="5.1"></a> # ## 5.1 Computing the accuracy on a batch # # You will now write a function that evaluates your model on the validation set and returns the accuracy. # - `preds` contains the predictions. # - Its dimensions are `(batch_size, output_dim)`. `output_dim` is two in this case. Column 0 contains the probability that the tweet belongs to class 0 (negative sentiment). Column 1 contains probability that it belongs to class 1 (positive sentiment). # - If the probability in column 1 is greater than the probability in column 0, then interpret this as the model's prediction that the example has label 1 (positive sentiment). # - Otherwise, if the probabilities are equal or the probability in column 0 is higher, the model's prediction is 0 (negative sentiment). # - `y` contains the actual labels. # - `y_weights` contains the weights to give to predictions. # + [markdown] colab_type="text" id="2hdfk3LEjCYL" # <a name="ex07"></a> # ### Exercise 07 # Implement `compute_accuracy`. 
# + colab={} colab_type="code" id="WBqaN5f9grhJ"
# UNQ_C7 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: compute_accuracy
def compute_accuracy(preds, y, y_weights):
    """
    Input:
        preds: a tensor of shape (dim_batch, output_dim)
        y: a tensor of shape (dim_batch, output_dim) with the true labels
        y_weights: a n.ndarray with the a weight for each example
    Output:
        accuracy: a float between 0-1
        weighted_num_correct (np.float32): Sum of the weighted correct predictions
        sum_weights (np.float32): Sum of the weights
    """
    ### START CODE HERE (Replace instances of 'None' with your code) ###
    # Create an array of booleans,
    # True if the probability of positive sentiment is greater than
    # the probability of negative sentiment
    # else False
    is_pos = preds[:, 1] > preds[:, 0]

    # convert the array of booleans into an array of np.int32
    is_pos_int = is_pos.astype(np.int32)

    # compare the array of predictions (as int32) with the target (labels) of type int32
    correct = is_pos_int == y

    # Count the sum of the weights.
    sum_weights = np.sum(y_weights)

    # convert the array of correct predictions (boolean) into an array of np.float32
    correct_float = correct.astype(np.float32)

    # Multiply each prediction with its corresponding weight.
    weighted_correct_float = correct_float * y_weights

    # Sum up the weighted correct predictions (of type np.float32), to go in the
    # numerator.
    weighted_num_correct = np.sum(weighted_correct_float)

    # Divide the number of weighted correct predictions by the sum of the
    # weights.
    accuracy = weighted_num_correct / sum_weights

    ### END CODE HERE ###
    return accuracy, weighted_num_correct, sum_weights


# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="1c7ZOeO0jCYN" outputId="a2a7414d-0168-4c55-b31f-bf263718c330"
# test your function
tmp_val_generator = val_generator(64)

# get one batch
tmp_batch = next(tmp_val_generator)

# Position 0 has the model inputs (tweets as tensors)
# position 1 has the targets (the actual labels)
tmp_inputs, tmp_targets, tmp_example_weights = tmp_batch

# feed the tweet tensors into the model to get a prediction
tmp_pred = training_loop.eval_model(tmp_inputs)

tmp_acc, tmp_num_correct, tmp_num_predictions = compute_accuracy(preds=tmp_pred, y=tmp_targets, y_weights=tmp_example_weights)

print(f"Model's prediction accuracy on a single training batch is: {100 * tmp_acc}%")
print(f"Weighted number of correct predictions {tmp_num_correct}; weighted number of total observations predicted {tmp_num_predictions}")

# + [markdown] colab_type="text" id="h2ep7nNejCYP"
# ##### Expected output (Approximately)
#
# ```
# Model's prediction accuracy on a single training batch is: 92.1875%
# Weighted number of correct predictions 59.0; weighted number of total observations predicted 64
# ```

# + [markdown] colab_type="text" id="dqle69F1grhM"
# <a name="5.2"></a>
# ## 5.2 Testing your model on Validation Data
#
# Now you will write test your model's prediction accuracy on validation data.
#
# This program will take in a data generator and your model.
# - The generator allows you to get batches of data. You can use it with a `for` loop:
#
# ```
# for batch in iterator:
#     # do something with that batch
# ```
#
# `batch` has dimensions `(batch size, 2)`.
# - Column 0 corresponds to the tweet as a tensor.
# - Column 1 corresponds to its target (actual label, positive or negative sentiment).
# - You can feed the tweet into model and it will return the predictions for the batch.
#

# + [markdown] colab_type="text" id="1zwYl_f9jCYP"
# <a name="ex08"></a>
# ### Exercise 08
#
# **Instructions:**
# - Compute the accuracy over all the batches in the validation iterator.
# - Make use of `compute_accuracy`, which you recently implemented, and return the overall accuracy.

# + colab={} colab_type="code" id="HKoTad4ggrhN"
# UNQ_C8 (UNIQUE CELL IDENTIFIER, DO NOT EDIT)
# GRADED FUNCTION: test_model
def test_model(generator, model):
    '''
    Input:
        generator: an iterator instance that provides batches of inputs and targets
        model: a model instance
    Output:
        accuracy: float corresponding to the accuracy
    '''

    accuracy = 0.
    total_num_correct = 0
    total_num_pred = 0

    ### START CODE HERE (Replace instances of 'None' with your code) ###
    for batch in generator:

        # Retrieve the inputs from the batch
        inputs = batch[0]

        # Retrieve the targets (actual labels) from the batch
        targets = batch[1]

        # Retrieve the example weight.
        example_weight = batch[2]

        # Make predictions using the inputs
        pred = model(inputs)

        # Calculate accuracy for the batch by comparing its predictions and targets
        batch_accuracy, batch_num_correct, batch_num_pred = compute_accuracy(
            preds=pred, y=targets, y_weights=example_weight)

        # Update the total number of correct predictions
        # by adding the number of correct predictions from this batch
        total_num_correct += batch_num_correct

        # Update the total number of predictions
        # by adding the number of predictions made for the batch
        total_num_pred += batch_num_pred

    # Calculate accuracy over all examples
    accuracy = total_num_correct / total_num_pred

    ### END CODE HERE ###
    return accuracy


# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="1Rm_k21XgrhQ" outputId="65957ec4-d72b-4363-a5c2-20aa071e9005"
# DO NOT EDIT THIS CELL
# testing the accuracy of your model: this takes around 20 seconds
model = training_loop.eval_model
accuracy = test_model(val_generator(16), model)

print(f'The accuracy of your model on the validation set is {accuracy:.4f}', )

# + [markdown] colab_type="text" id="esUJRMQPgrhS"
# ##### Expected Output (Approximately)
#
# ```CPP
# The accuracy of your 
model on the validation set is 0.9810 # ``` # + [markdown] colab_type="text" id="Mct4P9QZgrhT" # <a name="6"></a> # # Part 6: Testing with your own input # # Finally you will test with your own input. You will see that deepnets are more powerful than the older methods you have used before. Although you go close to 100% accuracy on the first two assignments, the task was way easier. # + colab={} colab_type="code" id="SUq5cw-xgrhU" # this is used to predict on your own sentnece def predict(sentence): inputs = np.array(tweet_to_tensor(sentence, vocab_dict=Vocab)) # Batch size 1, add dimension for batch, to work with the model inputs = inputs[None, :] # predict with the model preds_probs = model(inputs) # Turn probabilities into categories preds = int(preds_probs[0, 1] > preds_probs[0, 0]) sentiment = "negative" if preds == 1: sentiment = 'positive' return preds, sentiment # + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="3RJntC57grhX" outputId="01f92c2d-738f-424b-d2b0-6e1f6ade4fef" # try a positive sentence sentence = "It's such a nice day, think i'll be taking Sid to Ramsgate fish and chips for lunch at Peter's fish factory and then the beach maybe" tmp_pred, tmp_sentiment = predict(sentence) print(f"The sentiment of the sentence \n***\n\"{sentence}\"\n***\nis {tmp_sentiment}.") print() # try a negative sentence sentence = "I hated my day, it was the worst, I'm so sad." tmp_pred, tmp_sentiment = predict(sentence) print(f"The sentiment of the sentence \n***\n\"{sentence}\"\n***\nis {tmp_sentiment}.") # + [markdown] colab_type="text" id="nZmGCheXjCYX" # Notice that the model seems to prefer prediction positive sentiment, even for a sentence that looks negative. # + [markdown] colab_type="text" id="mNg0fAYIgrhd" # ### On Deep Nets # # Deep nets allow you to understand and capture dependencies that you would have not been able to capture with a simple linear regression, or logistic regression. 
# - It also allows you to better use pre-trained embeddings for classification and tends to generalize better.
sequence_models/1-sentiment_analysis/old/C3_W1_Assignment_Sar.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + colab={"base_uri": "https://localhost:8080/", "height": 51} id="FLPEFsdS_ogi" executionInfo={"status": "ok", "timestamp": 1607760842842, "user_tz": -540, "elapsed": 26224, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}} outputId="d4af09a7-5175-4cab-852e-4e1c28214869"
# Edge detection with the Canny operator
import cv2

#----------Setting----------#
# Mount Google Drive so the image files are reachable from Colab
from google import colab
colab.drive.mount('/content/gdrive')

# Working directory on the mounted drive
base_dir = 'gdrive/My Drive/ComputerVisionA/'

# Canny hysteresis thresholds (lower / upper)
canny_min = 100
canny_max = 150

# Input file: <base>/Images/ISIR.png
img_dir = base_dir + 'Images/'
data = 'ISIR'
ext = '.png'
src_path = img_dir + data + ext

# Output file: thresholds are encoded in the name
out_path = f'{img_dir}{data}_Canny_{canny_min}_{canny_max}{ext}'

#----------Image processing----------#
# Load the source image; cv2.imread returns None when the file cannot be read
src_img = cv2.imread(src_path)

if src_img is None:
    print('\n*********************************************************************\n')
    print(src_path + ' cannot be read\n')
    print('*********************************************************************\n')
else:
    # Grayscale conversion, Canny edge detection, then save the edge map
    gray_img = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)
    edge_img = cv2.Canny(gray_img, canny_min, canny_max)
    cv2.imwrite(out_path, edge_img)
ComputerVisionA/Src/EdgeDetectCannyOperator.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import fiona import geopandas as gpd from shapely import geometry, ops import matplotlib.pyplot as plt from matplotlib import gridspec from matplotlib import cm from pysheds.grid import Grid from matplotlib import colors import seaborn as sns import warnings warnings.filterwarnings('ignore') sns.set() sns.set_palette('husl', 8) # %matplotlib inline # - grid = Grid.from_raster('../data/n30w100_dir', data_name='dir') # + dirmap = (64, 128, 1, 2, 4, 8, 16, 32) # Specify pour point x, y = -97.294167, 32.73750 # Delineate the catchment grid.catchment(data='dir', x=x, y=y, dirmap=dirmap, out_name='catch', recursionlimit=15000, xytype='label') # Clip the bounding box to the catchment grid.clip_to('catch', pad=(1,1,1,1)) # Compute flow distance grid.flow_distance(data='catch', x=x, y=y, dirmap=dirmap, out_name='dist', xytype='label') dist = grid.view('dist', nodata=0, dtype=np.float64) # - grid.accumulation(data='catch', dirmap=dirmap) p = grid.polygonize() p = ops.unary_union([geometry.shape(i) for i, j in p]) f = gpd.GeoDataFrame.from_file('/home/mdbartos/Data/GIS/NHDPlus_H_1203/NHDPlus_H_1203_GDB.gdb/', layer='NHDFlowline') f = f[f.geometry.intersects(p)] f.geometry = f.geometry.intersection(p) # + fig, ax = plt.subplots(1, 2, figsize=(12,6)) ax[0].set_aspect('equal') f.plot(ax=ax[0], color='b') xs, ys = p.exterior.xy ax[0].plot(xs, ys, c='k') ax[1].imshow(np.where(grid.mask, grid.view('acc') > 100, np.nan), zorder=1, extent=grid.extent) ax[0].set_title('NHD flowlines') ax[1].set_title('Cells with >100 accumulation') # -
notebooks/find_channels.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Problem set 6: Classification
#
# ## Description
#
# Explore different algorithms to classify Old Bailey cases as involving "stealing" or "other."
#
# ## Imports and setup

# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import os
import seaborn as sns
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer

# Our input text file
old_bailey_file = os.path.join('..', '..', 'data', 'old_bailey', 'old_bailey.txt')
# -

# ## Old Bailey records
#
# We'll work with a set of 3,090 short text documents from the Old Bailey archive, just as we did last week.

# Read cases in as a list of strings
with open(old_bailey_file, 'r') as f:
    bailey = [doc for doc in f.read().split('\n\n')]  # split on consecutive newlines
print("Total documents:", len(bailey))


# Freebie function to assign gold labels to corpus texts
def make_labels(corpus, keyword='steal'):
    '''
    Takes a corpus of documents and a keyword string.
    Assigns labels based on coöccurrence of keyword and ' indicted for '
    in first sentence of document.
    Returns an array of class labels (1=member, 0=nonmember).
    '''
    # Imports are local so the notebook's top cell stays lightweight.
    # NOTE(review): word_tokenize is imported but never used here.
    from nltk import word_tokenize, sent_tokenize
    import re
    find_indictment = re.compile(' indicted for ')
    find_keyword = re.compile(keyword)
    labels = []
    for doc in corpus:
        # Only the first sentence of a case states the indictment.
        first_sentence = sent_tokenize(doc)[0].lower()
        if find_indictment.search(first_sentence) and find_keyword.search(first_sentence):
            labels.append(1)
        else:
            labels.append(0)
    return np.array(labels)


# Generate gold labels
y_bailey = make_labels(bailey, keyword='steal')
print("Label vector shape:", y_bailey.shape)
print("Stealing cases:", np.sum(y_bailey))
print("Fraction of cases labeled 'stealing':", round(np.sum(y_bailey)/len(y_bailey),3))

# Baseline accuracy and F1
# What if we just guess 1 every time?
baseline_accuracy = np.sum(y_bailey)/len(y_bailey)
baseline_precision = baseline_accuracy
baseline_recall = 1.0
baseline_f1 = 2*baseline_precision*baseline_recall/(baseline_precision+baseline_recall)
print("Baseline accuracy:", round(baseline_accuracy, 3))
print("Baseline F1:", round(baseline_f1, 3))


# ## 1. Vectorize (5 points)
#
# Using the vectorizer defined below, transform the input documents into a TFIDF-weighted document-term matrix. Store your vectorized output in a variable named `X_bailey` and print the shape of the resulting matrix.
#
# Note: This is a straight carry-over from the last problem set.

# +
# Custom preprocessing to remove escaped characters in input
def pre_proc(x):
    '''
    Takes a unicode string.
    Lowercases, strips accents, and removes some escapes.
    Returns a standardized version of the string.
    '''
    import unicodedata
    # NOTE(review): "\ in\ form" contains literal backslashes (\<space> is not
    # a valid escape), which is what the source texts actually contain.
    return unicodedata.normalize('NFKD', x.replace("\'", "'").replace("\ in\ form", " inform").lower().strip())


# Set up vectorizer
vectorizer = TfidfVectorizer(
    encoding='utf-8',
    preprocessor=pre_proc,
    min_df=2,    # Note this
    max_df=0.8,  # This, too
    binary=False,
    norm='l2',
    use_idf=True  # And this
)
# -

# Your code here
X_bailey = vectorizer.fit_transform(bailey)
# Get the dimensions of the doc-term matrix
print("Matrix shape:", X_bailey.shape)

# +
# Freebie: plot our data (2-D SVD projection, colored by gold label)
coords_bailey = TruncatedSVD(n_components=2).fit_transform(X_bailey)
plt.subplots(figsize=(12,8))
sns.scatterplot(
    x=coords_bailey[:, 0],
    y=coords_bailey[:, 1],
    hue=y_bailey,
    alpha=0.2,
    linewidth=0
)
plt.title('Old Bailey Theft vs. Other')
plt.show()
# -

# ## Freebies: Classification and cross-validation how-to
#
# Below is a sample of how to set up classifiers and perform cross-validation.

# +
# Cross-validate two simple classifiers on our data
from sklearn.model_selection import cross_validate
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression

# Classifiers to test
classifiers = {
    'kNN': KNeighborsClassifier(),
    'Logit':LogisticRegression()
}

scores = {}  # Store cross-validation results in a dictionary

for classifier in classifiers:
    scores[classifier] = cross_validate(  # perform cross-validation
        classifiers[classifier],  # classifier object
        X_bailey,                 # feature matrix
        y_bailey,                 # gold labels
        cv=10,                    # number of folds
        scoring=['accuracy', 'f1', 'f1_macro', 'f1_micro']  # scoring methods
    )
# -

# Examine the performance of our simple classifiers
# Freebie function to summarize and display classifier scores
def compare_scores(scores_dict):
    '''
    Takes a dictionary of cross_validate scores.
    Returns a color-coded Pandas dataframe that summarizes those scores.
    '''
    import pandas as pd
    # Mean over folds for every metric, then color low->high as red->green.
    df = pd.DataFrame(scores_dict).T.applymap(np.mean).style.background_gradient(cmap='RdYlGn')
    return df


# Compare cross-validation scores
# Note that colorization of the `time` columns is counterintuitive
compare_scores(scores)

# ## 2. Add results from three more classifiers (25 points)
#
# * Set up the three classifiers imported below (decision tree, random forest, and multinomial naïve Bayes). Use **default** parameters only (that is, do not set any classifier parameters yourself). (10 points total)
# * Cross-validate the three new classifiers, saving the scoring output to the same `scores` dictionary as above. (10 points)
# * Use the `compare_scores` function to display the scores of all five classifiers. (5 points)
#
# This all takes less than 30 seconds total to run on my laptop.

from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import MultinomialNB

# ## 3. Compare the unoptimized performance of the five classifiers (10 points)
#
# How would you summarize the performance of the five classifiers prior to any optimization? Consider classification performance relative to one another and to baseline, as well as computation time.

# **Your comparison here**

# ## 4. Improve two of the classifiers (40 points)
#
# See how much you can improve the performance of any two of the five classifiers. You might try:
#
# * Changing the vectorization parameters. (Hint: it can help to use fewer input features).
# * Changing the classifier parameters.
#     * See the `sklearn` [documentation](https://scikit-learn.org/stable/supervised_learning.html#supervised-learning) for your chosen classifiers for a list of available options.
# * Trying a different classifier entirely.
#     * If you do this, make sure you give careful consideration to the settings of the new classifier, too. **Using default parameters will result in a score of zero.**
#
# No matter what you try, you must **document your process**. In other words, don't just change settings in a cell and run it over and over. You should iterate over different options, storing your cross-validation scores for each combination of settings.
#
# Finally, display your results using the `compare_scores` function.
#
# Points breakdown: 20 points total for each classifier, of which 10 are for iterating over a range of parameter values, 5 are for performing cross validation correctly and displaying summary scores, and 5 are for actually improving performance (by any amount).

# +
# Your code here
# -

# ## 5. Examine and discuss classification errors (20 points)
#
# First, rerun your best-performing classifier over the full corpus and save the resulting labels as `y_best`. (10 points)
#
# To do this, first set up a classifier with your optimized parameters; then use the classifier's `fit` method to train the model; then use the fitted classifier's `predict` method to compute the labels. In schematic form, this looks like:
#
# ```
# y_labels = Classifier(options).fit(X, y).predict(X)
# ```
#
# (Note that training and testing over the same data is poor practice, since it encourages overfitting. We avoided that problem by using cross validation above. We're doing it here only for ease of examination.)
#
# Then use the `pull_errors` function below to read through some cases that were incorrectly classified by your best-performing classifier. Write a paragraph that summarizes any patterns you can identify in the mis-classified cases. Can you explain what may be confusing the classifier? Does your analysis suggest any avenues for improved performance? (10 points)

# +
# Your code here
# -

# Function to examine some classification errors
def pull_errors(labels, gold_labels=y_bailey, corpus=bailey, n=3):
    '''
    Takes:
        an array of computed labels
        an array of gold (correct) labels
        a list of corpus documents
        an int of cases to display
    '''
    # NOTE(review): the defaults bind the module-level y_bailey/bailey objects
    # at definition time, so they reflect the state of those globals when this
    # cell was run.
    import pandas as pd
    labeled_cases = pd.DataFrame(
        {
            'gold':gold_labels,
            'computed':labels,
            'text':corpus
        }
    )
    # Rows where the computed label disagrees with the gold label.
    errors = labeled_cases.loc[labeled_cases.gold != labeled_cases.computed]
    with pd.option_context('display.max_colwidth', None):
        # display() is the IPython rich-display builtin (notebook-only).
        display(errors.sample(n))


# Pull errors
# NOTE(review): y_best must be defined by your section-5 code above before
# this cell will run.
pull_errors(y_best, y_bailey, bailey, n=3)

# **Your discussion here**
problem_sets/ps_06/ps_06_classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Batch-run the RRT* planner with increasing iteration budgets and plot how
# the resulting path length converges toward the known optimum.

import sys
import os
import lxml.etree as ET
import matplotlib.pyplot as plt


def get_data_from_file_RRT(filePath: str) -> tuple:
    """Parse one planner log file.

    Returns a (path_found, time, distance) tuple; distance is 0.0 when the
    planner did not find a path.
    """
    tree = ET.parse(filePath)
    root = tree.getroot()
    time = float(root.find('time').text)
    # The log stores booleans as the literal strings 'true'/'false'.
    path_found = root.find('pathfound').text == 'true'
    distance = 0.0
    if path_found:
        distance = float(root.find('distance').text)
    return (path_found, time, distance)


def get_all_data(folder_RRT: str) -> list:
    """Collect (time, distance) pairs from every log file in folder_RRT.

    Files where no path was found are skipped and their names printed.
    Note: os.listdir order is filesystem-dependent; callers that rely on
    iteration order should sort.
    """
    RRT_data = []
    RRT_path_not_found = []
    for file in os.listdir(folder_RRT):
        path_found, run_time, distance = get_data_from_file_RRT(
            os.path.join(folder_RRT, file))
        if path_found:
            RRT_data.append((run_time, distance))
        else:
            RRT_path_not_found.append(file)
    if RRT_path_not_found:
        print('RRT path not found:', *RRT_path_not_found)
    return RRT_data


# +
def process(binaryRRT: str, filePath: str) -> str:
    """Generate 100 RRT* task files with iteration budgets 10000..109000,
    run the planner binary on each, and return the directory that holds
    the planner's output logs.

    binaryRRT -- path to the planner executable
    filePath  -- path to a template task XML (its <path>, <searchtype> and
                 <numberofiterations> elements are rewritten per run)
    """
    filePath = os.path.normpath(filePath)
    base_dir, base_name = os.path.split(filePath)

    def make_unique(path: str, suffix: str) -> str:
        # First directory name of the form 'RRTstar<suffix>[N]' that does
        # not exist yet, so repeated runs never clobber earlier results.
        name = 'RRTstar' + suffix
        result = name
        index = 1
        while True:
            if not os.path.exists(os.path.join(path, result)):
                return os.path.join(path, result)
            result = name + str(index)
            index += 1

    dir_input = make_unique(base_dir, 'input')
    os.mkdir(dir_input)
    dir_output = make_unique(base_dir, 'output')
    os.mkdir(dir_output)

    tree = ET.parse(filePath)
    root = tree.getroot()
    log = root.find('log')
    path = log.find('path')
    algo = root.find('algorithm')
    search_type = algo.find('searchtype')
    number_of_iterations = algo.find('numberofiterations')

    name = base_name.split('.')[0]
    # The search type is the same for every run, so set it once.
    search_type.text = 'rrtstar'
    for index in range(100):
        path_input_file = os.path.join(dir_input, f'{name}_{index}.xml')
        path_output_file = os.path.join(dir_output, f'{name}_log_{index}.xml')
        path.text = path_output_file
        number_of_iterations.text = str(10000 + index * 1000)
        tree.write(path_input_file, pretty_print=True)
        # NOTE(review): os.system goes through the shell; safe here only
        # because the paths contain no spaces. subprocess.run([...]) with a
        # list argument would be more robust.
        os.system(f'{binaryRRT} {path_input_file}')
    return dir_output
# -

moscow_0_1024_02 = process('D:\\Projects\\PathPlanningAlgorithms-RRT-RRTstar-\\bin\\release\\RRT-RRTstar.exe',
                           'D:\\Projects\\PathPlanningAlgorithms-RRT-RRTstar-\\tests\\analysis\\moscow102402\\Moscow_0_1024_rrt.xml')
print(moscow_0_1024_02)

london_1_1024_1 = process('D:\\Projects\\PathPlanningAlgorithms-RRT-RRTstar-\\bin\\release\\RRT-RRTstar.exe',
                          'D:\\Projects\\PathPlanningAlgorithms-RRT-RRTstar-\\tests\\analysis\\london102411\\London_1_1024_rrt.xml')
print(london_1_1024_1)

brc201d1 = process('D:\\Projects\\PathPlanningAlgorithms-RRT-RRTstar-\\bin\\release\\RRT-RRTstar.exe',
                   'D:\\Projects\\PathPlanningAlgorithms-RRT-RRTstar-\\tests\\analysis\\brc201d1\\brc201d_rrt.xml')
print(brc201d1)


def draw_data(filePath: str, optimal: float):
    """Plot path distance against the iteration budget for every log in
    filePath, with a reference line at the known optimal distance."""
    data = get_all_data(filePath)
    fig, ax = plt.subplots()
    # Budgets were generated as 10000 + 1000*i in process(), mirrored here.
    ax.plot([(10000 + 1000 * i) for i in range(len(data))],
            [e[1] for e in data])
    ax.set(xlabel="Number of iterations", ylabel='Distance',
           title='Changes in distance with increasing iterations of the RRT* algorithm')
    ax.grid()
    # Horizontal line marking the optimal distance.
    ax.axline((0, optimal), (1, optimal))
    plt.show()


draw_data('D:\\Projects\\PathPlanningAlgorithms-RRT-RRTstar-\\tests\\analysis\\moscow102402\\RRTstaroutput', 1085.5454)
draw_data('D:\\Projects\\PathPlanningAlgorithms-RRT-RRTstar-\\tests\\analysis\\london102411\\RRTstaroutput', 874.00922)
draw_data('D:\\Projects\\PathPlanningAlgorithms-RRT-RRTstar-\\tests\\analysis\\brc201d1\\RRTstaroutput', 851.58282)
tests/analysis/src/.ipynb_checkpoints/rrtstartan-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W2D3_BiologicalNeuronModels/W2D3_Tutorial4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>[![Kaggle](https://kaggle.com/static/images/open-in-kaggle.svg)](https://kaggle.com/kernels/welcome?src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W2D3_BiologicalNeuronModels/W2D3_Tutorial4.ipynb) # - # # Tutorial 4: Spike-timing dependent plasticity (STDP) # **Week 2, Day 3: Biological Neuron Models** # # **By Neuromatch Academy** # # __Content creators:__ <NAME>, <NAME>, <NAME>, <NAME>, <NAME> # # __Content reviewers:__ <NAME>, <NAME>, <NAME>, <NAME> # **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs** # # <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p> # --- # # Tutorial Objectives # In this tutorial, we will focus on building a model of a synapse in which its synaptic strength changes as a function of the relative timing (i.e., time difference) between the spikes of the presynaptic and postsynaptic neurons, respectively. This change in the synaptic weight is known as **spike-timing dependent plasticity (STDP)**. # # Our goals for this tutorial are to: # # - build a model of synapse that show STDP # # - study how correlations in input spike trains influence the distribution of synaptic weights # # Towards these goals, we will model the presynaptic input as Poisson type spike trains. The postsynaptic neuron will be modeled as an LIF neuron (see Tutorial 1). 
# # Throughout this tutorial, we assume that a single postsynaptic neuron is driven by $N$ presynaptic neurons. That is, there are $N$ synapses, and we will study how their weights depend on the statistics or the input spike trains and their timing with respect to the spikes of the postsynaptic neuron. # # --- # # Setup # + cellView="both" # Import libraries import matplotlib.pyplot as plt import numpy as np import time # + cellView="form" # @title Figure Settings import ipywidgets as widgets # interactive display # %config InlineBackend.figure_format='retina' # use NMA plot style plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle") my_layout = widgets.Layout() # + cellView="form" # @title Helper functions def default_pars_STDP(**kwargs): pars = {} # typical neuron parameters pars['V_th'] = -55. # spike threshold [mV] pars['V_reset'] = -75. # reset potential [mV] pars['tau_m'] = 10. # membrane time constant [ms] pars['V_init'] = -65. # initial potential [mV] pars['V_L'] = -75. # leak reversal potential [mV] pars['tref'] = 2. # refractory time (ms) # STDP parameters pars['A_plus'] = 0.008 # magnitude of LTP pars['A_minus'] = pars['A_plus'] * 1.10 # magnitude of LTD pars['tau_stdp'] = 20. # STDP time constant [ms] # simulation parameters pars['T'] = 400. # Total duration of simulation [ms] pars['dt'] = .1 # Simulation time step [ms] # external parameters if any for k in kwargs: pars[k] = kwargs[k] pars['range_t'] = np.arange(0, pars['T'], pars['dt']) # Vector of discretized time points [ms] return pars def Poisson_generator(pars, rate, n, myseed=False): """Generates poisson trains Args: pars : parameter dictionary rate : noise amplitute [Hz] n : number of Poisson trains myseed : random seed. 
int or boolean Returns: pre_spike_train : spike train matrix, ith row represents whether there is a spike in ith spike train over time (1 if spike, 0 otherwise) """ # Retrieve simulation parameters dt, range_t = pars['dt'], pars['range_t'] Lt = range_t.size # set random seed if myseed: np.random.seed(seed=myseed) else: np.random.seed() # generate uniformly distributed random variables u_rand = np.random.rand(n, Lt) # generate Poisson train poisson_train = 1. * (u_rand < rate * (dt / 1000.)) return poisson_train def my_raster_plot(range_t, spike_train, n): """Generates poisson trains Args: range_t : time sequence spike_train : binary spike trains, with shape (N, Lt) n : number of Poisson trains plot Returns: Raster_plot of the spike train """ # Find the number of all the spike trains N = spike_train.shape[0] # n should be smaller than N: if n > N: print('The number n exceeds the size of spike trains') print('The number n is set to be the size of spike trains') n = N # Raster plot i = 0 while i <= n: if spike_train[i, :].sum() > 0.: t_sp = range_t[spike_train[i, :] > 0.5] # spike times plt.plot(t_sp, i * np.ones(len(t_sp)), 'k|', ms=10, markeredgewidth=2) i += 1 plt.xlim([range_t[0], range_t[-1]]) plt.ylim([-0.5, n + 0.5]) plt.xlabel('Time (ms)') plt.ylabel('Neuron ID') def my_example_P(): spT = pre_spike_train_ex[pre_spike_train_ex.sum(axis=1) > 0., :] plt.figure(figsize=(7, 6)) plt.subplot(211) color_set = ['r', 'b', 'k', 'orange', 'c'] for i in range(spT.shape[0]): t_sp = pars['range_t'][spT[i, :] > 0.5] # spike times plt.plot(t_sp, i*np.ones(len(t_sp)), '|', color=color_set[i], ms=10, markeredgewidth=2) plt.xlabel('Time (ms)') plt.ylabel('Neuron ID') plt.xlim(0, 200) plt.subplot(212) for k in range(5): plt.plot(pars['range_t'], P[k, :], color=color_set[k], lw=1.5) plt.xlabel('Time (s)') plt.ylabel('P(t)') plt.xlim(0, 200) plt.tight_layout() def mySTDP_plot(A_plus, A_minus, tau_stdp, time_diff, dW): plt.figure() plt.plot([-5 * tau_stdp, 5 * tau_stdp], [0, 0], 'k', 
linestyle=':') plt.plot([0, 0], [-A_minus, A_plus], 'k', linestyle=':') plt.plot(time_diff[time_diff <= 0], dW[time_diff <= 0], 'ro') plt.plot(time_diff[time_diff > 0], dW[time_diff > 0], 'bo') plt.xlabel(r't$_{\mathrm{pre}}$ - t$_{\mathrm{post}}$ (ms)') plt.ylabel(r'$\Delta$W', fontsize=12) plt.title('Biphasic STDP', fontsize=12, fontweight='bold') plt.show() # - # --- # # Section 1: Spike-timing dependent plasticity (STDP) # + cellView="form" # @title Video 1: STDP from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id=id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id="", width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id="luHL-mO5S1w", width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') display(out) # - # ## Model of STDP # # The phenomenology of STDP is generally described as a biphasic exponentially decaying function. 
That is, the instantaneous change in weights is given by: # # \begin{eqnarray} # & \Delta W &=& A_+ e^{ (t_{pre}-t_{post})/\tau_+} & \text{if} \hspace{5mm} t_{post} > t_{pre}& \\ # & \Delta W &=& -A_- e^{- (t_{pre}-t_{post})/\tau_-} &\text{if} \hspace{5mm} t_{post} < t_{pre}& \\ # \end{eqnarray} # # where $\Delta W$ denotes the change in the synaptic weight, $A_+$ and $A_-$ determine the maxmimum amount of synaptic modification (which occurs when the timing difference between presynaptic and postsynaptic spikes is close to zero), $\tau_+$ and $\tau_-$ determine the ranges of pre-to-postsynaptic interspike intervals over which synaptic strengthening or weakening occurs. Thus, $\Delta W > 0 $ means that postsynaptic neuron spikes after the presynaptic neuron. # # This model captures the phenomena that repeated occurrences of presynaptic spikes within a few milliseconds **before** postsynaptic action potentials lead to long-term potentiation (LTP) of the synapse, whereas repeated occurrences of presynaptic spikes **after** the postsynaptic ones lead to long-term depression (LTD) of the same synapse. # # The latency between presynaptic and postsynaptic spike ($\Delta t$) is defined as: # # \begin{eqnarray} # \Delta t = t_{\rm pre} - t_{\rm post} # \end{eqnarray} # # where $t_{\rm pre}$ and $t_{\rm post}$ are the timings of the presynaptic and postsynaptic spikes, respectively. # # Complete the following code to set the STDP parameters and plot the STDP function. Note that for simplicity, we assume **$\tau_{+} = \tau_{-} = \tau_{\rm stdp}$**. # ## Exercise 1: Compute the STDP changes $\Delta W$ # # Note, as shown above, the expression of $\Delta W$ is different for $t_{post}>t_{pre}$ and $t_{post}<t_{pre}$. In the code, we use the parameter `time_diff` that describes the $t_{pre}-t_{post}$, as given above. 
# # After implementing the code, you can visualize the STDP kernel, which describes how much the synaptic weight will change given a latency between the presynaptic and postsynaptic spikes. # + def Delta_W(pars, A_plus, A_minus, tau_stdp): """ Plot STDP biphasic exponential decaying function Args: pars : parameter dictionary A_plus : (float) maxmimum amount of synaptic modification which occurs when the timing difference between pre- and post-synaptic spikes is positive A_plus : (float) maxmimum amount of synaptic modification which occurs when the timing difference between pre- and post-synaptic spikes is negative tau_stdp : the ranges of pre-to-postsynaptic interspike intervals over which synaptic strengthening or weakening occurs Returns: dW : instantaneous change in weights """ ####################################################################### ## TODO for students: compute dP, then remove the NotImplementedError # # Fill out when you finish the function raise NotImplementedError("Student excercise: compute dW, the change in weights!") ####################################################################### # STDP change dW = np.zeros(len(time_diff)) # Calculate dW for LTP dW[time_diff <= 0] = ... # Calculate dW for LTD dW[time_diff > 0] = ... 
return dW pars = default_pars_STDP() # Get parameters A_plus, A_minus, tau_stdp = pars['A_plus'], pars['A_minus'], pars['tau_stdp'] # pre_spike time - post_spike time time_diff = np.linspace(-5 * tau_stdp, 5 * tau_stdp, 50) # Uncomment to test your function # dW = Delta_W(pars, A_plus, A_minus, tau_stdp) # mySTDP_plot(A_plus, A_minus, tau_stdp, time_diff, dW) # + # to_remove solution def Delta_W(pars, A_plus, A_minus, tau_stdp): """ Plot STDP biphasic exponential decaying function Args: pars : parameter dictionary A_plus : (float) maxmimum amount of synaptic modification which occurs when the timing difference between pre- and post-synaptic spikes is positive A_plus : (float) maxmimum amount of synaptic modification which occurs when the timing difference between pre- and post-synaptic spikes is negative tau_stdp : the ranges of pre-to-postsynaptic interspike intervals over which synaptic strengthening or weakening occurs Returns: dW : instantaneous change in weights """ # STDP change dW = np.zeros(len(time_diff)) # Calculate dW for LTP dW[time_diff <= 0] = A_plus * np.exp(time_diff[time_diff <= 0] / tau_stdp) # Calculate dW for LTD dW[time_diff > 0] = -A_minus * np.exp(-time_diff[time_diff > 0] / tau_stdp) return dW pars = default_pars_STDP() # Get parameters A_plus, A_minus, tau_stdp = pars['A_plus'], pars['A_minus'], pars['tau_stdp'] # pre_spike time - post_spike time time_diff = np.linspace(-5 * tau_stdp, 5 * tau_stdp, 50) # Uncomment to test your function dW = Delta_W(pars, A_plus, A_minus, tau_stdp) with plt.xkcd(): mySTDP_plot(A_plus, A_minus, tau_stdp, time_diff, dW) # - # ## Keeping track of pre- and postsynaptic spikes # Since a neuron will receive numerous presynaptic spike inputs, in order to implement STDP by taking into account different synapses, we first have to keep track of the pre- and postsynaptic spike times throughout the simulation. 
# # A convenient way to do this is to define the following equation for each postsynaptic neuron: # # \begin{eqnarray} # \tau_{-} \frac{dM}{dt} = -M # \end{eqnarray} # # and whenever the postsynaptic neuron spikes, # # \begin{eqnarray} # M(t) = M(t) - A_{-} # \end{eqnarray} # # This way $M(t)$ tracks the number of postsynaptic spikes over the timescale $\tau_{-}$. # # Similarly, for each presynaptic neuron, we define: # # \begin{eqnarray} # \tau_{+} \frac{dP}{dt} = -P # \end{eqnarray} # # and whenever there is spike on the presynaptic neuron, # # \begin{eqnarray} # P(t) = P(t) + A_{+} # \end{eqnarray} # # The variables $M(t)$ and $P(t)$ are very similar to the equations for the synaptic conductances, i.e., $g_{i}(t)$, except that they are used to keep track of pre- and postsynaptic spike times on a much longer timescale. Note that, $M(t)$ is always negative, and $P(t)$ is always positive. You can probably already guess that $M$ is used to induce LTD and $P$ to induce LTP because they are updated by $A_{-}$ and $A_{+}$, respectively. # # **Important note:** $P(t)$ depends on the presynaptic spike times. If we know the presynaptic spike times, $P$ can be generated before simulating the postsynaptic neuron and the corresponding STDP weights. # ## Visualization of $P$ # Here, we will consider a scenario in which there is a single postsynaptic neuron connected to $N$ presynaptic neurons. # # For instance, we have one postsynaptic neuron which receives Poisson type spiking inputs from five presynaptic neurons. # # We can simulate $P$ for each one of the presynaptic neurons. # ## Exercise 2: Compute $dP$ # # Here, yet again, we use the Euler scheme, which has been introduced several times in the previous tutorials. # # Similar to the dynamics of the membrane potential in the LIF model, in a time step $dt$, $P(t)$ will decrease by an amount of $\displaystyle{\frac{dt}{\tau_+}P(t)}$. 
Whereas, if a presynaptic spike arrives, $P(t)$ will instantaneously increase by an amount of $A_+$. Therefore, # # \\ # # \begin{eqnarray} # dP = -\displaystyle{\frac{dt}{\tau_+}P[t]} + A_+\cdot \text{sp_or_not}[t+dt] # \end{eqnarray} # # \\ # # where the $\text{sp_or_not}$ is a binary variable, i.e., $\text{sp_or_not}=1$ if there is a spike within $dt$, and $\text{sp_or_not}=0$ otherwise. # + def generate_P(pars, pre_spike_train_ex): """ track of pre-synaptic spikes Args: pars : parameter dictionary pre_spike_train_ex : binary spike train input from presynaptic excitatory neuron Returns: P : LTP ratio """ # Get parameters A_plus, tau_stdp = pars['A_plus'], pars['tau_stdp'] dt, range_t = pars['dt'], pars['range_t'] Lt = range_t.size # Initialize P = np.zeros(pre_spike_train_ex.shape) for it in range(Lt - 1): ####################################################################### ## TODO for students: compute dP, then remove the NotImplementedError # # Fill out when you finish the function raise NotImplementedError("Student excercise: compute P, the change of presynaptic spike") ####################################################################### # Calculate the delta increment dP dP = ... # Update P P[:, it + 1] = P[:, it] + dP return P # Uncomment these lines to test your function # pars = default_pars_STDP(T=200., dt=1.) 
# pre_spike_train_ex = Poisson_generator(pars, rate=10, n=5, myseed=2020) # P = generate_P(pars, pre_spike_train_ex) # my_example_P() # + # to_remove solution def generate_P(pars, pre_spike_train_ex): """ track of pre-synaptic spikes Args: pars : parameter dictionary pre_spike_train_ex : binary spike train input from presynaptic excitatory neuron Returns: P : LTP ratio """ # Get parameters A_plus, tau_stdp = pars['A_plus'], pars['tau_stdp'] dt, range_t = pars['dt'], pars['range_t'] Lt = range_t.size # Initialize P = np.zeros(pre_spike_train_ex.shape) for it in range(Lt - 1): # Calculate the delta increment dP dP = -(dt / tau_stdp) * P[:, it] + A_plus * pre_spike_train_ex[:, it + 1] # Update P P[:, it + 1] = P[:, it] + dP return P pars = default_pars_STDP(T=200., dt=1.) pre_spike_train_ex = Poisson_generator(pars, rate=10, n=5, myseed=2020) P = generate_P(pars, pre_spike_train_ex) with plt.xkcd(): my_example_P() # - # --- # # Section 2: Implementation of STDP # # Finally, to implement STDP in spiking networks, we will change the value of the peak synaptic conductance based on the presynaptic and postsynaptic timing, thus using the variables $P(t)$ and $M(t)$. # # Each synapse $i$ has its own peak synaptic conductance ($\bar g_i$), which may vary between $[0, \bar g_{max}]$, and will be modified depending on the presynaptic and postsynaptic timing. # # * When the $i_{th}$ presynaptic neuron elicits a spike, its corresponding peak conductance is updated according to the following equation: # # \\ # # \begin{eqnarray} # \bar g_i = \bar g_i + M(t)\bar g_{max} # \end{eqnarray} # # \\ # # Note that, $M(t)$ tracks the time since the last postsynaptic potential and is always negative. Hence, if *the postsynaptic neuron spikes shortly before the presynaptic neuron*, the above equation shows that the peak conductance will decrease. 
# # * When the postsynaptic neuron spikes, the peak conductance of **each** synapse is updated according to: # # \\ # # \begin{eqnarray} # \bar g_i = \bar g_i + P_i(t)\bar g_{max}, \forall i # \end{eqnarray} # # \\ # # Note that, $P_i(t)$ tracks the time since the last spike of $i_{th}$ pre-synaptic neuron and is always positive. # # Thus, the equation given above shows that if the presynaptic neuron spikes before the postsynaptic neuron, its peak conductance will increase. # # ### LIF neuron connected with synapses that show STDP # In the following exercise, we connect $N$ presynaptic neurons to a single postsynaptic neuron. We do not need to simulate the dynamics of each presynaptic neuron as we are only concerned about their spike times. So, we will generate $N$ Poisson type spikes. Here, we will assume that all these inputs are excitatory. # # We need to simulate the dynamics of the postsynaptic neuron as we do not know its spike times. We model the postsynaptic neuron as an LIF neuron receiving only excitatory inputs. # # \\ # # \begin{eqnarray} # \tau_m\frac{dV}{dt} = -(V-E_L) - g_E(t) (V(t)-E_E)\, # \end{eqnarray} # # \\ # # where the total excitatory synaptic conductance $g_{E}(t)$ is given by: # # \\ # # \begin{eqnarray} # g_E(t) = \sum_{i=1}^{N} g_i(t) \, # \end{eqnarray} # # \\ # # While simulating STDP, it is important to make sure that $\bar g_i$ never goes outside of its bounds. 
# # + cellView="form" # @title Function for LIF neuron with STDP synapses def run_LIF_cond_STDP(pars, pre_spike_train_ex): """ conductance-based LIF dynamics Args: pars : parameter dictionary pre_spike_train_ex : spike train input from presynaptic excitatory neuron Returns: rec_spikes : spike times rec_v : mebrane potential gE : postsynaptic excitatory conductance """ # Retrieve parameters V_th, V_reset = pars['V_th'], pars['V_reset'] tau_m = pars['tau_m'] V_init, V_L = pars['V_init'], pars['V_L'] gE_bar, VE, tau_syn_E = pars['gE_bar'], pars['VE'], pars['tau_syn_E'] gE_init = pars['gE_init'] tref = pars['tref'] A_minus, tau_stdp = pars['A_minus'], pars['tau_stdp'] dt, range_t = pars['dt'], pars['range_t'] Lt = range_t.size P = generate_P(pars, pre_spike_train_ex) # Initialize tr = 0. v = np.zeros(Lt) v[0] = V_init M = np.zeros(Lt) gE = np.zeros(Lt) gE_bar_update = np.zeros(pre_spike_train_ex.shape) gE_bar_update[:, 0] = gE_init # note: gE_bar is the maximum value # simulation rec_spikes = [] # recording spike times for it in range(Lt - 1): if tr > 0: v[it] = V_reset tr = tr - 1 elif v[it] >= V_th: # reset voltage and record spike event rec_spikes.append(it) v[it] = V_reset M[it] = M[it] - A_minus gE_bar_update[:, it] = gE_bar_update[:, it] + P[:, it] * gE_bar id_temp = gE_bar_update[:, it] > gE_bar gE_bar_update[id_temp, it] = gE_bar tr = tref / dt # update the synaptic conductance M[it + 1] = M[it] - dt / tau_stdp * M[it] gE[it + 1] = gE[it] - (dt / tau_syn_E) * gE[it] + (gE_bar_update[:, it] * pre_spike_train_ex[:, it]).sum() gE_bar_update[:, it + 1] = gE_bar_update[:, it] + M[it]*pre_spike_train_ex[:, it]*gE_bar id_temp = gE_bar_update[:, it + 1] < 0 gE_bar_update[id_temp, it + 1] = 0. 
# calculate the increment of the membrane potential dv = (-(v[it] - V_L) - gE[it + 1] * (v[it] - VE)) * (dt / tau_m) # update membrane potential v[it + 1] = v[it] + dv rec_spikes = np.array(rec_spikes) * dt return v, rec_spikes, gE, P, M, gE_bar_update # - # ## Evolution of excitatory synaptic conductance # In the following exercise, we will simulate an LIF neuron receiving input from $N=300$ presynaptic neurons. # + pars = default_pars_STDP(T=200., dt=1.) # Simulation duration 200 ms pars['gE_bar'] = 0.024 # max synaptic conductance pars['gE_init'] = 0.024 # initial synaptic conductance pars['VE'] = 0. # [mV] Synapse reversal potential pars['tau_syn_E'] = 5. # [ms] EPSP time constant # generate Poisson type spike trains pre_spike_train_ex = Poisson_generator(pars, rate=10, n=300, myseed=2020) # simulate the LIF neuron and record the synaptic conductance v, rec_spikes, gE, P, M, gE_bar_update = run_LIF_cond_STDP(pars, pre_spike_train_ex) # + cellView="form" # @title Figures of the evolution of synaptic conductance # @markdown Run this cell to see the figures! plt.figure(figsize=(12., 8)) plt.subplot(321) dt, range_t = pars['dt'], pars['range_t'] if rec_spikes.size: sp_num = (rec_spikes / dt).astype(int) - 1 v[sp_num] += 10 # add artificial spikes plt.plot(pars['range_t'], v, 'k') plt.xlabel('Time (ms)') plt.ylabel('V (mV)') plt.subplot(322) # Plot the sample presynaptic spike trains my_raster_plot(pars['range_t'], pre_spike_train_ex, 10) plt.subplot(323) plt.plot(pars['range_t'], M, 'k') plt.xlabel('Time (ms)') plt.ylabel('M') plt.subplot(324) for i in range(10): plt.plot(pars['range_t'], P[i, :]) plt.xlabel('Time (ms)') plt.ylabel('P') plt.subplot(325) for i in range(10): plt.plot(pars['range_t'], gE_bar_update[i, :]) plt.xlabel('Time (ms)') plt.ylabel(r'$\bar g$') plt.subplot(326) plt.plot(pars['range_t'], gE, 'r') plt.xlabel('Time (ms)') plt.ylabel(r'$g_E$') plt.tight_layout() plt.show() # - # ## Think! 
# * In the above, even though all the presynaptic neurons have the same average firing rate, many of the synapses seem to have been weakened. Did you expect that?
#
# * Total synaptic conductance is fluctuating over time. How would you expect $g_E$ to fluctuate if synapses did not show any STDP-like behavior?
#
# * Do synaptic weights ever reach a stationary state when synapses show STDP?
pars['gE_bar'] = 0.024 pars['gE_init'] = 0.014 # initial synaptic conductance pars['VE'] = 0. # [mV] pars['tau_syn_E'] = 5. # [ms] starttime = time.perf_counter() pre_spike_train_ex = Poisson_generator(pars, rate=inputrate, n=300, myseed=2020) # generate Poisson trains v, rec_spikes, gE, P, M, gE_bar_update = run_LIF_cond_STDP(pars, pre_spike_train_ex) # simulate LIF neuron with STDP gbar_norm = gE_bar_update/pars['gE_bar'] # calculate the ratio of the synaptic conductance endtime = time.perf_counter() timecost = (endtime - starttime) / 60. print('Total simulation time is %.3f min' % timecost) my_layout.width = '620px' @widgets.interact( sample_time=widgets.FloatSlider(0.5, min=0., max=1., step=0.1, layout=my_layout) ) def my_visual_STDP_distribution(sample_time=0.0): sample_time = int(sample_time * pars['range_t'].size) - 1 sample_time = sample_time * (sample_time > 0) plt.figure(figsize=(8, 8)) ax1 = plt.subplot(211) for i in range(50): ax1.plot(pars['range_t'][::1000] / 1000., gE_bar_update[i, ::1000], lw=1., alpha=0.7) ax1.axvline(1e-3 * pars['range_t'][sample_time], 0., 1., color='k', ls='--') ax1.set_ylim(0, 0.025) ax1.set_xlim(-2, 122) ax1.set_xlabel('Time (s)') ax1.set_ylabel(r'$\bar{g}$') bins = np.arange(-.05, 1.05, .05) g_dis, _ = np.histogram(gbar_norm[:, sample_time], bins) ax2 = plt.subplot(212) ax2.bar(bins[1:], g_dis, color='b', alpha=0.5, width=0.05) ax2.set_xlim(-0.1, 1.1) ax2.set_xlabel(r'$\bar{g}/g_{\mathrm{max}}$') ax2.set_ylabel('Number') ax2.set_title(('Time = %.1f s' % (1e-3 * pars['range_t'][sample_time])), fontweight='bold') plt.show() print(help(example_LIF_STDP)) # - # ### Interactive Demo: Example of an LIF model with STDP # + cellView="form" # @title # @markdown Make sure you execute this cell to enable the widget! example_LIF_STDP(inputrate=15) # - # ## Think! # Increase the firing rate (i.e., 30 Hz) of presynaptic neurons, and investigate the effect on the dynamics of synaptic weight distribution. 
# + # to_remove explanation """ Discussion: As we increase the input firing rate, more synapses move to the extreme values, either go to zero or to maximal conductance. Using 15Hz, the distribution of the weights at the end of the simulation is more like a bell-shaped, skewed, with more synapses to be potentiated. However, increasing the firing rate, in early times, almost all synapses undergo depression, and then only a few escape and they become potentiated. """; # - # # Section 3: Effect of input correlations # # Thus far, we assumed that the input population was uncorrelated. What do you think will happen if presynaptic neurons were correlated? # # In the following, we will modify the input such that first $L$ neurons have identical spike trains while the remaining inputs are uncorrelated. This is a highly simplified model of introducing correlations. You can try to code your own model of correlated spike trains. # + cellView="form" #@title Function for LIF neuron with STDP synapses receiving correlated inputs def example_LIF_STDP_corrInput(inputrate=20., Tsim=120000.): """ A LIF model equipped with STDP synapses, receiving correlated inputs Args: intputrate : The rate used for generate presynaptic spike trains Tsim : Total simulation time Returns: Interactive demo: Visualization of synaptic weights """ np.random.seed(2020) pars = default_pars_STDP(T=Tsim, dt=1.) pars['gE_bar'] = 0.024 pars['VE'] = 0. # [mV] pars['gE_init'] = 0.024 * np.random.rand(300) # initial synaptic conductance pars['tau_syn_E'] = 5. 
# [ms] starttime = time.perf_counter() pre_spike_train_ex = Poisson_generator(pars, rate=inputrate, n=300, myseed=2020) for i_pre in range(50): pre_spike_train_ex[i_pre, :] = pre_spike_train_ex[0, :] # simple way to set input correlated v, rec_spikes, gE, P, M, gE_bar_update = run_LIF_cond_STDP(pars, pre_spike_train_ex) # simulate LIF neuron with STDP gbar_norm = gE_bar_update / pars['gE_bar'] # calculate the ratio of the synaptic conductance endtime = time.perf_counter() timecost = (endtime - starttime) / 60. print(f'Total simulation time is {timecost:.3f} min') my_layout.width = '620px' @widgets.interact( sample_time=widgets.FloatSlider(0.5, min=0., max=1., step=0.05, layout=my_layout) ) def my_visual_STDP_distribution(sample_time=0.0): sample_time = int(sample_time * pars['range_t'].size) - 1 sample_time = sample_time*(sample_time > 0) figtemp = plt.figure(figsize=(8, 8)) ax1 = plt.subplot(211) for i in range(50): ax1.plot(pars['range_t'][::1000] / 1000., gE_bar_update[i, ::1000], lw=1., color='r', alpha=0.7, zorder=2) for i in range(50): ax1.plot(pars['range_t'][::1000] / 1000., gE_bar_update[i + 50, ::1000], lw=1., color='b', alpha=0.5, zorder=1) ax1.axvline(1e-3 * pars['range_t'][sample_time], 0., 1., color='k', ls='--', zorder=2) ax1.set_ylim(0, 0.025) ax1.set_xlim(-2, 122) ax1.set_xlabel('Time (s)') ax1.set_ylabel(r'$\bar{g}$') legend=ax1.legend(['Correlated input', 'Uncorrelated iput'], fontsize=18, loc=[0.92, -0.6], frameon=False) for color, text, item in zip(['r', 'b'], legend.get_texts(), legend.legendHandles): text.set_color(color) item.set_visible(False) bins = np.arange(-.05, 1.05, .05) g_dis_cc, _ = np.histogram(gbar_norm[:50, sample_time], bins) g_dis_dp, _ = np.histogram(gbar_norm[50:, sample_time], bins) ax2 = plt.subplot(212) ax2.bar(bins[1:], g_dis_cc, color='r', alpha=0.5, width=0.05) ax2.bar(bins[1:], g_dis_dp, color='b', alpha=0.5, width=0.05) ax2.set_xlim(-0.1, 1.1) ax2.set_xlabel(r'$\bar{g}/g_{\mathrm{max}}$') ax2.set_ylabel('Number') 
ax2.set_title(('Time = %.1f s' % (1e-3 * pars['range_t'][sample_time])), fontweight='bold') plt.show() print(help(example_LIF_STDP_corrInput)) # - # ## Interactive Demo: LIF model with plastic synapses receiving correlated inputs # + cellView="form" # @title # @markdown Make sure you execute this cell to enable the widget! example_LIF_STDP_corrInput(inputrate=10.0) # - # **Why do weights of uncorrelated neurons decrease when synapses show STDP** # # Above, we notice that the synapses of correlated neurons (which spike together) were almost unaffected, but the weights of other neurons diminished. Why does this happen? # # The reason is that the correlated presynaptic neurons have a higher chance of eliciting a spike in the postsynaptic neurons and that create a $pre \rightarrow post$ pairing of spikes. # ## Think! # # * Modify the code above and create two groups of correlated presynaptic neurons and test what happens to the weight distribution. # # * How can the above observations be used to create unsupervised learning? Could you imagine how we have to train a neuronal model enabled with STDP rule to identify input patterns? # # * What else can be done with this type of plasticity? # # + # to_remove explanation """ Discussion: 1. The two groups will compete to get to stronger synaptic values 2. STDP provides a basis of unsupervised learning as these synapses are sensitive to input correlations. So if the input data has any temporal correlation the neuron will become responsive to those correlated events 3. You can for circuit motifs in an otherwise unstructured network. The simplest function circuit we can make is a feedforward chain. """; # - # --- # # Summary # # Hooray! You have just finished this loooong day! In this tutorial, we covered the concept of **spike-timing dependent plasticity (STDP)**. # # We managed to: # # - build a model of synapse that shows STDP. # # - study how correlations in input spike trains influence the distribution of synaptic weights. 
# # Using presynaptic inputs as Poisson type spike trains, we modeled an LIF model with synapses equipped with STDP. We also studied the effect of correlated inputs on the synaptic strength!
tutorials/W2D3_BiologicalNeuronModels/W2D3_Tutorial4.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Generate Word Clouds # - Data Analytics, Data Science, Data Analysis # - Basic WordCloud, Custom, Fancier # - Execute this notebook from command line # - `jupyter nbconvert --no-input --execute --ExecutePreprocessor.timeout=180 --to html --template basic word_clouds.ipynb` # - `jupyter nbconvert --no-input --execute --ExecutePreprocessor.timeout=180 --to html --template=nbextensions word_clouds.ipynb` - nbextensions template for suppressing code output. Read more [here](https://jupyter-contrib-nbextensions.readthedocs.io/en/latest/exporting.html) # # Data Analytics # + # Import packages import wikipedia import re# Specify the title of the Wikipedia page wiki = wikipedia.page('Data Analytics')# Extract the plain text content of the page text = wiki.content# Clean text text = re.sub(r'==.*?==+', '', text) text = text.replace('\n', '') import matplotlib.pyplot as plt # %matplotlib widget # Import package from wordcloud import WordCloud, STOPWORDS# Generate word cloud wordcloud = WordCloud(width = 3000, height = 2000, random_state=1, background_color='salmon', colormap='Pastel1', collocations=False, stopwords = STOPWORDS).generate(text)# Plot plt.figure(figsize=(5, 5)) # Display image plt.imshow(wordcloud) # No axis details plt.axis("off"); # + # Import packages import wikipedia import re# Specify the title of the Wikipedia page wiki = wikipedia.page('Data Analytics')# Extract the plain text content of the page text = wiki.content# Clean text text = re.sub(r'==.*?==+', '', text) text = text.replace('\n', '') import matplotlib.pyplot as plt # %matplotlib widget # Import package from wordcloud import WordCloud, STOPWORDS# Generate word cloud # Generate wordcloud wordcloud = WordCloud(width = 3000, height = 2000, random_state=1, 
background_color='black', colormap='Set2', collocations=False, stopwords = STOPWORDS).generate(text) plt.figure(figsize=(5, 5)) # Display image plt.imshow(wordcloud) # No axis details plt.axis("off"); # - # # Cloud frame # + # Import packages import wikipedia import re# Specify the title of the Wikipedia page wiki = wikipedia.page('Data Analytics')# Extract the plain text content of the page text = wiki.content# Clean text text = re.sub(r'==.*?==+', '', text) text = text.replace('\n', '') import matplotlib.pyplot as plt # %matplotlib inline # Import package from wordcloud import WordCloud, STOPWORDS# Generate word cloud # Generate wordcloud from PIL import Image import numpy as np mask = np.array(Image.open('cloud_white_background.png')) wordcloud = WordCloud(width = 3000, height = 2000, random_state=1, background_color='white', colormap='rainbow', collocations=False, stopwords = STOPWORDS, mask=mask).generate(text) plt.figure(figsize=(5, 5)) # Display image plt.imshow(wordcloud) # No axis details plt.axis("off"); # + # Import packages import wikipedia import re# Specify the title of the Wikipedia page wiki = wikipedia.page('Data Analytics')# Extract the plain text content of the page text = wiki.content# Clean text text = re.sub(r'==.*?==+', '', text) text = text.replace('\n', '') import matplotlib.pyplot as plt # %matplotlib inline # Import package from wordcloud import WordCloud, STOPWORDS# Generate word cloud # Generate wordcloud from PIL import Image import numpy as np mask = np.array(Image.open('comment_white_background.png')) wordcloud = WordCloud(width = 3000, height = 2000, random_state=1, background_color='white', colormap='rainbow', collocations=False, stopwords = STOPWORDS, mask=mask).generate(text) plt.figure(figsize=(5, 5)) # Display image plt.imshow(wordcloud) # No axis details plt.axis("off"); # - # # Data Science # + # Import packages import wikipedia import re# Specify the title of the Wikipedia page wiki = wikipedia.page('Data Science')# Extract 
the plain text content of the page text = wiki.content# Clean text text = re.sub(r'==.*?==+', '', text) text = text.replace('\n', '') import matplotlib.pyplot as plt # %matplotlib inline # Import package from wordcloud import WordCloud, STOPWORDS# Generate word cloud # Generate wordcloud wordcloud = WordCloud(width = 3000, height = 2000, random_state=1, background_color='black', colormap='Set2', collocations=False, stopwords = STOPWORDS).generate(text) plt.figure(figsize=(5, 5)) # Display image plt.imshow(wordcloud) # No axis details plt.axis("off"); # - # # Data Analysis # + # Import packages import wikipedia import re# Specify the title of the Wikipedia page wiki = wikipedia.page('Data Analysis')# Extract the plain text content of the page text = wiki.content# Clean text text = re.sub(r'==.*?==+', '', text) text = text.replace('\n', '') import matplotlib.pyplot as plt # %matplotlib inline # Import package from wordcloud import WordCloud, STOPWORDS# Generate word cloud # Generate wordcloud wordcloud = WordCloud(width = 3000, height = 2000, random_state=1, background_color='black', colormap='Set2', collocations=False, stopwords = STOPWORDS).generate(text) plt.figure(figsize=(5, 5)) # Display image plt.imshow(wordcloud) # No axis details plt.axis("off"); # - # ### Data Analysis vs. Data Science vs. Data Analytics vs. 
Data Engineering # + # Import packages import wikipedia import re# Specify the title of the Wikipedia page my_keywords = ['Data Analysis','Data Science','Data Engineering', 'Data Analytics'] consolidated_text = "" for keyword in my_keywords: wiki = wikipedia.page('Data Analysis')# Extract the plain text content of the page text = wiki.content# Clean text text = re.sub(r'==.*?==+', '', text) text = text.replace('\n', '') consolidated_text += text import matplotlib.pyplot as plt # %matplotlib inline # Import package from wordcloud import WordCloud, STOPWORDS# Generate word cloud # Generate wordcloud wordcloud = WordCloud(width = 3000, height = 2000, random_state=1, background_color='black', colormap='Set2', collocations=False, stopwords = STOPWORDS).generate(consolidated_text) plt.figure(figsize=(5, 5)) # Display image plt.imshow(wordcloud) # No axis details plt.axis("off");
class1_explore/scrapers/word_clouds.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Solid Angle subtended by a sphere Approximation # # We want to approximate the value of the solid angle subtended by a sphere as seen from any point in space outside of that sphere. This expression will be used in ToFu to compute the radiated power received by a particle of arbitrary radius (small vs plasma volume discretization) from the whole plasma. The expression will allow faster computation. # # # ## Notations # # ![Solid angles notations](solid_angle_notations.png) # # Let’s consider the case of a spherical particle of radius $r$, observed from point $M$ located at a distance $d$ from the center $C$ of the particle, as illustrated in the figure above. By definition, the solid angle $\Omega = \dfrac{S}{d^2}$ , where $S$ is the surface on the sphere of center $M$ intersecting the particle center $C$ and limited by its radius, as represented in the figure below. # # # ![Schema angles solide sphere](SA_schema_cropped.png) # # ## Solid Angle approximation # In our case, we get # # $$\Omega = 2\pi \left( 1 - \sqrt{1-\left(\dfrac{r}{d}\right)^2}\right)$$ # # However, the particle radius is almost always much smaller than the distance between the particle and the observation point $M$. 
Thus, often $$\dfrac{r}{d} = X \xrightarrow[]{} 0$$ # The taylor series of the function $\Omega(X) = 2\pi \left( 1 - \sqrt{1-X^2}\right)$ at $X=0$ is given by # # $$\Omega(X) = \Omega(0) + X\Omega'(0) + \dfrac{X^2}{2}\Omega''(0) + \dfrac{X^3}{6}\Omega^{(3)}(0)+ \dfrac{X^4}{24}\Omega^{(4)}(0) + O(x^4)$$ # where # # $$ # \begin{align} # \Omega(X) &= 2\pi \left( 1 - \sqrt{1-X^2}\right)\\ # \Omega'(X) &= 2\pi X \left( 1 - X^2\right)^{-\dfrac{1}{2}}\\ # \Omega''(X) &= 2\pi \left( 1 - X^2\right)^{-\dfrac{3}{2}}\\ # \Omega^{(3)}(X) &= 6 \pi X \left( 1 - X^2\right)^{-\dfrac{5}{2}}\\ # \Omega^{(4)}(X) &= 6 \pi \left(4X^2 + 1 \right)\left( 1 - X^2\right)^{-\dfrac{7}{2}} # \end{align} # $$ # # Thus, we get # # $$ \Omega(X) = \pi x^2 + \dfrac{x^4 \pi}{4} + O(x^4) $$ # Replacing the variable back # # $$ \Omega \approx \pi \left(\dfrac{r}{d}\right)^2 + \dfrac{\pi}{4}\left(\dfrac{r}{d}\right)^4$$ # # And to the 9-th degree # # $$ \Omega \approx \pi \left(\dfrac{r}{d}\right)^2 + \dfrac{\pi}{4}\left(\dfrac{r}{d}\right)^4 + \dfrac{\pi}{8}\left(\dfrac{r}{d}\right)^6 + \dfrac{5 \pi}{64}\left(\dfrac{r}{d}\right)^8$$ # ## Computation # %matplotlib widget import ipywidgets as widgets import matplotlib.pyplot as plt import numpy as np # + # set up plot fig, ax = plt.subplots(figsize=(6, 4)) ax.grid(True) def exact(r, d): """ Return a sine for x with angular frequeny w and amplitude amp. """ return 2*np.pi*(1-np.sqrt(1-(r/d)**2)) def approx(r,d): """ Return a sine for x with angular frequeny w and amplitude amp. """ x = r/d return np.pi*(x**2 + x**4/4) # generate x values d = np.linspace(1, 10, 100) maxdiff = 0. for r in np.linspace(0.1,0.8,8): diff = abs(exact(r, d) - approx(r,d)) if r < 0.5: maxdiff = max(np.max(diff), maxdiff) ax.plot(d, diff, label=str(r)) ax.set_ylim([0, maxdiff]) ax.legend() ax.set_title("Error with respect to distance for different radius")
Notes_Upgrades/SolidAngle/Solid_Angle_Sphere_Approx.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.2 64-bit # name: python3 # --- # # # Statistics & Data Analysis # # ## Req # #### Import Requirements # ##### HTML formatting # + from IPython.display import HTML HTML("""<style type="text/css"> table.dataframe td, table.dataframe th { max-width: none; </style> """) HTML("""<style type="text/css"> table.dataframe td, table.dataframe th { max-width: none; white-space: normal; } </style> """) HTML("""<style type="text/css"> table.dataframe td, table.dataframe th { max-width: none; white-space: normal; line-height: normal; } </style> """) HTML("""<style type="text/css"> table.dataframe td, table.dataframe th { max-width: none; white-space: normal; line-height: normal; padding: 0.3em 0.5em; } </style> """) # + import numpy as np import pandas as pd import scipy import matplotlib.pyplot as plt from pandas.api.types import CategoricalDtype from plotnine import * from scipy.stats import * import scikit_posthocs as sp data = pd.read_csv("./NewCols.csv") # - # ## Calculating the differences between the noremalized values. 
# + data_control = data[data["treatment"] == "baseline"] data_control.to_csv("./control.csv") data_treatment = data[data["treatment"] == "intravenous LPS"] data_control.to_csv("./lps.csv") procData = data_treatment procData['diff_AVAR2'] = ( np.array(data_treatment["AVAR2"]) - np.array(data_control["AVAR2"])).tolist() procData["diff_CVAR2"] = ( np.array(data_treatment["CVAR2"]) - np.array(data_control["CVAR2"])).tolist() procData["diff_AWT2"] = (np.array(data_treatment["AWT2"]) - np.array(data_control["AWT2"])).tolist() procData["diff_CWT2"] = (np.array(data_treatment["CWT2"]) - np.array(data_control["CWT2"])).tolist() procData["diff_total2"] = ( np.array(data_treatment["total2"]) - np.array(data_control["total2"])).tolist() procData["diff_totalA"] = ( np.array(data_treatment["totalA"]) - np.array(data_control["totalA"])).tolist() procData["diff_totalC"] = ( np.array(data_treatment["totalC"]) - np.array(data_control["totalC"])).tolist() procData["diff_totalWT"] = (np.array( data_treatment["totalWT"]) - np.array(data_control["totalWT"])).tolist() procData["diff_totalVar"] = (np.array( data_treatment["totalVar"]) - np.array(data_control["totalVar"])).tolist() procData.to_csv("./procData.csv") # - newDF= data_control[["testGroup","tg2"]] newDF newDF.rename(columns = {'testGroup':'c_tg','tg2':'c_tg2'}, inplace=True) newDF newDF.index = procData.index procData= pd.concat([procData,newDF], axis=1) # #### Difference Table # # + pd.set_option('display.max_rows', procData.shape[0]+1) diff_data = procData.loc[ :,"diff_AVAR2":"diff_totalVar" ] diff_data.to_csv("./diffData.csv") # - diff_data.describe() diff_data.var() diff_data.std() diff_data.skew() diff_data.kurtosis().tolist() diff_data.kurtosis() # ## Graph Data - from plotnine import * ggplot(data, aes(x='treatment', y='AWT2') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment')) # + a = 0.05 wilcoxon(data_control["AWT2"],data_treatment["AWT2"]) # - ggplot(data, aes(x='treatment', y='CWT2') ) + 
geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment')) # + a = 0.05 wilcoxon(data_control["CWT2"],data_treatment["CWT2"]) # - ggplot(data, aes(x='treatment', y='AVAR2') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment')) # + a = 0.05 wilcoxon(data_control["AVAR2"],data_treatment["AVAR2"]) # - ggplot(data, aes(x='treatment', y='CVAR2') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment')) # + a = 0.05 wilcoxon(data_control["CVAR2"],data_treatment["CVAR2"]) # - removed_outliers = data.total2.between(data.total2.quantile(.05), data.total2.quantile(.95)) data_total= data[removed_outliers] ggplot(data_total, aes(x='treatment',y="total2" ), ) + geom_boxplot(outlier_shape = "") + geom_jitter(data_total,aes(y="total2",colour='treatment',shape='treatment') ) + ggtitle("QQ Plot of IRAK-1 expression per GbP") + xlab("Treatment") + ylab("Total IRAK-1 Levels per Gigabase pair") + ylim(data_total.total2.quantile(.05), data_total.total2.quantile(.95)) # + a = 0.05 wilcoxon(diff_data["diff_total2"]) # - removed_outliers_diffData = diff_data.diff_total2.between(diff_data.diff_total2.quantile(.05), diff_data.diff_total2.quantile(.95)) difftotalData=diff_data[removed_outliers_diffData] ggplot(difftotalData, aes( x='0',y='diff_total2') ) + geom_boxplot() + geom_point(color="red") + ylim(difftotalData.diff_total2.quantile(.05), difftotalData.diff_total2.quantile(.95)) + ggtitle("QQ Plot of changes in IRAK-1 levels per Gbp") + xlab("Treatment") + ylab("Changes in IRAK-1 Levels per Gigabase pair") data_plot = data_treatment controlData = data_control['total2'] controlData # + data_plot["ctrl_total2"]=controlData.to_list() data_plot # - from sklearn.linear_model import LinearRegression model = LinearRegression().fit(data_plot.total2.to_numpy().reshape((-1, 1)), data_plot.ctrl_total2) r_sq= model.score(data_plot.total2.to_numpy().reshape((-1, 1)), data_plot.ctrl_total2) print('coefficient of determination:', 
r_sq) print('intercept:', model.intercept_) print('slope:', model.coef_) # + ggplot(data_plot,aes(x='total2',y='ctrl_total2') ) + geom_point() + geom_smooth(method='lm') # + from sklearn import linear_model lm = linear_model.LinearRegression() # - shapiro_test = shapiro(data_control['total2']) shapiro_test shapiro_test = shapiro(data_treatment['total2']) shapiro_test shapiro_test = shapiro(diff_data['diff_total2']) shapiro_test ggplot(data, aes(x='treatment', y='totalVar') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment')) # + a = 0.05 wilcoxon(diff_data["diff_totalVar"]) # - ggplot(data, aes(x='treatment', y='totalWT') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment')) # + a = 0.05 wilcoxon(diff_data["diff_totalWT"]) # - ggplot(data, aes(x='treatment', y='totalA') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment')) # + a = 0.05 wilcoxon(diff_data["diff_totalA"]) # - ggplot(data, aes(x='treatment', y='totalC') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment')) # + a = 0.05 wilcoxon(diff_data["diff_totalC"]) # - # ## Statistics # ### Total 2 Comparison # #### Wilcoxon non-parametric # + a = 0.05 w, p = wilcoxon(data_control["total2"],data_treatment["total2"]) print(w, p) # - if (p < a): print("As P"+str(p)+" is less than a: "+str(a)) print( "we reject the Null Hypothesis.") print(". There is significant difference betwween the groups") else: print("As P"+p+" is larger than a: "+str(a)) print( "we FAIL TO reject the Null Hypothesis.") print(". 
There is NOT a significant difference betwween the groups") # #### <NAME> sp.posthoc_nemenyi_friedman(diff_data) # <NAME> # ### other # + a = 0.05 w, p = wilcoxon((data_control["totalA"]/data_control["totalC"] ),(data_treatment["totalA"]/data_treatment["totalC"])) print(w, p) # + a = 0.05 w, p = wilcoxon((data_control["AVAR2"]/data_control["CVAR2"] ),(data_treatment["AVAR2"]/data_treatment["CVAR2"])) print(w, p) # + a = 0.05 w, p = wilcoxon((data_control["AWT2"]/data_control["CWT2"] ),(data_treatment["AWT2"]/data_treatment["CWT2"])) print(w, p) # - ggplot()+geom_histogram(procData,aes(x="tg2")) ggplot()+geom_histogram(procData,aes(x="mutant")) ggplot()+geom_bar(procData,aes(x="spliceVariant",fill="mutant")) ggplot()+geom_col(procData,aes(x="spliceVariant",y="diff_totalA/diff_totalC",fill="mutant")) a = 0.05 diff_data = procData[(data["totalC"] > 0 ) & (data["totalA"] > 0 )] ggplot()+geom_histogram(diff_data,aes(x="tg2")) # + w, p = wilcoxon((diff_data["totalC"] )/(diff_data["totalA"])) print(w, p) # + a = 0.05 w, p = wilcoxon(data_control["total2"],data_treatment["total2"]) print(w, p) # - # 2 graphs # # 1. Do the Table # 3. Black and white # 3. Make sure its not sloppy # 4. # # control, LPS & Difference. # # correlation plot for each patient - total 2 & diff_total2 # # Look for A/C ratios # # ggplot(data_plot,aes(x='total2',y='ctrl_total2') ) + geom_point(colour) + geom_smooth(method='lm') # #
thesis/_build/jupyter_execute/treatment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" # %load_ext tensorboard.notebook # %tensorboard --logdir /logs # + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
tests/data/jupyter_tensorboard.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="6585703d-001" colab_type="text" # #1. Install Dependencies # First install the libraries needed to execute recipes, this only needs to be done once, then click play. # # + id="6585703d-002" colab_type="code" # !pip install git+https://github.com/google/starthinker # + [markdown] id="6585703d-003" colab_type="text" # #2. Get Cloud Project ID # To run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play. # # + id="6585703d-004" colab_type="code" CLOUD_PROJECT = 'PASTE PROJECT ID HERE' print("Cloud Project Set To: %s" % CLOUD_PROJECT) # + [markdown] id="6585703d-005" colab_type="text" # #3. Get Client Credentials # To read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play. # # + id="6585703d-006" colab_type="code" CLIENT_CREDENTIALS = 'PASTE CLIENT CREDENTIALS HERE' print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS) # + [markdown] id="6585703d-007" colab_type="text" # #4. Enter URL Parameters # Pull URL information. # 1. Specify a table with columns and URL, URI ( can be null). # 1. Check bigquery destination for list of information. # Modify the values below for your use case, can be done multiple times, then click play. # # + id="6585703d-008" colab_type="code" FIELDS = { 'auth': 'service', # Credentials used for rading and writing data. 'status': True, # Pull status of HTTP request. 'read': False, # Pull data from HTTP request. 'dataset': '', # Name of Google BigQuery dataset to write. 'table': '', # Name of Google BigQuery dataset to write. 
} print("Parameters Set To: %s" % FIELDS) # + [markdown] id="6585703d-009" colab_type="text" # #5. Execute URL # This does NOT need to be modified unless you are changing the recipe, click play. # # + id="6585703d-010" colab_type="code" from starthinker.util.configuration import Configuration from starthinker.util.configuration import execute from starthinker.util.recipe import json_set_fields USER_CREDENTIALS = '/content/user.json' TASKS = [ { 'url': { 'auth': 'user', 'status': {'field': {'name': 'status','kind': 'boolean','order': 2,'default': True,'description': 'Pull status of HTTP request.'}}, 'read': {'field': {'name': 'read','kind': 'boolean','order': 3,'default': False,'description': 'Pull data from HTTP request.'}}, 'urls': { 'bigquery': { 'dataset': {'field': {'name': 'dataset','kind': 'string','order': 4,'default': '','description': 'Name of Google BigQuery dataset to write.'}}, 'query': {'field': {'name': 'table','kind': 'text','order': 5,'default': '','description': 'Query to run to pull URLs.'}}, 'legacy': False } }, 'to': { 'bigquery': { 'dataset': {'field': {'name': 'dataset','kind': 'string','order': 6,'default': '','description': 'Name of Google BigQuery dataset to write.'}}, 'table': {'field': {'name': 'table','kind': 'string','order': 7,'default': '','description': 'Name of Google BigQuery dataset to write.'}} } } } } ] json_set_fields(TASKS, FIELDS) execute(Configuration(project=CLOUD_PROJECT, client=CLIENT_CREDENTIALS, user=USER_CREDENTIALS, verbose=True), TASKS, force=True)
colabs/url.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Goal with this is to nail down the part of the scraper that actually retreives and parses each profile's webpage. # # Only example scraping health.usnews.com (granted, I didn't search too much): https://medium.com/analytics-vidhya/web-scraping-best-childrens-hospitals-for-cancer-5574db6d4090 # # HTML formatter to help with HTML parsing: https://webformatter.com/html # + # Scraping from bs4 import BeautifulSoup import requests import json import pandas as pd import pprint from urllib.request import Request, urlopen from time import sleep from random import randint # + # Progress counter/tracker from contextlib import contextmanager from timeit import default_timer import sys from datetime import timedelta # Define a timer w/ decoractor to use with the loop @contextmanager def elapsed_timer(): start=default_timer() elapser = lambda: default_timer() - start yield lambda: elapser() end = default_timer() elapser = lambda: end-start # - # # Notes # 2. I should also look into cycling (a) IP address and (b) headers. # # Test with one radiologist child_url = 'doctors/richard-duszak-130940' prefix = 'https://health.usnews.com/' user_agent = {'User-agent' : 'Mozilla/5.0'} page = requests.get(prefix+child_url, headers=user_agent) soup = BeautifulSoup(page.text, "html.parser") # + # Narrow down the HTML to the section I want using "id" results = soup.find(id="experience") print(results.prettify()) #NB could directly use: # ed_ex_elements = soup.findAll("div", {"class": "EducationAndExperience__Item-dbww3o-0 eUTnkN"}) # + # Find all HTML relating to Education & Experience (includes med school & residency, licenses, etc.) 
ed_ex_elements = results.find_all("div", class_= "EducationAndExperience__Item-dbww3o-0 eUTnkN") # Pick out the child element I'm interested in for ed_ex_element in ed_ex_elements: organization_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 eRvRyE").text try: education_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 hwNctc").text except: pass print(organization_element) print(education_element) print() # v-space between elements # + ## Trying to feed the previous block into a DF vs. print # Gather the radiologists' data radiologist_name = "<NAME>" radiologist_id = 1 # create a list to store the data scraped_data = [] for ed_ex_element in ed_ex_elements: # initialize the dictionary radiologist_details = {} # parse out the exact text we want # organization_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 eRvRyE").text try: organization_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 eRvRyE").text education_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 hwNctc").text except: continue # add data to the dictionary radiologist_details['radiologist_id'] = radiologist_id radiologist_details['radiologist_name'] = radiologist_name radiologist_details['organization_element'] = organization_element radiologist_details['education_element'] = education_element # append the dictionaries to the list scraped_data.append(radiologist_details) print(scraped_data) # + ## Create and format a DF from the list of dictionaries df_scraped = pd.DataFrame.from_dict(scraped_data) # Remove rows where License is recorded df_scraped = df_scraped[df_scraped['organization_element'].str.contains("License") == False] # Remove redundant organization data after comma in education_element counter = 0 for x in df_scraped['education_element']: if "," in x: x = x.split(",")[:-1] df_scraped["education_element"][counter] = x[0] #remove the annoying brackets counter += 1 # View DF df_scraped # - # # Testing how to also get 
verification data (e.g., name, city, etc.) # + # Find all HTML from profile page header (includes name, address, etc.) verif_elements = soup.find_all("div", class_= "Hero__ContentWrapper-sc-1lw4wit-0 eZBhPz mt4") # Pick out the child elements I'm interested in for verif_element in verif_elements: address_element = verif_element.find("p", class_="Paragraph-sc-1iyax29-0 ysuVA").text name_element = verif_element.find("h1", class_="Heading__HeadingStyled-sc-1w5xk2o-0 kYBDwy Heading-sc-1w5xk2o-1 Hero__Name-sc-1lw4wit-3 cRrhAX iZgYrY").text degree_element = verif_element.find("p", class_="Heading__HeadingStyled-sc-1w5xk2o-0-p kzoSbH Heading-sc-1w5xk2o-1 cRrhAX").text city_element = verif_element.find("p", class_="Paragraph-sc-1iyax29-0 Hero__Location-sc-1lw4wit-6 iOniyG jTBKbU").text gender_element = verif_element.find("p", class_="Paragraph-sc-1iyax29-0 ewEPVS flex").text gndr_xp_lang_element = verif_element.find("div", class_="Hero__MoreInfo-sc-1lw4wit-11 fLhfWJ").text print(address_element) print(name_element) print(degree_element) print(city_element) print(gender_element) print(gndr_xp_lang_element) # + # Gather the radiologists' data radiologist_id = child_url # Create lists to store the data scraped_ed_ex_data = [] scraped_verif_data = [] # Source education and experience data for ed_ex_element in ed_ex_elements: # Initialize the dictionary radiologist_details = {} # Parse out the exact text we want try: organization_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 eRvRyE").text education_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 hwNctc").text except: continue # Add data to the dictionary radiologist_details['radiologist_id'] = radiologist_id radiologist_details['organization_element'] = organization_element radiologist_details['education_element'] = education_element # Append the dictionaries to the list scraped_ed_ex_data.append(radiologist_details) # Source verification data for verif_element in verif_elements: # Initialize 
the dictionary radiologist_verif_info = {} # Parse out the exact text we want try: address_element = verif_element.find("p", class_="Paragraph-sc-1iyax29-0 ysuVA").text name_element = verif_element.find("h1", class_="Heading__HeadingStyled-sc-1w5xk2o-0 kYBDwy Heading-sc-1w5xk2o-1 Hero__Name-sc-1lw4wit-3 cRrhAX iZgYrY").text degree_element = verif_element.find("p", class_="Heading__HeadingStyled-sc-1w5xk2o-0-p kzoSbH Heading-sc-1w5xk2o-1 cRrhAX").text city_element = verif_element.find("p", class_="Paragraph-sc-1iyax29-0 Hero__Location-sc-1lw4wit-6 iOniyG jTBKbU").text gender_element = verif_element.find("p", class_="Paragraph-sc-1iyax29-0 ewEPVS flex").text gndr_xp_lang_element = verif_element.find("div", class_="Hero__MoreInfo-sc-1lw4wit-11 fLhfWJ").text except: continue # Add data to the dictionary radiologist_verif_info['radiologist_id'] = radiologist_id radiologist_verif_info['address'] = address_element radiologist_verif_info['name'] = name_element radiologist_verif_info['degree'] = degree_element radiologist_verif_info['city'] = city_element radiologist_verif_info['gender'] = gender_element radiologist_verif_info['years_xp'] = gndr_xp_lang_element # Append the dictionaries to the list scraped_verif_data.append(radiologist_verif_info) # - scraped_ed_ex_data[0:3] scraped_verif_data # # Testing with two radiologists # + # Testing with two radiologists radiologists = [ {"name": "<NAME>", "child_url": "doctors/richard-duszak-130940"}, {"name": "<NAME>", "child_url": "doctors/lisa-abramson-863145"} ] radiologists[1]["child_url"] # + # Create lists to store the data scraped_ed_ex_data = [] scraped_verif_data = [] for rad in range(0,len(radiologists)): child_url = radiologists[rad]["child_url"] #part of request url + radiologist ID for relational DFs prefix = 'https://health.usnews.com/' user_agent = {'User-agent' : 'Mozilla/5.0'} page = requests.get(prefix+child_url, headers=user_agent) soup = BeautifulSoup(page.text, "html.parser") # Find all HTML relating to 
Education & Experience (includes med school & residency, licenses, etc.) ed_ex_results = soup.find(id="experience") ed_ex_elements = ed_ex_results.find_all("div", class_= "EducationAndExperience__Item-dbww3o-0 eUTnkN") # Find all HTML from profile page header (includes name, address, etc.) verif_elements = soup.find_all("div", class_= "Hero__ContentWrapper-sc-1lw4wit-0 eZBhPz mt4") # Source education and experience data for ed_ex_element in ed_ex_elements: # Initialize the dictionary radiologist_details = {} # Parse out the exact text we want try: organization_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 eRvRyE").text radiologist_details['organization_element'] = organization_element except: continue try: education_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 hwNctc").text radiologist_details['education_element'] = education_element except: continue # Add data to the dictionary radiologist_details['radiologist_id'] = child_url # radiologist_details['organization_element'] = organization_element # radiologist_details['education_element'] = education_element # Append the dictionaries to the list scraped_ed_ex_data.append(radiologist_details) # Source verification data for verif_element in verif_elements: # Initialize the dictionary radiologist_verif_info = {} # Parse out the exact text we want & Add data to the dictionary try: address_element = verif_element.find("p", class_="Paragraph-sc-1iyax29-0 ysuVA").text radiologist_verif_info['address'] = address_element except: continue try: name_element = verif_element.find("h1", class_="Heading__HeadingStyled-sc-1w5xk2o-0 kYBDwy Heading-sc-1w5xk2o-1 Hero__Name-sc-1lw4wit-3 cRrhAX iZgYrY").text radiologist_verif_info['name'] = name_element except: continue try: degree_element = verif_element.find("p", class_="Heading__HeadingStyled-sc-1w5xk2o-0-p kzoSbH Heading-sc-1w5xk2o-1 cRrhAX").text radiologist_verif_info['degree'] = degree_element except: continue try: city_element = 
verif_element.find("p", class_="Paragraph-sc-1iyax29-0 Hero__Location-sc-1lw4wit-6 iOniyG jTBKbU").text radiologist_verif_info['city'] = city_element except: continue try: gender_element = verif_element.find("p", class_="Paragraph-sc-1iyax29-0 ewEPVS flex").text radiologist_verif_info['gender'] = gender_element except: continue # Add data to the dictionary radiologist_verif_info['radiologist_id'] = child_url # radiologist_verif_info['address'] = address_element # radiologist_verif_info['name'] = name_element # radiologist_verif_info['degree'] = degree_element # radiologist_verif_info['city'] = city_element # radiologist_verif_info['gender'] = gender_element # Append the dictionaries to the list scraped_verif_data.append(radiologist_verif_info) # - scraped_ed_ex_data scraped_verif_data # # Try the full loop with all radiologists # Read in the Part 1 data (child urls) child_url_list_pre = pd.read_csv(r"C:\Users\ssantavicca3\Documents\Work Files & Folders\RadiologyTrainees_NotABot\datadump\first stage\url_list.csv") child_url_list_pre child_url_list_pre["url"][0] # + # Sort out duplicate rows by url import numpy as np child_url_list = [] for i in range(0, len(child_url_list_pre)): child_url_list.append(child_url_list_pre["url"][i]) def unique(lst): x = np.array(lst) print(len(np.unique(x))) unique(child_url_list) # count of unique child_urls or providers to be iterated over # - child_url_list[0] # + # Create lists to store the data scraped_ed_ex_data = [] scraped_verif_data = [] with elapsed_timer() as elapsed: n_iter = 0 for i in range(0,len(child_url_list)): child_url = child_url_list[i] #part of request url + radiologist ID for relational DFs prefix = 'https://health.usnews.com' user_agent = {'User-agent' : 'Mozilla/5.0'} page = requests.get(prefix+child_url, headers=user_agent) soup = BeautifulSoup(page.text, "html.parser") # Find all HTML relating to education & experience (includes med school & residency, licenses, etc.) 
ed_ex_results = soup.find(id="experience") ed_ex_elements = ed_ex_results.find_all("div", class_= "EducationAndExperience__Item-dbww3o-0 eUTnkN") # Find all HTML from profile page header (includes name, address, etc.) verif_elements = soup.find_all("div", class_= "Hero__ContentWrapper-sc-1lw4wit-0 eZBhPz mt4") # Source education and experience data for ed_ex_element in ed_ex_elements: # Initialize the dictionary radiologist_details = {} # Parse out the exact text we want try: organization_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 eRvRyE").text radiologist_details['organization_element'] = organization_element except: pass try: education_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 hwNctc").text radiologist_details['education_element'] = education_element except: pass # Add data to the dictionary radiologist_details['radiologist_id'] = child_url # Append the dictionaries to the list scraped_ed_ex_data.append(radiologist_details) # Source verification data for verif_element in verif_elements: # Initialize the dictionary radiologist_verif_info = {} # Parse out the exact text we want & Add data to the dictionary try: address_element = verif_element.find("p", class_="Paragraph-sc-1iyax29-0 ysuVA").text radiologist_verif_info['address'] = address_element except: pass try: name_element = verif_element.find("h1", class_="Heading__HeadingStyled-sc-1w5xk2o-0 kYBDwy Heading-sc-1w5xk2o-1 Hero__Name-sc-1lw4wit-3 cRrhAX iZgYrY").text radiologist_verif_info['name'] = name_element except: pass try: degree_element = verif_element.find("p", class_="Heading__HeadingStyled-sc-1w5xk2o-0-p kzoSbH Heading-sc-1w5xk2o-1 cRrhAX").text radiologist_verif_info['degree'] = degree_element except: pass try: city_element = verif_element.find("p", class_="Paragraph-sc-1iyax29-0 Hero__Location-sc-1lw4wit-6 iOniyG jTBKbU").text radiologist_verif_info['city'] = city_element except: pass try: gender_element = verif_element.find("p", 
class_="Paragraph-sc-1iyax29-0 ewEPVS flex").text radiologist_verif_info['gender'] = gender_element except: pass # Add data to the dictionary radiologist_verif_info['radiologist_id'] = child_url # Append the dictionaries to the list scraped_verif_data.append(radiologist_verif_info) # Counter and timer for progress checks n_iter += 1 if n_iter % 1000 == 0: print("Iteration (radiologists): "+str(n_iter)+" ----- Time Elapsed: "+str(timedelta(seconds=round(elapsed())))) sys.stdout.flush() # Save intermediate output incase of crash or timeout filename1 = "datadump/second stage/saved_ed_ex_list_iter"+str(n_iter)+".txt" with open(filename1, 'w') as f: for item in scraped_ed_ex_data: f.write(f'{item}\n') sys.stdout.flush() filename2 = "datadump/second stage/saved_verif_list_iter"+str(n_iter)+".txt" with open(filename2, 'w') as f: for item in scraped_verif_data: f.write(f'{item}\n') sys.stdout.flush() # Iteration delay sleep(randint(1,3)) # - child_url_list_pre[child_url_list_pre['url'] == "/doctors/chelsea-pyle-1116030"] # # Progress Tracker # # #### Started program @ 4:59PM on Tuesday (3/7) # # Iteration (radiologists): 1000 ----- Time Elapsed: 0:45:04 # Iteration (radiologists): 2000 ----- Time Elapsed: 1:31:15 # # ConnectionError: HTTPSConnectionPool(host='health.usnews.com', port=443): Max retries exceeded with url: //doctors/chelsea-pyle-1116030 (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x00000200810717F0>: Failed to establish a new connection: [Errno 11001] getaddrinfo failed')) # # # ## KEY UPDATE: 3/8/22 - I REALIZED THE ACTUALLY TRAINING SPECIALTY DATA IS NO LONGER PRESENT ON THE SITE AND CONTACTED US.HEALTH.NEWS. IF ABLE TO CONTINUE, I LEFT OFF THIS PROGRAM HAVING TRIED TO RUN THE FULL PRESUMABLY READY LOOP ONLY TO SEE THAT: # # ### (A) FOR SOME ED_EX_DATA INFO WAS MISSING OR JUMBLED, SO I FOUND THAT THERE WAS AN ADDITIONAL <\P> CLASS THAT CAPTURED MED SCHOOL AND OTHER TRAINING IN MANY CASES. 
(BELOW; SOME OF THIS NEEDS TO BE TRANSLATED TO THE FULL LOOP; I HAVEN'T DONE YET) # # ### (B) BARELY ANY OF THE VERIF_DATA MADE IT THROUGH - I HAVE NOT GOTTEN AROUND TO FIXING THIS UP YET, BUT I THINK I'LL NEED TO INCLUDE SOME MORE TRY/EXCEPT BLOCKS OR LOOK FOR MISSING TAG CLASSES. # # - When trying to fix everything up run the loop through 100 people and try to produce effectively the end results (or at least what I'd send to SAS for further processing). # # ### SEPARATELY, I FIXED UP THE POST-LOOP DATA PROCESSING STEP TO CONVERT TO DATA FRAME (BELOW). # # - Although, it may be best to entirely save the preprocessing until after all data is pulled to avoid loosing stuff I don't want to lose. # + #scraped_ed_ex_data ## Create and format a DF from the list of dictionaries scraped_ed_ex_data = pd.DataFrame.from_dict(scraped_ed_ex_data) # Remove rows where License is recorded scraped_ed_ex_data = scraped_ed_ex_data[scraped_ed_ex_data['organization_element'].str.contains("License") == False] # Remove redundant organization data after comma in education_element counter = 0 for x in scraped_ed_ex_data['education_element']: if "," in x: x = x.split(",")[:-1] scraped_ed_ex_data["education_element"][counter] = x[0] #remove the annoying brackets counter += 1 # View DF scraped_ed_ex_data scraped_ed_ex_data.to_csv("scraped_ed_ex_data.csv", encoding='utf-8', index=True) # - scraped_verif_data # + child_url = "/doctors/richard-duszak-130940" prefix = 'https://health.usnews.com' user_agent = {'User-agent' : 'Mozilla/5.0'} page = requests.get(prefix+child_url, headers=user_agent) soup = BeautifulSoup(page.text, "html.parser") results = soup.find(id="experience") # Find all HTML relating to Education & Experience (includes med school & residency, licenses, etc.) 
ed_ex_elements = results.find_all("div", class_= "EducationAndExperience__Item-dbww3o-0 eUTnkN") ed_ex_elements2 = results.find_all("div", class_= "EducationAndExperience__Item-dbww3o-0 bMIddY") for add_element in ed_ex_elements2: ed_ex_elements.append(add_element) # Pick out the child element I'm interested in for ed_ex_element in ed_ex_elements: organization_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 eRvRyE").text try: education_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 hwNctc").text except: pass print(organization_element) print(education_element) print() # v-space between elements # + ## Trying to feed the previous block into a DF vs. print # Gather the radiologists' data radiologist_name = "richard-duszak" radiologist_id = 1 # create a list to store the data scraped_data_test = [] for ed_ex_element in ed_ex_elements: # initialize the dictionary radiologist_details = {} # parse out the exact text we want try: organization_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 eRvRyE").text except: pass try: education_element = ed_ex_element.find("p", class_="Paragraph-sc-1iyax29-0 hwNctc").text except: pass # add data to the dictionary radiologist_details['radiologist_id'] = radiologist_id radiologist_details['radiologist_name'] = radiologist_name radiologist_details['organization_element'] = organization_element radiologist_details['education_element'] = education_element # append the dictionaries to the list scraped_data_test.append(radiologist_details) scraped_data_test # + ## Create and format a DF from the list of dictionaries scraped_data = pd.DataFrame.from_dict(scraped_data_test) # Remove rows with is recorded import numpy as np idx = np.where( (scraped_data['organization_element'].str.contains("License") == False) & (scraped_data['education_element'].str.contains("Active through") == False) & (scraped_data['organization_element'].str.contains("Scholarship") == False) & 
(scraped_data['organization_element'].str.contains("List") == False) ) scraped_data = scraped_data.loc[idx] scraped_data = pd.concat([scraped_data, scraped_data["education_element"].str.split(', ', expand=True)], axis=1) # Remove redundant data after commas # scraped_data["organization_element2"] = ["" for i in scraped_data["organization_element"]] # counter = 0 # for x in scraped_data['education_element']: # if "," in x: # if "Other Training" not in x: # scraped_data["organization_element2"][counter] = # x = x.split(",")[:-1] # scraped_data["education_element"][counter] = x[0] #[0] --> remove the annoying brackets # counter += 1 # View DF scraped_data.reset_index()
P2_BeautifulSoup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Raw Strings # Difference between raw strings and normal strings print("string1","C:\desktop\natalie") # normal string print("\n") print("string2",r"C:\desktop\natalie") # # re.match() # + import re # match a word at the beginning of a string result = re.match('Data Science', r'Data Science is the hottest job of the 21st century.') print(result) result_2 = re.match('hottest', r'Data Science is the hottest job of the 21st century.') print(result_2) # - print(result.group()) # # re.search() result = re.search('founded', r'<NAME> founded Tesla. He also founded SpaceX.') print(result) # Also returns a match object. print(result.group()) # # re.findall() result = re.findall('founded', r'<NAME> founded Tesla. He also founded SpaceX.') print(result) import re pattern = r'\d\d\d-\d\d\d-\d\d\d\d' string = 'Cell: 415-555-9999 Work: 212-555-0000' re.findall(pattern,string) re.findall(r'[abc]','a ab abc de') # # regex Summary # Import the regex module import re # Create a regex object with re.compile() phoneRegex = re.compile(r'\d\d\d-\d\d\d-\d\d\d\d') # Pass the string to be searched into the search method match_object = phoneRegex.search('Cell: 415-555-9999 Work: 212-555-0000') match_object # Call the match object's group() method to return a string of the matched text match_object.group() phoneNumRegex = re.compile(r'(\d\d\d)-(\d\d\d-\d\d\d\d)') mo = phoneNumRegex.search('My number is 415-555-4242.') mo # # Square Brackets pattern = r'[abc]' # same as [a-c] string1 = 'Abc,xyz,de' # 2 matches string2 = 'car,aec' # 4 matches string3 = 'def' # no match print(re.findall(pattern,string1)) print(re.findall(pattern,string2)) print(re.findall(pattern,string3)) # # phone Number program without regex # + # Without using Regular Expressions def isPhoneNumber(text): 
if len(text) != 12: # not size of phone number return False for i in range(0,3): if not text[i].isdecimal(): return False # no area code if text[3] != '-': return False # missing dash for i in range(4,7): if not text[i].isdecimal(): return False # no first 3 digits if text[7] != '-': return False # missing dash for i in range(8,12): if not text[i].isdecimal(): return False # no last 4 digits return True message = 'Call me at 132-222-7218, or at 132-222-1111' foundNumber = False for i in range(len(message)): chunk = message[i:i+12] if isPhoneNumber(chunk): print('Phone number found: ' + chunk) foundNumber = True if not foundNumber: print('Could not find any phone numbers.') # - # # phone Number program Using Regexes # Using Regular Expressions a = re.findall(r'\d{3}-\d{3}-\d{4}', 'Call me at 132-222-7218, or at 132-222-1111') for i in range(len(a)): print('Phone Number found: '+ a[i]) # # Period pattern = r'.s' # match string containing any character followed by 's' string1 = 'zs,as,es' # 3 matches string2 = '&s,42sc,:s' # 3 matches string3 = '-s' # 1 match print(re.findall(pattern,string1)) print(re.findall(pattern,string2)) print(re.findall(pattern,string3)) # # Caret pattern = r'^B' # match any string that starts with 'A' string1 = 'basketball' # no match string2 = 'boB' # no match string3 = 'Bat' # 1 match print(re.findall(pattern,string1)) print(re.findall(pattern,string2)) print(re.findall(pattern,string3)) # # Dollar pattern = r'ing$' # match any string that ends with 'ing' string1 = 'Joking' # 1 match string2 = 'CALLING' # no match print(re.findall(pattern,string1)) print(re.findall(pattern,string2)) # # Question Mark pattern = r'bat(woman|man|girl)?' 
# match 0 or 1 instance of 'woman' or 'bat' or 'girl' attached to the word 'bat' string1 = 'Batman, bat, batgirl, batwoman' # 3 matches print(re.findall(pattern,string1)) # # Braces # + pattern = r'(wow){3,5}' # matches 3 to atmost 5 instances of the word wow wowRegex = re.compile(pattern) # Examples string1 = 'wowwowwow' # match string2 = 'wowwowwowwowwow' # match string3 = 'wow' # no match mo1 = wowRegex.search(string1) mo2 = wowRegex.search(string2) print(mo1.group()) print(mo2.group()) print(re.findall(pattern,string3)) # re.group() would return error here as there is no match # - # # Star pattern = r'da*ta' # match 0 or more occurences of the first 'a' in 'data' string1 = 'data' # match string2 = 'daaaaata' # match string3 = 'dta' # match print(re.findall(pattern,string1)) print(re.findall(pattern,string2)) print(re.findall(pattern,string3)) # # Plus pattern = r'da+ta' # match 1 or more occurences of the first 'a' in 'data' string1 = 'data' # match string2 = 'daaaaata' # match string3 = 'dta' # no match print(re.findall(pattern,string1)) print(re.findall(pattern,string2)) print(re.findall(pattern,string3)) # ### Extract Grocery list using Regexes # + message = '''I am going to the grocery tomorrow and need to get the following: 3 onions, 1 bread, 1 Milk, 10 bananas, 5 peppers and 12 oranges.''' groceryRegex = re.compile(r'''\d+ # one or more digits \s # space character \w+ # one or more words ''', re.VERBOSE) # re.VERBOSE can be used to make regexes more readable print(groceryRegex.findall(message)) # - # # Re.match # + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="LiXEkNiFsuyl" outputId="fabb06b0-d45b-4730-ccf8-77ab0ac5a9c2" import re #match a word at the beginning of a string result = re.match('Data Science',r'Data Science is the hottest job of the 21st century.') print(result) result_2 = re.match('largest',r'Data Science is the hottest job of the 21st century.') print(result_2) # + colab={"base_uri": 
"https://localhost:8080/", "height": 34} colab_type="code" id="UH1qzis2ebvp" outputId="8f61d1ce-cd38-41aa-cc0a-58e28cde7fb7" print(result.group()) #returns the total matches # + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="LiXEkNiFsuyl" outputId="fabb06b0-d45b-4730-ccf8-77ab0ac5a9c2" import re #matches a word at the beginning of a string result = re.match('Analytics',r'Analytics Vidhya is the largest data science community of India') print(result) result_2 = re.match('largest',r'Analytics Vidhya is the largest data science community of India') print(result_2) # - print(result.group()) #returns the total matches # # Re.search result = re.search('founded',r'<NAME> founded Tesla. He also founded SpaceX.') print(result) # Also returns a match object print(result.group()) # use .group() to return total matches # # Re.findall result = re.findall('founded',r'<NAME> founded Tesla. He also founded SpaceX.') print(result)
Regexes/.ipynb_checkpoints/Regexes-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (Data Science) # language: python # name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0 # --- # # NOTE: THIS NOTEBOOK RUNS FOR 30+ MINUTES. # # # PLEASE BE PATIENT # # Train a Model with SageMaker Autopilot # # We will use Autopilot to predict the star rating of customer reviews. Autopilot implements a transparent approach to AutoML. # # For more details on Autopilot, have a look at this [**Amazon Science Publication**](https://www.amazon.science/publications/amazon-sagemaker-autopilot-a-white-box-automl-solution-at-scale). # # [![](img/amazon_science.png)](https://www.amazon.science/publications/amazon-sagemaker-autopilot-a-white-box-automl-solution-at-scale) # # <img src="img/autopilot-transparent.png" width="80%" align="left"> # # Introduction # Amazon SageMaker Autopilot is a service to perform automated machine learning (AutoML) on your datasets. Autopilot is available through the UI or AWS SDK. In this notebook, we will use the AWS SDK to create and deploy a text processing and star rating classification machine learning pipeline. # # Setup # # Let's start by specifying: # # * The S3 bucket and prefix to use to train our model. _Note: This should be in the same region as this notebook._ # * The IAM role of this notebook needs access to your data. # # Notes # * This notebook will take some time to finish. # # * You can start this notebook and continue to the next notebooks whenever you are waiting for the current notebook to finish. 
# # Checking Pre-Requisites From Previous Notebook # %store -r autopilot_train_s3_uri try: autopilot_train_s3_uri print("[OK]") except NameError: print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") print("[ERROR] PLEASE RUN THE PREVIOUS 01_PREPARE_DATASET_AUTOPILOT NOTEBOOK.") print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") print(autopilot_train_s3_uri) if not autopilot_train_s3_uri: print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") print("[ERROR] PLEASE RUN THE PREVIOUS 01_PREPARE_DATASET_AUTOPILOT NOTEBOOK.") print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") else: print("[OK]") # + import boto3 import sagemaker import pandas as pd import json sess = sagemaker.Session() bucket = sess.default_bucket() role = sagemaker.get_execution_role() region = boto3.Session().region_name import botocore.config config = botocore.config.Config( user_agent_extra='dsoaws/1.0' ) sm = boto3.Session().client(service_name="sagemaker", region_name=region, config=config) # - # # Training Data print(autopilot_train_s3_uri) # !aws s3 ls $autopilot_train_s3_uri # ## See our prepared training data which we use as input for Autopilot # !aws s3 cp $autopilot_train_s3_uri ./tmp/ # + import csv df = pd.read_csv("./tmp/amazon_reviews_us_Digital_Software_v1_00_autopilot.csv") df.head() # - # # Setup the S3 Location for the Autopilot-Generated Assets # This include Jupyter Notebooks (Analysis), Python Scripts (Feature Engineering), and Trained Models. 
# + prefix_model_output = "models/autopilot" model_output_s3_uri = "s3://{}/{}".format(bucket, prefix_model_output) print(model_output_s3_uri) # + max_candidates = 3 job_config = { "CompletionCriteria": { "MaxRuntimePerTrainingJobInSeconds": 900, "MaxCandidates": max_candidates, "MaxAutoMLJobRuntimeInSeconds": 5400, }, } input_data_config = [ { "DataSource": {"S3DataSource": {"S3DataType": "S3Prefix", "S3Uri": "{}".format(autopilot_train_s3_uri)}}, "TargetAttributeName": "star_rating", } ] output_data_config = {"S3OutputPath": "{}".format(model_output_s3_uri)} # - # # Check For Existing Autopilot Jobs existing_jobs_response = sm.list_auto_ml_jobs() # + num_existing_jobs = 0 running_jobs = 0 if "AutoMLJobSummaries" in existing_jobs_response.keys(): job_list = existing_jobs_response["AutoMLJobSummaries"] num_existing_jobs = len(job_list) # print('[INFO] You already created {} Autopilot job(s) in this account.'.format(num_existing_jobs)) for j in job_list: if "AutoMLJobStatus" in j.keys(): if j["AutoMLJobStatus"] == "InProgress": running_jobs = running_jobs + 1 print("[INFO] You have {} Autopilot job(s) currently running << Should be 0 jobs.".format(running_jobs)) else: print("[OK] Please continue.") # - # # Launch the SageMaker Autopilot Job from time import gmtime, strftime, sleep # + # %store -r auto_ml_job_name try: auto_ml_job_name except NameError: timestamp_suffix = strftime("%d-%H-%M-%S", gmtime()) auto_ml_job_name = "automl-dm-" + timestamp_suffix print("Created AutoMLJobName: " + auto_ml_job_name) # - print(auto_ml_job_name) # %store auto_ml_job_name # + max_running_jobs = 1 if running_jobs < max_running_jobs: # Limiting to max. 
1 Jobs try: sm.create_auto_ml_job( AutoMLJobName=auto_ml_job_name, InputDataConfig=input_data_config, OutputDataConfig=output_data_config, AutoMLJobConfig=job_config, RoleArn=role, ) print("[OK] Autopilot Job {} created.".format(auto_ml_job_name)) running_jobs = running_jobs + 1 except: print( "[INFO] You have already launched an Autopilot job. Please continue see the output of this job.".format( running_jobs ) ) else: print( "[INFO] You have already launched {} Autopilot running job(s). Please continue see the output of the running job.".format( running_jobs ) ) # - # # Track the Progress of the Autopilot Job # # SageMaker Autopilot job consists of the following high-level steps: # * _Data Analysis_ where the data is summarized and analyzed to determine which feature engineering techniques, hyper-parameters, and models to explore. # * _Feature Engineering_ where the data is scrubbed, balanced, combined, and split into train and validation. # * _Model Training and Tuning_ where the top performing features, hyper-parameters, and models are selected and trained. # # <img src="img/autopilot-steps.png" width="90%" align="left"> # **Autopilot Research Paper: # https://assets.amazon.science/e8/8b/2366b1ab407990dec96e55ee5664/amazon-sagemaker-autopilot-a-white-box-automl-solution-at-scale.pdf** # # Analyzing Data and Generate Notebooks # + job_description_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) while ( "AutoMLJobStatus" not in job_description_response.keys() and "AutoMLJobSecondaryStatus" not in job_description_response.keys() ): job_description_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) print("[INFO] Autopilot Job has not yet started. Please wait. 
") print(json.dumps(job_description_response, indent=4, sort_keys=True, default=str)) print("[INFO] Waiting for Autopilot Job to start...") sleep(15) print("[OK] AutoMLJob started.") # - # # Review the SageMaker `Processing Jobs` # * First Processing Job (Data Splitter) checks the data sanity, performs stratified shuffling and splits the data into training and validation. # * Second Processing Job (Candidate Generator) first streams through the data to compute statistics for the dataset. Then, uses these statistics to identify the problem type, and possible types of every column-predictor: numeric, categorical, natural language, etc. # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/processing-jobs/">Processing Jobs</a></b>'.format( region ) ) ) # - # # The Next Cell Will Show `InProgress` For A Few Minutes. # # ## _Please be patient._ # + # %%time job_status = job_description_response["AutoMLJobStatus"] job_sec_status = job_description_response["AutoMLJobSecondaryStatus"] if job_status not in ("Stopped", "Failed"): while job_status in ("InProgress") and job_sec_status in ("Starting", "AnalyzingData"): job_description_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) job_status = job_description_response["AutoMLJobStatus"] job_sec_status = job_description_response["AutoMLJobSecondaryStatus"] print(job_status, job_sec_status) sleep(15) print("[OK] Data analysis phase completed.\n") print(json.dumps(job_description_response, indent=4, sort_keys=True, default=str)) # - # # View Generated Notebook Samples # Once data analysis is complete, SageMaker AutoPilot generates two notebooks: # * Data Exploration # * Candidate Definition # # Waiting For Generated Notebooks # + job_description_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) while "AutoMLJobArtifacts" not in job_description_response.keys(): job_description_response = 
sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) print("[INFO] Autopilot Job has not yet generated the artifacts. Please wait. ") print(json.dumps(job_description_response, indent=4, sort_keys=True, default=str)) print("[INFO] Waiting for AutoMLJobArtifacts...") sleep(15) print("[OK] AutoMLJobArtifacts generated.") # + job_description_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) while "DataExplorationNotebookLocation" not in job_description_response["AutoMLJobArtifacts"].keys(): job_description_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) print("[INFO] Autopilot Job has not yet generated the notebooks. Please wait. ") print(json.dumps(job_description_response, indent=4, sort_keys=True, default=str)) print("[INFO] Waiting for DataExplorationNotebookLocation...") sleep(15) print("[OK] DataExplorationNotebookLocation found.") # - generated_resources = job_description_response["AutoMLJobArtifacts"]["DataExplorationNotebookLocation"] download_path = generated_resources.rsplit("/notebooks/SageMakerAutopilotDataExplorationNotebook.ipynb")[0] job_id = download_path.rsplit("/", 1)[-1] # + from IPython.core.display import display, HTML if not job_id: print("No AutoMLJobArtifacts found.") else: display( HTML( '<b>Review <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}/{}/{}/sagemaker-automl-candidates/{}/">S3 Generated Resources</a></b>'.format( bucket, prefix_model_output, auto_ml_job_name, job_id ) ) ) # - # # Download Generated Notebooks & Code print(download_path) try: # !aws s3 cp --recursive $download_path . except: print('Could not download the generated resources. Make sure the path is correct.') # # Review The Generated Resources: # !ls ./generated_module/candidate_data_processors # !ls ./notebooks # # Feature Engineering # ### Watch out for SageMaker `Training Jobs` and `Batch Transform Jobs` to start. # # * This is the candidate exploration phase. 
# * Each python script code for data-processing is executed inside a SageMaker framework container as a training job, followed by transform job. # # Note, that feature preprocessing part of each pipeline has all hyper parameters fixed, i.e. does not require tuning, thus feature preprocessing step can be done prior runing the hyper parameter optimization job. # # It outputs up to 10 variants of transformed data, therefore algorithms for each pipeline are set to use # the respective transformed data. # # <img src="img/autopilot-steps.png" width="90%" align="left"> # **Autopilot Research Paper: # https://assets.amazon.science/e8/8b/2366b1ab407990dec96e55ee5664/amazon-sagemaker-autopilot-a-white-box-automl-solution-at-scale.pdf** # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/jobs/">Training Jobs</a></b>'.format( region ) ) ) # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/transform-jobs/">Batch Transform Jobs</a></b>'.format( region ) ) ) # - # # The Next Cell Will Show `InProgress` For A Few Minutes. 
# # ## _Please be patient._ ## # + # %%time job_description_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) job_status = job_description_response["AutoMLJobStatus"] job_sec_status = job_description_response["AutoMLJobSecondaryStatus"] print(job_status) print(job_sec_status) if job_status not in ("Stopped", "Failed"): while job_status in ("InProgress") and job_sec_status in ("FeatureEngineering"): job_description_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) job_status = job_description_response["AutoMLJobStatus"] job_sec_status = job_description_response["AutoMLJobSecondaryStatus"] print(job_status, job_sec_status) sleep(15) print("[OK] Feature engineering phase completed.\n") print(json.dumps(job_description_response, indent=4, sort_keys=True, default=str)) # - # # [INFO] _Feel free to continue to the next workshop section while this notebook is running._ # # Model Training and Tuning # ### Watch out for a SageMaker`Hyperparameter Tuning Job` and various `Training Jobs` to start. # # * All algorithms are optimized using a SageMaker Hyperparameter Tuning job. # * Up to 250 training jobs (based on number of candidates specified) are selectively executed to find the best candidate model. # # <img src="img/autopilot-steps.png" width="90%" align="left"> # **Autopilot Research Paper: # https://assets.amazon.science/e8/8b/2366b1ab407990dec96e55ee5664/amazon-sagemaker-autopilot-a-white-box-automl-solution-at-scale.pdf** # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/hyper-tuning-jobs/">Hyperparameter Tuning Jobs</a></b>'.format( region ) ) ) # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/jobs/">Training Jobs</a></b>'.format( region ) ) ) # - # # The Next Cell Will Show `InProgress` For A Few Minutes. 
# # ## _Please be patient._ # + # %%time job_description_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) job_status = job_description_response["AutoMLJobStatus"] job_sec_status = job_description_response["AutoMLJobSecondaryStatus"] print(job_status) print(job_sec_status) if job_status not in ("Stopped", "Failed"): while job_status in ("InProgress") and job_sec_status in ("ModelTuning"): job_description_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) job_status = job_description_response["AutoMLJobStatus"] job_sec_status = job_description_response["AutoMLJobSecondaryStatus"] print(job_status, job_sec_status) sleep(15) print("[OK] Model tuning phase completed.\n") print(json.dumps(job_description_response, indent=4, sort_keys=True, default=str)) # - # # _Please Wait Until ^^ Autopilot ^^ Completes Above_ # # # [INFO] _Feel free to continue to the next workshop section while this notebook is running._ # Make sure the status below indicates `Completed`. # + # %%time job_description_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) job_status = job_description_response["AutoMLJobStatus"] print(job_status) if job_status not in ("Stopped", "Failed"): while job_status not in ("Completed"): job_description_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) job_status = job_description_response["AutoMLJobStatus"] print(job_status) sleep(10) print("[OK] Autopilot Job completed.\n") else: print(job_status) # - # # Viewing All Candidates # Once model tuning is complete, you can view all the candidates (pipeline evaluations with different hyperparameter combinations) that were explored by AutoML and sort them by their final performance metric. 
candidates_response = sm.list_candidates_for_auto_ml_job( AutoMLJobName=auto_ml_job_name, SortBy="FinalObjectiveMetricValue" ) # ### Check that candidates exist print(candidates_response.keys()) # + while "Candidates" not in candidates_response.keys(): candidates_response = sm.list_candidates_for_auto_ml_job( AutoMLJobName=auto_ml_job_name, SortBy="FinalObjectiveMetricValue" ) print("[INFO] Autopilot Job is generating the Candidates. Please wait.") print(json.dumps(candidates_response, indent=4, sort_keys=True, default=str)) sleep(10) candidates = candidates_response["Candidates"] print("[OK] Candidates generated.") # - print(candidates[0].keys()) # + while "CandidateName" not in candidates[0]: candidates_response = sm.list_candidates_for_auto_ml_job( AutoMLJobName=auto_ml_job_name, SortBy="FinalObjectiveMetricValue" ) candidates = candidates_response["Candidates"] print("[INFO] Autopilot Job is generating CandidateName. Please wait. ") print(json.dumps(candidates, indent=4, sort_keys=True, default=str)) sleep(10) print("[OK] CandidateName generated.") # + while "FinalAutoMLJobObjectiveMetric" not in candidates[0]: candidates_response = sm.list_candidates_for_auto_ml_job( AutoMLJobName=auto_ml_job_name, SortBy="FinalObjectiveMetricValue" ) candidates = candidates_response["Candidates"] print("[INFO] Autopilot Job is generating FinalAutoMLJobObjectiveMetric. Please wait. ") print(json.dumps(candidates, indent=4, sort_keys=True, default=str)) sleep(10) print("[OK] FinalAutoMLJobObjectiveMetric generated.") # - print(json.dumps(candidates, indent=4, sort_keys=True, default=str)) for index, candidate in enumerate(candidates): print( str(index) + " " + candidate["CandidateName"] + " " + str(candidate["FinalAutoMLJobObjectiveMetric"]["Value"]) ) # # Inspect Trials using Experiments API # # SageMaker Autopilot automatically creates a new experiment, and pushes information for each trial. 
# + from sagemaker.analytics import ExperimentAnalytics, TrainingJobAnalytics exp = ExperimentAnalytics( sagemaker_session=sess, experiment_name=auto_ml_job_name + "-aws-auto-ml-job", ) df = exp.dataframe() print(df) # - # # Explore the Best Candidate # Now that we have successfully completed the AutoML job on our dataset and visualized the trials, we can create a model from any of the trials with a single API call and then deploy that model for online or batch prediction using [Inference Pipelines](https://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipelines.html). For this notebook, we deploy only the best performing trial for inference. # The best candidate is the one we're really interested in. best_candidate_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) print(best_candidate_response.keys()) # + while "BestCandidate" not in best_candidate_response: best_candidate_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) print("[INFO] Autopilot Job is generating BestCandidate. Please wait. ") print(json.dumps(best_candidate_response, indent=4, sort_keys=True, default=str)) sleep(10) best_candidate = best_candidate_response["BestCandidate"] print("[OK] BestCandidate generated.") # - print(json.dumps(best_candidate_response, indent=4, sort_keys=True, default=str)) print(best_candidate.keys()) # + while "CandidateName" not in best_candidate: best_candidate_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) best_candidate = best_candidate_response["BestCandidate"] print("[INFO] Autopilot Job is generating BestCandidate CandidateName. Please wait. 
") print(json.dumps(best_candidate, indent=4, sort_keys=True, default=str)) sleep(10) print("[OK] BestCandidate CandidateName generated.") # + while "FinalAutoMLJobObjectiveMetric" not in best_candidate: best_candidate_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) best_candidate = best_candidate_response["BestCandidate"] print("[INFO] Autopilot Job is generating BestCandidate FinalAutoMLJobObjectiveMetric. Please wait. ") print(json.dumps(best_candidate, indent=4, sort_keys=True, default=str)) sleep(10) print("[OK] BestCandidate FinalAutoMLJobObjectiveMetric generated.") # - best_candidate_identifier = best_candidate["CandidateName"] print("Candidate name: " + best_candidate_identifier) print("Metric name: " + best_candidate["FinalAutoMLJobObjectiveMetric"]["MetricName"]) print("Metric value: " + str(best_candidate["FinalAutoMLJobObjectiveMetric"]["Value"])) print(json.dumps(best_candidate, indent=4, sort_keys=True, default=str)) # # View Individual Autopilot Jobs # + while "CandidateSteps" not in best_candidate: best_candidate_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) best_candidate = best_candidate_response["BestCandidate"] print("[INFO] Autopilot Job is generating BestCandidate CandidateSteps. Please wait. ") print(json.dumps(best_candidate, indent=4, sort_keys=True, default=str)) sleep(10) best_candidate = best_candidate_response["BestCandidate"] print("[OK] BestCandidate CandidateSteps generated.") # + while "CandidateStepType" not in best_candidate["CandidateSteps"][0]: best_candidate_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) best_candidate = best_candidate_response["BestCandidate"] print("[INFO] Autopilot Job is generating BestCandidate CandidateSteps CandidateStepType. Please wait. 
") print(json.dumps(best_candidate, indent=4, sort_keys=True, default=str)) sleep(10) best_candidate = best_candidate_response["BestCandidate"] print("[OK] BestCandidate CandidateSteps CandidateStepType generated.") # + while "CandidateStepName" not in best_candidate["CandidateSteps"][0]: best_candidate_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) best_candidate = best_candidate_response["BestCandidate"] print("[INFO] Autopilot Job is generating BestCandidate CandidateSteps CandidateStepName. Please wait. ") print(json.dumps(best_candidate, indent=4, sort_keys=True, default=str)) sleep(10) best_candidate = best_candidate_response["BestCandidate"] print("[OK] BestCandidate CandidateSteps CandidateStepName generated.") # - best_candidate steps = [] for step in best_candidate["CandidateSteps"]: print("Candidate Step Type: {}".format(step["CandidateStepType"])) print("Candidate Step Name: {}".format(step["CandidateStepName"])) steps.append(step["CandidateStepName"]) # + from IPython.core.display import display, HTML display( HTML( '<b>Review Best Candidate <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/processing-jobs/{}">Processing Job</a></b>'.format( region, steps[0] ) ) ) # + from IPython.core.display import display, HTML display( HTML( '<b>Review Best Candidate <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/jobs/{}">Training Job</a></b>'.format( region, steps[1] ) ) ) # + from IPython.core.display import display, HTML display( HTML( '<b>Review Best Candidate <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/transform-jobs/{}">Transform Job</a></b>'.format( region, steps[2] ) ) ) # + from IPython.core.display import display, HTML display( HTML( '<b>Review Best Candidate <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/jobs/{}">Training Job (Tuning)</a></b>'.format( region, steps[3] ) ) ) # + from 
IPython.core.display import display, HTML display( HTML( '<b>Review All <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/processing-jobs/">Processing Jobs</a></b>'.format( region ) ) ) # - # # Review All Output in S3 # # You will see the artifacts generated by Autopilot including the following: # ``` # data-processor-models/ # "models" learned to transform raw data into features # documentation/ # explainability and other documentation about your model # preprocessed-data/ # data for train and validation # sagemaker-automl-candidates/ # candidate models which autopilot compares # transformed-data/ # candidate-specific data for train and validation # tuning/ # candidate-specific tuning results # validations/ # validation results # ``` # + from IPython.core.display import display, HTML display( HTML( '<b>Review All <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}?region={}&prefix=models/autopilot/{}/">Output in S3</a></b>'.format( bucket, region, auto_ml_job_name ) ) ) # - # # See the Containers and Models within the Inference Pipeline # + while "InferenceContainers" not in best_candidate: best_candidate_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name) best_candidate = best_candidate_response["BestCandidate"] print("[INFO] Autopilot Job is generating BestCandidate InferenceContainers. Please wait. 
") print(json.dumps(best_candidate, indent=4, sort_keys=True, default=str)) sleep(10) print("[OK] BestCandidate InferenceContainers generated.") # - best_candidate_containers = best_candidate["InferenceContainers"] for container in best_candidate_containers: print(container["Image"]) print(container["ModelDataUrl"]) print("======================") # # Update Containers To Show Predicted Label and Confidence Score for container in best_candidate_containers: print(container["Environment"]) print("======================") best_candidate_containers[1]["Environment"].update({"SAGEMAKER_INFERENCE_OUTPUT": "predicted_label, probability"}) best_candidate_containers[2]["Environment"].update({"SAGEMAKER_INFERENCE_INPUT": "predicted_label, probability"}) best_candidate_containers[2]["Environment"].update({"SAGEMAKER_INFERENCE_OUTPUT": "predicted_label, probability"}) for container in best_candidate_containers: print(container["Environment"]) print("======================") # # Autopilot Chooses XGBoost as Best Candidate! # # Note that Autopilot chose different hyper-parameters and feature transformations than we used in our own XGBoost model. # # Deploy the Model as a REST Endpoint # Batch transformations are also supported, but for now, we will use a REST Endpoint. 
print(best_candidate["InferenceContainers"]) # %store -r autopilot_model_name try: autopilot_model_name except NameError: timestamp_suffix = strftime("%d-%H-%M-%S", gmtime()) autopilot_model_name = "automl-dm-model-" + timestamp_suffix print("[OK] Created Autopilot Model Name: " + autopilot_model_name) # %store autopilot_model_name # %store -r autopilot_model_arn try: autopilot_model_arn except NameError: create_model_response = sm.create_model( Containers=best_candidate["InferenceContainers"], ModelName=autopilot_model_name, ExecutionRoleArn=role ) autopilot_model_arn = create_model_response["ModelArn"] print("[OK] Created Autopilot Model: {}".format(autopilot_model_arn)) # %store autopilot_model_arn # # Define EndpointConfig Name # + timestamp_suffix = strftime("%d-%H-%M-%S", gmtime()) epc_name = "automl-dm-epc-" + timestamp_suffix print(epc_name) # - # # Define REST Endpoint Name for Autopilot Model # %store -r autopilot_endpoint_name # + timestamp_suffix = strftime("%d-%H-%M-%S", gmtime()) try: autopilot_endpoint_name except NameError: autopilot_endpoint_name = "automl-dm-ep-" + timestamp_suffix print("[OK] Created Autopilot Endpoint Name {}: ".format(autopilot_endpoint_name)) # - variant_name = "automl-dm-variant-" + timestamp_suffix print("[OK] Created Endpoint Variant Name {}: ".format(variant_name)) # %store autopilot_endpoint_name ep_config = sm.create_endpoint_config( EndpointConfigName=epc_name, ProductionVariants=[ { "InstanceType": "ml.m5.large", "InitialInstanceCount": 1, "ModelName": autopilot_model_name, "VariantName": variant_name, } ], ) # %store -r autopilot_endpoint_arn try: autopilot_endpoint_arn except NameError: create_endpoint_response = sm.create_endpoint(EndpointName=autopilot_endpoint_name, EndpointConfigName=epc_name) autopilot_endpoint_arn = create_endpoint_response["EndpointArn"] print(autopilot_endpoint_arn) # %store autopilot_endpoint_arn # + from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" 
href="https://console.aws.amazon.com/sagemaker/home?region={}#/endpoints/{}">SageMaker REST Endpoint</a></b>'.format( region, autopilot_endpoint_name ) ) ) # - # # Store Variables for the Next Notebooks # %store # # Summary # We used Autopilot to automatically find the best model, hyper-parameters, and feature-engineering scripts for our dataset. # # Autopilot uses a transparent approach to generate re-usable exploration Jupyter Notebooks and transformation Python scripts to continue to train and deploy our model on new data - well after this initial interaction with the Autopilot service.
00_quickstart/wip/03_Train_Reviews_Autopilot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 0.5.2 # language: julia # name: julia-0.5 # --- # # Running Stan on Jupyter or JuliaBox # ## Introduction to Stan and Stan.jl on Jupyter and JuliaBox # Stan ( http://mc-stan.org ) is one of several tools available to perform Bayesian sampling. Statistical inference based on Stan's result (the posterior samples in 1 or more chains) are typically done in R, Python or Matlab. Stan.jl uses Stan as an external program and simplifies running Stan models and subsequent inference, all from within Julia, thus providing aother 'interface' to Stan. # # Stan is written in C++ but still attractive to Julia users because it is a very good tool for users interested in Bayesian models that can be efficiently sampled with Stan. It is also supported by a very capable team and community willing to help out with modeling and installation issues. Reasons enough for the existance of Stan.jl. # # For Julia users who prefer a Notebook-like interface (over a Julia REPL + Editor interface), Jupyter is available and this Notebook demonstrates one way of using Stan.jl in a Notebook, including several options for the graphical display of results. # # JuliaBox contributes another major benefit in that it removes the burden of installing Stan from the end users. The user still has a 'private' permanent file system to store Julia scripts that include the Stan model and available data. # # Finally, pushing Stan's envelope can require knowledge of C++, C++ templates, the C++ Boost libraries, R, the Stan modeling language, and its extensions (e.g. functions). This can be a daunting task. 
Stan.jl can't help here but Julia's ecosystem provides options entirely written in Julia, like Mamba.jl ( http://mambajl.readthedocs.org/en/latest/index.html ) and Lora.jl which, if access to lower level MCMC functionality is required, might be a better route. Using Stan.jl will pave the way to such a migration. # ## Getting started, some preliminaries ... # Running below cell is occasionally useful to make sure all packages are up to date. # + #Pkg.update() # - # Stan.jl relies on Mamba.jl's very broad collection of functions for posterior inference and display after the MCMC sampling has been performed by Stan. using Mamba, Stan; # In this example we'll use one of several examples that are included in Stan.jl, i.e. estimating the distribution of the success probability (theta) based on 4 sets of 10 observed outcomes. # # I like to control where I store intermediate and result files. In this case I 'borrow' the Bernoulli directory inside Stan.jl. old = pwd() ProjDir = Pkg.dir("Stan", "Examples", "Mamba", "Bernoulli") cd(ProjDir) # ## A Stan model and its data # Define a simple Stan model. On JuliaBox it is best to define the model inline, but it could also be stored in an external file and uploaded to JuliaBox. bernoullimodel = " data { int<lower=0> N; int<lower=0,upper=1> y[N]; } parameters { real<lower=0,upper=1> theta; } model { theta ~ beta(1,1); y ~ bernoulli(theta); } "; # Define the observed data, in this case the number of observations in the 4 experiments and the outcomes of these observations. This is an array of dictionaries. By default Stan will run 4 chains and in this setup each chain is assigned a dictionary with the results of 1 experiment. If that is an acceptable way to analyze these results depends on the purpose of the 4 experiments. It's used here purely as an illustration, e.g. one could increase the probability of success in one of the experiment by updating the y vector. 
This will show up in the overall value of theta below and in the plots for that chain. bernoullidata = [ Dict("N" => 10, "y" => [0, 1, 0, 1, 0, 0, 0, 0, 0, 1]), Dict("N" => 10, "y" => [0, 1, 0, 0, 0, 0, 1, 0, 0, 1]), Dict("N" => 10, "y" => [0, 0, 0, 0, 0, 0, 1, 0, 1, 1]), Dict("N" => 10, "y" => [0, 0, 0, 1, 1, 0, 0, 1, 0, 1]) ]; # ## Setting up a Stanmodel and sampling from the model # Stanmodel() and stan() are the main functions provided by Stan.jl. For details on the arguments of these functions see the CmdStan documentation and the README in Stan.jl ( https://github.com/goedman/Stan.jl ) stanmodel = Stanmodel(num_samples=1200, thin=2, name="bernoulli", model=bernoullimodel); rc, sim1 = stan(stanmodel, bernoullidata, ProjDir, diagnostics=false); # Check for rc == 0 rc # From above summary you can see that by default Stan.jl runs 4 chains. Stan will not perform any thinning, this is done later on in fact as we defined a thinning factor of 2 in the Stanmodel() call. # # Notice that if we were to run the simulation again the Stan Model would not be compiled unless the model has been changed. # ## Posterior analysis # Stan records 6 variables by default augmented by model specific variables (only theta in this example). Not all of these are suitable for posterior inference. In this example we'll only perform posterior analysis on "theta", "lp__" and "accept_stat__". # # In some cases Stan might record 10s, 100s even 1000s of model specific variables. In those cases a variable like 'posterior' can be used to constrain the number of variables used in posterior analysis. posterior = ["theta", "lp__", "accept_stat__"]; # Apply the subset 'posterior' before calling describe(). sim = sim1[:, posterior, :] describe(sim) # Mamba.jl provides a range of function to diagnose and analyze the simulation result, e.g. gelmandiag() is shown below. 
See the Mamba documentation ( http://mambajl.readthedocs.org/en/latest/index.html ) or the Bernoulli example in Stan.jl for other options. gelmandiag(sim, mpsrf=true, transform=true) # ## Plot the results p = plot(sim, [:trace, :mean, :density, :autocor], legend=true); draw(p, ncol=4) # ## Housekeeping ... # Intermediate results are all in the 'tmp' directory. Results that you might want to keep around are left in the current working directory (ProjDir as defined above). ;ls # What's in the 'tmp' directory? ;ls tmp # ##### You can remove the 'tmp' directory. If you do this, future simulations will require redefinition of the Stanmodel() and recompilation when calling stan()! isdir("tmp") && rm("tmp", recursive=true)
Examples/OtherExamples/NotebookExample/StanBernoulli.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # %matplotlib inline # %pdb off from pprint import pprint import itertools import numpy from metrics import wer, cew, ssr, average, hreff import montecarlo import market import dms import withdrawal import mortality from portfolio import Portfolio import harvesting from decimal import Decimal as D import plot from matplotlib import pyplot as plt import pandas # - def run_one_new(dataset, strategy, hreff_floor=4, debug_cashflows=True): p = Portfolio(600000, 400000) h = strategy(p).harvest() h.send(None) w = withdrawal.VPW(p, h, years_left=35).withdrawals() def add_debug_log(annual): if debug_cashflows: debug.append({ 'Returns' : annual.returns, 'Withdraw' : annual.withdraw_n, 'Portfolio' : annual.portfolio_n, 'Withdraw Orig' : annual.withdraw_pct_orig, 'Stocks' : annual.portfolio_stocks, 'Bonds' : annual.portfolio_bonds, }) cashflows = [] returns = [] debug = [] # Initial withdrawal at start of retirement annual = w.send(None) add_debug_log(annual) cashflows.append(annual.withdraw_pct_orig) for i in dataset: annual = w.send(i) add_debug_log(annual) returns.append(annual.returns) cashflows.append(annual.withdraw_pct_orig) if debug_cashflows: pandas.DataFrame(data=debug).to_csv('WERCOMP-cashflows-%s.csv' % strategy.__name__) w = wer(cashflows, returns) h = hreff(cashflows, returns, floor=D(hreff_floor)/100) return (w, h, returns) def new_calc(strategies, returns): wer_df = pandas.DataFrame(index=numpy.arange(0, 1), columns=[s.__name__ for s in strategies]) hreff_df = pandas.DataFrame(index=numpy.arange(0, 1), columns=[s.__name__ for s in strategies]) returns_df = pandas.DataFrame(index=numpy.arange(0, 1), columns=[i for i in range(60)]) pairs = zip(strategies, itertools.tee(returns, len(strategies))) for (s, r) in pairs: (wer, hreff, returns) = 
run_one_new(r, s) wer_df.loc[0][s.__name__] = wer hreff_df.loc[0][s.__name__] = hreff returns_df.loc[0] = returns + [None for _ in range(60-30)] return (wer_df, hreff_df, returns_df) m = market.Returns_US_1871() retirement = [m.random_year() for x in range(30)] # + strategies = [ harvesting.N_60_RebalanceHarvesting, # harvesting.N_100_RebalanceHarvesting, harvesting.PrimeHarvesting, # harvesting.AltPrimeHarvesting, harvesting.BondsFirst, # harvesting.OmegaNot, # harvesting.Weiss, # harvesting.AgeBased_100, # harvesting.AgeBased_110, # harvesting.AgeBased_120, # harvesting.Glidepath, harvesting.ActuarialHarvesting, ] wer_df, hreff_df, returns_df = new_calc(strategies, retirement) # + d = [(col, series.mean()) for (col, series) in wer_df.iteritems()] series = pandas.Series(dict(d)) series.sort_values(inplace=True) series.plot(kind='bar', title='WER') #print(series) # + d = [(col, series.mean()) for (col, series) in hreff_df.iteritems()] series = pandas.Series(dict(d)) series.sort_values(inplace=True) series.plot(kind='bar', title='HREFF') # -
WER Detailed [Harvest Strategies].ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import tensorflow
tensorflow.keras.__version__

# # Deep Learning with Python — HW2
#
# Pick one of the three example datasets from Chapter 3, apply the
# regularization methods covered in Chapter 4, and compare the models by
# plotting their training histories.
# (Translated from the original Korean markdown.)

# +
from tensorflow.keras.datasets import reuters
import numpy as np
from tensorflow.keras.utils import to_categorical

# Reuters newswire topic classification; keep the 10,000 most frequent words.
(train_data, train_labels), (test_data, test_labels) = reuters.load_data(num_words=10000)

def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode each sequence of word indices into a fixed-length
    float vector of size `dimension` (1.0 where the word occurs)."""
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.
    return results

x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
# 46 mutually exclusive topics -> one-hot labels for categorical crossentropy.
y_train = to_categorical(train_labels)
y_test = to_categorical(test_labels)

# +
from tensorflow.keras import models
from tensorflow.keras import layers

# Baseline model: two 64-unit ReLU layers, no regularization.
original_model = models.Sequential()
original_model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
original_model.add(layers.Dense(64, activation='relu'))
original_model.add(layers.Dense(46, activation='softmax'))

original_model.compile(optimizer='rmsprop',
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])
# -

# NOTE(review): the test set is used as validation_data here; acceptable
# for a homework comparison, but it is then not a held-out test set.
original_hist = original_model.fit(x_train, y_train,
                                   epochs=30, batch_size=512,
                                   validation_data=(x_test, y_test))

# +
import matplotlib.pyplot as plt

# Training vs. validation loss for the unregularized baseline.
original_loss = original_hist.history['loss']
original_val_loss = original_hist.history['val_loss']
epochs = range(1, len(original_loss) + 1)
plt.plot(epochs, original_loss, 'bo', label='Training loss')
plt.plot(epochs, original_val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss of original model')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

# +
from tensorflow.keras import regularizers

# Regularized model: L2 weight decay (0.001) on both hidden layers plus
# 50% dropout after each.
new_model = models.Sequential()
new_model.add(layers.Dense(64, kernel_regularizer=regularizers.l2(0.001),
                           activation='relu', input_shape=(10000,)))
new_model.add(layers.Dropout(0.5))
new_model.add(layers.Dense(64, kernel_regularizer=regularizers.l2(0.001),
                           activation='relu'))
new_model.add(layers.Dropout(0.5))
new_model.add(layers.Dense(46, activation='softmax'))

new_model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
# -

new_hist = new_model.fit(x_train, y_train,
                         epochs=30, batch_size=512,
                         validation_data=(x_test, y_test))

# +
import matplotlib.pyplot as plt

# Training vs. validation loss for the regularized model.
new_loss = new_hist.history['loss']
new_val_loss = new_hist.history['val_loss']
epochs = range(1, len(new_loss) + 1)
plt.plot(epochs, new_loss, 'bo', label='Training loss')
plt.plot(epochs, new_val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss of new model')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

# +
import matplotlib.pyplot as plt

# Direct comparison of the two validation-loss curves.
epochs = range(1, 31)
plt.plot(epochs, original_val_loss, 'b+', label='Original model')
plt.plot(epochs, new_val_loss, 'bo', label='New model')
plt.xlabel('Epochs')
plt.ylabel('Validation loss')
plt.legend()
plt.show()
# -

# Adding L2 regularization and dropout reduces overfitting, but early-stopping
# the original model yields an even lower validation loss.
# (Translated from the original Korean markdown.)
hw2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Random Forest Regression
# (Section headers translated from the original Spanish markdown.)

# # Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# # Import the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values   # position level, kept 2-D for sklearn
y = dataset.iloc[:, 2].values     # salary

# # Fit the Random Forest to the dataset
from sklearn.ensemble import RandomForestRegressor

regression = RandomForestRegressor(n_estimators = 300, random_state = 0)
regression.fit(X, y)

# # Predict with the Random Forest model
y_pred = regression.predict([[6.0], [6.4], [6.5], [6.6], [6.7], [7.0]])
# FIX: the original cell evaluated `y_pred[6.5]`, which raises IndexError —
# a numpy array cannot be indexed by a float. The prediction for level 6.5
# is the third element of y_pred.
y_pred[2]

# # Visualize the Random Forest results
# Dense grid so the piecewise-constant forest prediction is drawn smoothly.
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(X, y, color = "red")
plt.plot(X_grid, regression.predict(X_grid), color = "blue")
plt.title("Modelo de Regresión con Random Forest")
plt.xlabel("Posición del empleado")
plt.ylabel("Sueldo (en $)")
plt.show()
Notebook/2.Regresion/7.random_forest_regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import pandas as pd
import itertools
import matplotlib.pyplot as plt

# Read dataset and split features and label.
# NOTE(review): features are columns 0-8 and the label is column 10;
# column 9 is skipped — confirm that is intentional for this file format.
dataset = pd.read_csv('train_data.txt', sep=' ', names=[i for i in range(11)])
X = dataset.iloc[:, :9].values
y = dataset.iloc[:, 10].values

# Split train set and test set.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)

# Standardize the features.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
# FIX: was sc.fit_transform(X_test). The scaler must be fit on the
# training data only; the test set has to be scaled with the
# training-set mean/std, otherwise the two sets are on different scales
# and test statistics leak into the evaluation.
X_test = sc.transform(X_test)

# Train the classifier.
from sklearn.svm import SVC
classifier = SVC(kernel = 'linear', random_state = 0)
classifier.fit(X_train, y_train)

# Predict and evaluate.
y_pred = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Accuracy = (TP + TN) / total for the 2x2 confusion matrix.
accuracy = (cm[0,0]+cm[1,1])/(sum(cm[0])+sum(cm[1]))

def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        # Normalize each row (true class) to proportions.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    # Use white text on dark cells, black on light cells.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()

plot_confusion_matrix(cm, [1, -1], True)
print('the accuracy rate is', accuracy)
# -
.ipynb_checkpoints/SVM_SKL-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     notebook_metadata_filter: all,-language_info
#     split_at_heading: true
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import pandas as pd

# ## Describing distributions
#
# We have seen several examples of *distributions*.
#
# We can describe distributions as having a *center*, and a *spread*.
#
# In [the mean as predictor](mean_meaning), we saw that the mean is
# a useful measure of the center of a distribution.
#
# What measure should we use for the spread?

# ## Back to chronic kidney disease
#
# We return to the [data on chronic kidney disease](https://matthew-brett.github.io/cfd2019/data/chronic_kidney_disease).
#
# Download the data to your computer via this link: [ckd_clean.csv](https://matthew-brett.github.io/cfd2019/data/ckd_clean.csv).

ckd_full = pd.read_csv('ckd_clean.csv')
ckd_full.head()

# We will use this dataset to get a couple of variables (columns) and
# therefore a couple of distributions.
#
# Let's start with the White Blood Cell Count, usually abbreviated as WBC.

wbc = ckd_full['White Blood Cell Count']
wbc.hist()
# Trailing ';' suppresses the text repr of the Axes object in the notebook.
plt.title('White Blood Cell Count');

wbc.describe()

# Compare this to Hemoglobin concentrations:

hgb = ckd_full['Hemoglobin']
hgb.hist()
plt.title('Hemoglobin');

hgb.describe()

# Notice that we can't easily plot these two on the same axes, because
# their units are so different.
# Here's what that looks like. Notice that the hemoglobin values disappear in a tiny spike to the left.

# Use alpha to make the histograms a little transparent.
# Label them for a legend.
hgb.hist(alpha=0.7, label='HGB')
wbc.hist(alpha=0.7, label='WBC')
plt.title("HGB and WBC together - HGB tiny spike at left")
plt.legend();

# We could try and fix this by subtracting the mean, as a center value, so
# the values are now *deviations* from the mean.

wbc_deviations = wbc - np.mean(wbc)
wbc_deviations.hist()
plt.title('White Blood Cell Count deviations');

hgb_deviations = hgb - np.mean(hgb)
hgb_deviations.hist()
plt.title('Hemoglobin deviations');

# The deviations each have a mean very very close to zero, and therefore,
# they have the same center:

np.mean(wbc_deviations), np.mean(hgb_deviations)

# We still cannot sensibly plot them on the same axes, because the WBC values have
# a very different *spread*. The WBC values completely dominate the x axis of
# the graph. We can't reasonably compare the WBC deviations to the
# Hemoglobin deviations, because they have such different *units*.

hgb_deviations.hist(alpha=0.7, label='HGB')
wbc_deviations.hist(alpha=0.7, label='WBC')
plt.title("HGB and WBC deviations - you can't see HGB")
plt.legend();

# We would like a measure of the spread of the distribution, so we can set
# the two distributions to have the same spread.

# ## The standard deviation

# In the [mean as predictor](mean_meaning) section, we found that mean was
# the best value to use as a predictor, to minimize the sum of *squared*
# deviations.
#
# Maybe we could get an idea of the typical *squared* deviation, as
# a measure of spread?

hgb_deviations[:10]

hgb_dev_sq = hgb_deviations ** 2
hgb_dev_sq[:10]

hgb_dev_sq.hist()
plt.title('HGB squared deviations')

# The center, or typical value, of this distribution, could be the *mean*.

hgb_dev_sq_mean = np.mean(hgb_dev_sq)
hgb_dev_sq_mean

# This is the *mean squared deviation*. This is also called the
# *variance*. Numpy has a function to calculate that in one shot:

# The mean squared deviation is the variance
np.var(hgb)

# The mean squared deviation is a good indicator of the typical squared
# deviation. What should we use for some measure of the typical
# deviation?
#
# We could take the square root of the mean squared deviation, like this:

np.sqrt(hgb_dev_sq_mean)

# This is a measure of the spread of the distribution. It is a measure of
# the typical or average deviation.
#
# It is also called the *standard deviation*.

np.std(hgb)

# We can make our distribution have a standard center *and* a standard
# spread by dividing our mean-centered distribution, by the standard
# deviation. Then the distribution will have a standard deviation very
# close to 1.
#
# This version of the distribution, with mean 0 and standard deviation of
# 1, is called the *standardized* distribution.

standardized_hgb = hgb_deviations / np.std(hgb)
standardized_hgb.hist()
plt.title('Standardized Hemoglobin')

# We can make a function to do this:

def standard_units(x):
    """Convert `x` to standard units (z-scores): deviations from the
    mean, expressed in multiples of the standard deviation."""
    return (x - np.mean(x))/np.std(x)

std_hgb_again = standard_units(hgb)
std_hgb_again.hist()
plt.title('Standardized Hemoglobin, again')

# If we do the same to the WBC, we can compare values of the
# distributions:

std_wbc = standard_units(wbc)
std_wbc.hist()
plt.title('Standardized White Blood Cell Count')

# Now we can put both these distributions on the same graph, to compare them directly.

std_hgb_again.hist(alpha=0.7, label='HGB')
std_wbc.hist(alpha=0.7, label='WBC')
plt.title('Standardized HGB and WBC')
plt.legend()

# Every value in standardized units gives the deviation of the original
# value from its mean, in terms of the number of standard deviations.
ipynb/08/standard_scores.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# # <NAME> - <NAME>
#
# Double-well external potential (Metropolis Monte Carlo sampled below):
#
# \begin{equation}
# V(x)=E_{0}\left[ \left(\frac{x}{a}\right)^4 -2\left(\frac{x}{a}\right)^2 \right]-\frac{b}{a}x
# \end{equation}

import openmm as mm
from openmm import app
from openmm import unit
from openmmtools.constants import kB
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from numpy.random import default_rng
rng = default_rng()

# +
# System definition. (Comments translated from the original Spanish.)
n_particles = 1
mass = 100 * unit.amu

# +
# Create the system and add the particle(s).
system = mm.System()

for ii in range(n_particles):
    system.addParticle(mass)

# +
# Add the external potential to the system.
Eo = 3.0 * unit.kilocalories_per_mole
a = 0.5 * unit.nanometers
b = 0.0 * unit.kilocalories_per_mole      # b = 0 -> symmetric double well
k = 1.0*unit.kilocalories_per_mole/unit.angstrom**2

# Coefficients of the quartic double well in x; D adds a harmonic
# restraint in y and z so the motion is effectively one-dimensional.
A = Eo/(a**4)
B = -2.0*Eo/(a**2)
C = -b/a
D = k/2.0

force = mm.CustomExternalForce('A*x^4+B*x^2+C*x + D*(y^2+z^2)')
force.addGlobalParameter('A', A)
force.addGlobalParameter('B', B)
force.addGlobalParameter('C', C)
force.addGlobalParameter('D', D)

for ii in range(n_particles):
    force.addParticle(ii, [])

_ = system.addForce(force)

# +
# Thermodynamic state and integrator.
# NOTE: the integrator is only needed to build the Context; the sampling
# below is Metropolis Monte Carlo, not Langevin dynamics.
step_size = 0.01*unit.picoseconds
temperature = 300*unit.kelvin
friction = 1.0/unit.picosecond # damping for Langevin dynamics
integrator = mm.LangevinIntegrator(temperature, friction, step_size)
# -

# **Please try replacing 'CPU' with 'CUDA' in the next cell to see whether it runs.**
# NOTE(review): the code already says 'CUDA' while this (translated) note
# implies it should start as 'CPU' — confirm intended platform.

# +
# Create the platform.
platform_name = 'CUDA'
platform = mm.Platform.getPlatformByName(platform_name)
# -

# Create the context (binds system + integrator to the platform).
context = mm.Context(system, integrator, platform)

def movement(lmax):
    # Uniform random displacement in [-lmax, +lmax].
    return lmax * rng.uniform(-1,1)

def decide(Ui, Uf, temperature):
    """Metropolis acceptance test: always accept a downhill move;
    accept an uphill move with probability exp(-(Uf-Ui)/kBT)."""
    kBT = kB * temperature
    accept = False
    if Uf <= Ui:
        accept = True
    else:
        weight = np.exp(- (Uf - Ui)/kBT)
        random = rng.uniform(0,1)
        if weight >= random:
            accept = True
        else:
            accept = False
    return accept

# +
# Initial conditions: particle at x = 5 A (one of the wells' side).
initial_positions = np.zeros([n_particles, 3], np.float32) * unit.angstroms
initial_positions[0,0] = 5.0 * unit.angstroms

mc_steps = 50000
num_trues = 0            # number of accepted moves
mc_traj = np.zeros([mc_steps+1], np.float32) * unit.angstroms
mc_traj[0] = initial_positions[0,0]

for ii in tqdm(range(mc_steps)):
    # NOTE(review): Ui is recomputed from scratch every step even though it
    # equals the last accepted energy — correct, just redundant work.
    context.setPositions(initial_positions)
    state_initial = context.getState(getEnergy=True)
    Ui = state_initial.getPotentialEnergy()
    # Trial move: displace x by up to +/- 4 A.
    final_positions = np.zeros([n_particles, 3], np.float32) * unit.angstroms
    final_positions[0,0] = initial_positions[0,0] + movement(4.0*unit.angstroms)
    context.setPositions(final_positions)
    state_final = context.getState(getEnergy=True)
    Uf = state_final.getPotentialEnergy()
    accept = decide(Ui, Uf, temperature)
    if accept == True:
        initial_positions = final_positions
        num_trues += 1
    # Record the current (possibly unchanged) position each step.
    mc_traj[ii+1] = initial_positions[0,0]

acceptance_rate=num_trues/mc_steps
# -

acceptance_rate

plt.scatter(range(mc_steps+1), mc_traj)

mc_traj.mean()
Tarea5/Example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# %matplotlib inline
import pandas as pd
import numpy as np
import urllib
import folium
import json
import matplotlib.pyplot as plt
import matplotlib.dates as dates
import scipy
import scipy.stats as sp
import scipy.optimize as op
import statsmodels.api as sm
from scipy.stats import linregress
from scipy.optimize import curve_fit
from scipy import stats
from datetime import datetime, date, timedelta
from matplotlib.backends.backend_pdf import PdfPages
#from matplotlib.pyplot import cm
import platform
import sys
import glob
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pyproj
from pyproj import Proj
import simplekml
import os
import csv
#import matplotlib
from pylab import rcParams
rcParams['figure.figsize'] = 15, 15
# -

import wellapplication as wa

import arcpy
from arcpy import env
from arcpy.sa import *

# # Read Prism Output files
# Use prism time series downloads to create faux weather stations for swat input.

# NOTE(review): this csv_file value is immediately shadowed by the glob loop
# below; it only documents the expected file naming.
csv_file = r'C:\Users\PAULINKENBRANDT\Downloads\PRISM_ppt_tmin_tmax_provisional_4km_20000101_20170417_41.4218_-111.8846.csv'

# +
lat = []
lon = []
elev=[]
idlist = []
namelist = []
tempnamelist = []

csv.field_size_limit(500 * 1024 * 1024)
csv_dir = 'C:/Users/PAULINKENBRANDT/Downloads/prismfiles/'
i=0
pptfile = {}
tempfile = {}
os.chdir(csv_dir)
for csv_file in glob.glob("*.csv"):
    i += 1
    idlist.append(i)
    namelist.append('PRISM'+str(i))
    tempnamelist.append('PTEMP'+str(i))
    print(csv_file)
    csvFileArray = []
    with open(csv_file, newline='') as csvfile:
        for row in csv.reader(csvfile):
            csvFileArray.append(row)
    # Row 2 of a PRISM export holds "Location: lat: .. lon: .. elev: ..";
    # parse coordinates/elevation out of its string repr.
    loclist = str(csvFileArray[1]).split(':')
    lat.append(float(loclist[2].split(' ')[1]))
    lon.append(float(loclist[3].split(' ')[1]))
    elev.append(float(loclist[4].split(' ')[1][:-3]))
    # Column 1 = precipitation; columns 2,3 = tmin/tmax. The '20000101'
    # header is the series start date expected by SWAT weather files.
    pptfile['PRISM'+str(i)] = pd.read_csv(csv_file,skiprows=10,usecols=[1])
    pptfile['PRISM'+str(i)].columns = ['20000101']
    pptfile['PRISM'+str(i)].to_csv(csv_dir+'outfiles/'+'PRISM'+str(i)+'.txt',index=False)
    tempfile['PTEMP'+str(i)] = pd.read_csv(csv_file,skiprows=10,usecols=[2,3])
    tempfile['PTEMP'+str(i)].columns = ['20000101','']
    tempfile['PTEMP'+str(i)].to_csv(csv_dir+'outfiles/'+'PTEMP'+str(i)+'.txt',index=False)

# Station index files (one row per faux station) for SWAT.
colms = {'ID':idlist,'NAME':namelist,'LAT':lat,'LONG':lon,'ELEVATION':elev}
df = pd.DataFrame(colms)
df = df[['ID','NAME','LAT','LONG','ELEVATION']]
df.to_csv(csv_dir+'outfiles/'+'ppt.txt',index=False)

pptfile['PRISM1']

colms2 = {'ID':idlist,'NAME':tempnamelist,'LAT':lat,'LONG':lon,'ELEVATION':elev}
df2 = pd.DataFrame(colms2)
df2 = df2[['ID','NAME','LAT','LONG','ELEVATION']]
df2.to_csv(csv_dir+'outfiles/'+'temp.txt',index=False)
# -

# Update text headers of temp stations

# +
txt_dir = csv_dir + 'outfiles/'
os.chdir(txt_dir)
for inputFileName in glob.glob("PTEMP*"):
    outputFileName = txt_dir + '/newtemp/' + inputFileName
    with open(inputFileName, newline='') as inFile, open(outputFileName, 'w', newline='') as outfile:
        r = csv.reader(inFile)
        w = csv.writer(outfile)
        next(r, None)  # skip the first row from the reader, the old header
        # write new header
        w.writerow(['20000101'])
        # copy the rest
        for row in r:
            w.writerow(row)
# -

# # Aggregate text files

FullHRU = 'H:/GIS/Ogden_Valley/SWAT_09_2017/SWAT_09_2017.mdb/FullHRU'

hru = pd.read_csv("H:/GIS/Ogden_Valley/SWAT_09_2017/hru.txt")

hru.columns

# NOTE(review): `yearly_hru` is never defined anywhere in this notebook —
# this cell raises NameError as written (probably meant a product of
# aggregate_data below).
yearly_hru.DA_RCHGmm.mean()

# +
arcpy.env.workspace = r'H:\GIS\Ogden_Valley\SWAT_09_2017\OV_input.gdb'
arcpy.env.overwriteOutput = True

def aggregate_data(datatype):
    """Aggregate a SWAT output text file ('hru', 'sub', 'rch', or reservoir)
    into monthly-mean, yearly-sum, and raw tables in the file geodatabase."""
    data = pd.read_csv("H:/GIS/Ogden_Valley/SWAT_09_2017/{:}.txt".format(datatype))
    if datatype == 'hru':
        keepers = ['HRUGIS', 'HRU', 'SUB', 'AREAkm2', 'PRECIPmm', 'MON','YEAR',
                   'SNOWFALLmm', 'SNOWMELTmm', 'IRRmm', 'PETmm', 'ETmm',
                   'SW_INITmm', 'SW_ENDmm', 'PERCmm', 'GW_RCHGmm', 'DA_RCHGmm',
                   'REVAPmm', 'SA_IRRmm', 'DA_IRRmm', 'SA_STmm', 'DA_STmm',
                   'SURQ_GENmm', 'SURQ_CNTmm', 'TLOSS_mm', 'LATQ_mm', 'GW_Qmm',
                   'WYLD_Qmm', 'DAILYCN', 'TMP_AVdgC', 'TMP_MXdgC', 'TMP_MNdgC',
                   'SOL_TMPdgC', 'SOLARmj_m2', 'SNOmm']
        mean_monthly_data = data.groupby(['HRUGIS','MON']).mean().reset_index()
        # HRUGIS is a 9-digit zero-padded identifier; restore padding lost
        # when pandas parsed it as a number.
        mean_monthly_data['HRUGIS'] = mean_monthly_data['HRUGIS'].apply(lambda x: str(x).zfill(9),1)
        yearly_data = mean_monthly_data.groupby(['HRUGIS']).sum().reset_index()
        yearly_data['HRUGIS'] = yearly_data['HRUGIS'].apply(lambda x: str(x).zfill(9),1)
    elif datatype == 'sub' or datatype == 'rch':
        keepers = data.columns
        mean_monthly_data = data.groupby(['SUB','MON']).mean().reset_index()
        yearly_data = mean_monthly_data.groupby(['SUB']).sum().reset_index()
    else:
        # NOTE(review): this reservoir branch overwrites mean_monthly_data
        # immediately and never defines yearly_data, so df_to_gdb(yearly_data,...)
        # below would raise NameError. It is never reached by the calls in
        # this notebook ('hru'/'sub'/'rch') — confirm intent before using it.
        keepers = data.columns
        mean_monthly_data = data.groupby(['RES','MON']).mean().reset_index()
        mean_monthly_data = data.groupby(['RES']).mean().reset_index()
    mean_monthly_data = mean_monthly_data[keepers]
    output = "H:/GIS/Ogden_Valley/SWAT_09_2017/OV_input.gdb/{:}MonthlyData".format(datatype)
    output1 = "H:/GIS/Ogden_Valley/SWAT_09_2017/OV_input.gdb/{:}YearlyData".format(datatype)
    output2 = "H:/GIS/Ogden_Valley/SWAT_09_2017/OV_input.gdb/{:}AllData".format(datatype)

    def df_to_gdb(df,output):
        # Convert the DataFrame to a structured numpy array and write it as
        # a geodatabase table, replacing any existing table.
        x = np.array(np.rec.fromrecords(df.values))
        names = df.dtypes.index.tolist()
        x.dtype.names = tuple(names)
        if arcpy.Exists(output):
            arcpy.Delete_management(output)
        arcpy.da.NumPyArrayToTable(x,output)

    df_to_gdb(mean_monthly_data,output)
    df_to_gdb(yearly_data,output1)
    df_to_gdb(data,output2)

aggregate_data('hru')
aggregate_data('sub')
aggregate_data('rch')
# -

if arcpy.Exists("ov_hru_yearly"):
    arcpy.Delete_management("ov_hru_yearly")
arcpy.MakeFeatureLayer_management("ov_hru_Points", "ov_hru_yearly")
arcpy.AddJoin_management( "ov_hru_yearly", "HRUGIS", "hruYearlyData", "HRUGIS")
arcpy.CopyFeatures_management( "ov_hru_yearly", "ov_hru_yearly")

# +
intfields = ['PRECIPmm', 'SNOWFALLmm', 'SNOWMELTmm', 'IRRmm', 'PETmm', 'ETmm',
             'SW_INITmm', 'SW_ENDmm', 'PERCmm', 'GW_RCHGmm', 'DA_RCHGmm',
             'REVAPmm', 'SA_STmm', 'DA_STmm', 'SURQ_GENmm', 'SURQ_CNTmm',
             'LATQ_mm', 'GW_Qmm', 'WYLD_Qmm','SNOmm']

if arcpy.Exists("ov_hru_monthly"):
    arcpy.Delete_management("ov_hru_monthly")
# NOTE(review): layer created as "ov_hru_monthly" but referenced below as
# "ov_hru_Monthly" — arcpy is usually case-insensitive here, but confirm.
arcpy.MakeFeatureLayer_management("ov_hru_Points", "ov_hru_monthly")
arcpy.AddJoin_management( "ov_hru_Monthly", "HRUGIS", "hruMonthlyData", "HRUGIS")
arcpy.CopyFeatures_management( "ov_hru_Monthly", "ov_hru_Monthly")
# -

for month in range(1,13):
    arcpy.SelectLayerByAttribute_management ( "ov_hru_Monthly", "NEW_SELECTION", "MON = {:}".format(month))
    arcpy.CopyFeatures_management( "ov_hru_Monthly", "ov_hru_Monthly_{:}".format(month))
    for field in intfields:
        # Natural-neighbor interpolation of each measure, clipped to the shed.
        rast = arcpy.sa.NaturalNeighbor("ov_hru_Monthly",field,cell_size=100)
        outExtractByMask = arcpy.sa.ExtractByMask(rast, "Shed")
        outExtractByMask.save("{:}{:}".format(field,str(month).zfill(2)))

# From this output, join the tables to a fishnet of points. The exported feature class can then be exported.

# # Interpolating Points

# +
arcpy.env.workspace = r'H:\GIS\Ogden_Valley\SWAT_09_2017\OV_input.gdb'

def get_field_names(table):
    """Return the field names of an arcpy table, minus OBJECTID."""
    read_descr = arcpy.Describe(table)
    field_names = []
    for field in read_descr.fields:
        field_names.append(field.name)
    field_names.remove('OBJECTID')
    return field_names

get_field_names('ov_hru_yearly')

# +
intfields = ['PRECIPmm', 'SNOWFALLmm', 'SNOWMELTmm', 'IRRmm', 'PETmm', 'ETmm',
             'SW_INITmm', 'SW_ENDmm', 'PERCmm', 'GW_RCHGmm', 'DA_RCHGmm',
             'REVAPmm', 'SA_STmm', 'DA_STmm', 'SURQ_GENmm', 'SURQ_CNTmm',
             'LATQ_mm', 'GW_Qmm', 'WYLD_Qmm','SNOmm']

for field in intfields:
    rast = arcpy.sa.NaturalNeighbor('ov_hru',field,cell_size=100)
    outExtractByMask = arcpy.sa.ExtractByMask(rast, "Shed")
    outExtractByMask.save(field)
# -

# NOTE(review): the remaining cells are out-of-order scratch work:
# `shed_area` is used before it is assigned, and the `10000 m2 /1000`
# cell is not valid Python. Left as found for the notebook's author.
shed_area*810714

10000 m2 /1000

flow_out = (153.22+243.12+97.44) #mm

shed_area = 791.44 * flow_out/1000000
.ipynb_checkpoints/Precip_Processing-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Monte Carlo - Black-Scholes-Merton

# *Suggested Answers follow (usually there are multiple ways to solve a problem in Python).*

# Load the data for Microsoft (‘MSFT’) for the period ‘2000-1-1’ until today.

# The cells below import everything needed and define the helper functions
# for d1, d2, and the Black-Scholes-Merton pricing formula.

# +
import numpy as np
import pandas as pd
from pandas_datareader import data as wb
from scipy.stats import norm

data = pd.read_csv('D:/Python/MSFT_2000.csv', index_col = 'Date')

# +
def d1(S, K, r, stdev, T):
    """First Black-Scholes-Merton auxiliary term.

    S: spot price, K: strike, r: risk-free rate, stdev: annual volatility,
    T: time to maturity in years.
    """
    drift_term = (r + stdev ** 2 / 2) * T
    scale = stdev * np.sqrt(T)
    return (np.log(S / K) + drift_term) / scale

def d2(S, K, r, stdev, T):
    """Second Black-Scholes-Merton auxiliary term (same arguments as d1)."""
    drift_term = (r - stdev ** 2 / 2) * T
    scale = stdev * np.sqrt(T)
    return (np.log(S / K) + drift_term) / scale

def BSM(S, K, r, stdev, T):
    """Black-Scholes-Merton price of a European call option."""
    stock_leg = S * norm.cdf(d1(S, K, r, stdev, T))
    strike_leg = K * np.exp(-r * T) * norm.cdf(d2(S, K, r, stdev, T))
    return (stock_leg) - (strike_leg)
# -

# Store the annual standard deviation of the log returns in a variable, called “stdev”.

# Set the risk free rate, r, equal to 2.5% (0.025); the strike price, K, equal to 110.0; and the time horizon, T, equal to 1, respectively.

# Create a variable S equal to the last adjusted closing price of Microsoft. Use the “iloc” method.

# Call the d1 and d2 functions with the relevant arguments to obtain their values.

# Use the BSM function to estimate the price of a call option, given you know the values of S, K, r, stdev, and T.
23 - Python for Finance/8_Monte Carlo Simulations as a Decision-Making Tool/13_Monte Carlo: Black-Scholes-Merton (6:00)/MC - Black-Scholes-Merton - Exercise_CSV.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 9-Cluster Trajectories # In this notebook we use the results from the previous ones to display trajectory clusters on a map. We have identified the most important stopping locations and the trips among them. Now, we can dive into these and determine the number of different trajectories. # # **Requirements:** # # - Please run the `07-cluster-names.ipynb` notebook first and its dependencies. # - Recommended install: [ipywidgets](https://ipywidgets.readthedocs.io/en/stable/user_install.html). Enable using `jupyter nbextension enable --py widgetsnbextension --sys-prefix` for Jupyter Notebook and `jupyter labextension install @jupyter-widgets/jupyterlab-manager` for Jupyter Lab. # + import numpy as np import pandas as pd import folium import hdbscan import ipywidgets as widgets from sqlapi import VedDb from distances.frechet import FastDiscreteFrechetMatrix, LinearDiscreteFrechet, earth_haversine from tqdm.auto import tqdm from colour import Color from h3 import h3 from shapely.geometry import Polygon from shapely.ops import cascaded_union from ipywidgets import interact, fixed # - # Create an object of the `VedDB` type to interface with the database. db = VedDb() # ## Supporting Functions # # These are the supporting functions for the whole notebook. The `get_trajectories` function returns all the trips between the given cluster identifiers as a NumPy array of arrays. def get_trajectories(cluster_ini, cluster_end): sql = """ select vehicle_id , day_num , ts_ini , ts_end from move where cluster_ini = ? and cluster_end = ?;""" moves = db.query(sql, (cluster_ini, cluster_end)) sql = """ select latitude , longitude from signal where vehicle_id = ? and day_num = ? and time_stamp <= ? 
""" trajectories = [] for move in tqdm(moves): trajectory = db.query_df(sql, parameters=[move[0], move[1], move[3]]) \ .drop_duplicates(subset=["latitude", "longitude"]) \ .to_numpy() trajectories.append(trajectory) return trajectories # The function `get_cluster_hexes` retrieves from the databse all H3 indexes that make up a given cluster. def get_cluster_hexes(cluster_id): sql = "select h3 from cluster_point where cluster_id = ?" hexes = list({h[0] for h in db.query(sql, [cluster_id])}) return hexes # The `get_hexagon` function converts an H3 index to a geospatial polygon. def get_hexagon(h): geo_lst = list(h3.h3_to_geo_boundary(h)) geo_lst.append(geo_lst[0]) return np.array(geo_lst) # The `create_map_polygon` creates a Folium polygon. def create_map_polygon(xy, tooltip='', color='#3388ff', opacity=0.7, fill_color='#3388ff', fill_opacity=0.4, weight=3): points = [[x[0], x[1]] for x in xy] polygon = folium.vector_layers.Polygon(locations=points, tooltip=tooltip, fill=True, color=color, fill_color=fill_color, fill_opacity=fill_opacity, weight=weight, opacity=opacity) return polygon # The `create_map_polyline` creates a Folium map polyline def create_map_polyline(xy, tooltip='', color='#3388ff', opacity=0.7, weight=3): polyline = folium.vector_layers.PolyLine(locations=xy, tooltip=tooltip, color=color, weight=weight, opacity=opacity) return polyline # The `get_trajectory_group_bb` function calculates the bounding box for the group of trajectories reported by `get_trajectories`. def get_trajectory_group_bb(trjs): min_lat = min([t[:, 0].min() for t in trjs]) min_lon = min([t[:, 1].min() for t in trjs]) max_lat = max([t[:, 0].max() for t in trjs]) max_lon = max([t[:, 1].max() for t in trjs]) return [[min_lat, min_lon], [max_lat, max_lon]] # Given a set of HDBSCAN-calculated cluster identifiers, the `get_cluster_colors` function assigns a color to each of the clusters, excluding the outlier indicator (-1). 
def get_cluster_colors(clusters): blue = Color("blue") red = Color("red") ids = np.unique(clusters) if np.isin(-1, ids): color_range = list(blue.range_to(red, ids.shape[0] - 1)) else: color_range = list(blue.range_to(red, ids.shape[0])) return color_range # Creates an HTML Folium map. def create_map(): html_map = folium.Map(prefer_canvas=True, control_scale=True, max_zoom=18, tiles="cartodbpositron") return html_map # The `add_cluster_polygon` function adds the H3-based cluster geofence to an existing Folium map. def add_cluster_polygon(html_map, cluster_id): polygons = [] hexes = get_cluster_hexes(cluster_id) for h in hexes: points = get_hexagon(h) xy = [[x[1], x[0]] for x in points] xy.append([points[0][1], points[0][0]]) polygons.append(Polygon(xy)) merged = cascaded_union(polygons) if merged.geom_type == "MultiPolygon": max_len = 0 largest = None for geom in merged.geoms: xy = geom.exterior.coords.xy lxy = list(zip(xy[1], xy[0])) create_map_polygon(lxy, tooltip=str(cluster_id)).add_to(html_map) elif merged.geom_type == "Polygon": xy = merged.exterior.coords.xy lxy = list(zip(xy[1], xy[0])) create_map_polygon(lxy, tooltip=str(cluster_id)).add_to(html_map) return html_map # The `show_trajectory_group_map` displays all the trajectories in a map, optionally with clustering coloring information. 
def show_trajectory_group_map(html_map, trajectories, clusterer=None):
    """Draw every trajectory on `html_map`, colored per cluster when a
    fitted HDBSCAN `clusterer` is supplied.

    Trajectories labeled -1, or whose outlier score exceeds 0.9, are
    drawn in gray; without a clusterer everything is the default blue.
    The map is finally fitted to the group's bounding box.
    """
    palette = ['#3388ff']
    if clusterer is not None:
        palette = get_cluster_colors(clusterer.labels_)
    for idx, trajectory in enumerate(trajectories):
        if clusterer is None:
            line_color = '#3388ff'
        else:
            label = clusterer.labels_[idx]
            is_outlier = label == -1 or clusterer.outlier_scores_[idx] > 0.9
            line_color = '#777777' if is_outlier else palette[label].hex
        create_map_polyline(trajectory, color=line_color).add_to(html_map)
    html_map.fit_bounds(get_trajectory_group_bb(trajectories))
    return html_map


# The `show_single_trajectory` function displays a map with a single trajectory from the set of existing trajectories. This function is used below to help audit the individual trajectories.

def show_single_trajectory(trajectories, trajectory_num):
    """Build a fresh map showing only trajectory `trajectory_num`,
    framed by the bounding box of the whole trajectory group so that
    individual trajectories can be audited in a consistent viewport.
    """
    single_map = create_map()
    single_map.fit_bounds(get_trajectory_group_bb(trajectories))
    create_map_polyline(trajectories[trajectory_num],
                        color='#3388ff').add_to(single_map)
    return single_map


# ## Trajectory Display
# Here, we display all trajectories together in one Folium map. Use the variables below to set the starting cluster identifier and the ending cluster identifier. A nice query to extract promising pairs of clusters is this:
# ```
# select * from (
#     select cluster_ini
#          , cluster_end
#          , count(move_id) as move_count
#     from move
#     where cluster_ini <> -1 and
#           cluster_end <> -1 and
#           cluster_ini <> cluster_end
#     group by cluster_ini, cluster_end
# ) tt order by tt.move_count desc;
# ```

cluster_ini = 9
cluster_end = 6

# ## Display the Grouped Trajectories
#
# We start by displaying the collected trajectories on a map. For the trips starting at cluster 9 and ending at 6, there are two clear clusters of trajectories along with some outliers.
trajectories = get_trajectories(cluster_ini, cluster_end)
html_map = create_map()
html_map = show_trajectory_group_map(html_map, trajectories)
html_map = add_cluster_polygon(html_map, cluster_ini)
html_map = add_cluster_polygon(html_map, cluster_end)
html_map

# ## Cluster the Trajectories
#
# To cluster the trajectories with HDBSCAN, we must first start by calculating the symmetric distance matrix between each pair of trajectories. The `calculate_distance_matrix` function does just that.

def calculate_distance_matrix(trajectories):
    """Compute the pairwise discrete Fréchet distance matrix.

    :param trajectories: sequence of per-trajectory coordinate arrays
        (as returned by `get_trajectories`).
    :return: (n, n) symmetric float64 NumPy array with a zero diagonal,
        suitable for HDBSCAN's metric='precomputed'.
    """
    n_traj = len(trajectories)
    dist_mat = np.zeros((n_traj, n_traj), dtype=np.float64)
    dfd = FastDiscreteFrechetMatrix(earth_haversine)
    # Only the upper triangle is computed; each value is mirrored so the
    # matrix stays symmetric, as metric='precomputed' requires.
    for i in range(n_traj - 1):
        p = trajectories[i]
        for j in range(i + 1, n_traj):
            q = trajectories[j]
            dist_mat[i, j] = dfd.distance(p, q)
            dist_mat[j, i] = dist_mat[i, j]
    return dist_mat

dist_mat = calculate_distance_matrix(trajectories)

# After calculating the distance matrix, we can now run it through the HDBSCAN algorithm and collect the calculated cluster identifiers.

# BUG FIX: the keyword was misspelled as `cluster_selection_methos`,
# which makes HDBSCAN raise a TypeError before fitting anything.
clusterer = hdbscan.HDBSCAN(metric='precomputed',
                            min_cluster_size=2,
                            min_samples=1,
                            cluster_selection_method='leaf')
clusterer.fit(dist_mat)
clusterer.labels_

# We can now display the colored version of the map above, by using the HDBSCAN-calculated cluster identifiers. Note that outlier trajectories are drawn in gray.

html_map = create_map()
html_map = show_trajectory_group_map(html_map, trajectories, clusterer)
html_map = add_cluster_polygon(html_map, cluster_ini)
html_map = add_cluster_polygon(html_map, cluster_end)
html_map
09-cluster-trajectories.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] nbgrader={"grade": false, "locked": false, "solution": false}
# # PROBABILITY THEORY
#
# BSC (HONS) Mathematics Department
# BSC (HONS) Financial Mathematics Department
# BSC (HONS) Computer Science with Data Analysis Department
#
#
# ## Instructor
#
# Dr. <NAME>
# Mathematics Department
# School of Science, Engineering and Environment
# The University of Salford
#
#
# ## Lecture 4
#
# ______________________________________________________________________________
# -

# # Conditional Probability and Baye's Rule
#
# ## Problem
# There are two urns $A$ and $B$. Urn $A$ contains $r_A$ red balls and $w_A$ white balls whereas urn $B$ contains $r_B$ red balls and $w_B$ white balls. One of the urns is picked at random and then one ball is picked at random from this urn. Here is a function **conditional_probability** that calculates the conditional probability that the randomly chosen ball belonged to urn $A$ given that it is white. Assume that $\frac{r_A}{w_A}\neq\frac{r_B}{w_B}$.

# +
# modify this cell

def conditional__probability(rA, wA, rB, wB):
    """Return P(urn A | white ball) for the two-urn experiment.

    Each urn is chosen with probability 1/2, so that common prior
    cancels in Bayes' rule and the posterior reduces to the ratio of
    the per-urn white-ball probabilities.

    inputs: all of them are of type 'float'
    output: a variable of type 'float'
    """
    white_given_a = wA / (wA + rA)   # P(white | urn A)
    white_given_b = wB / (wB + rB)   # P(white | urn B)
    return white_given_a / (white_given_a + white_given_b)
# -

conditional__probability(2., 4., 3., 3.)
Lecture 6 conditional probability and Bayes rule.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="4WYLR5vQcmx9" colab_type="text" # # Drug Sentiment Analysis # + [markdown] id="hyHH1hlycmyH" colab_type="text" # ## Problem Statement # The dataset provides patient reviews on specific drugs along with related conditions and a 10 star patient rating reflecting overall patient satisfaction. We have to create a target feature out of ratings and predict the sentiment of the reviews. # + [markdown] id="O42sXp6xcmyJ" colab_type="text" # ### Data Description : # # The data is split into a train (75%) a test (25%) partition. # # * drugName (categorical): name of drug # * condition (categorical): name of condition # * review (text): patient review # * rating (numerical): 10 star patient rating # * date (date): date of review entry # * usefulCount (numerical): number of users who found review useful # # The structure of the data is that a patient with a unique ID purchases a drug that meets his condition and writes a review and rating for the drug he/she purchased on the date. Afterwards, if the others read that review and find it helpful, they will click usefulCount, which will add 1 for the variable. # + [markdown] id="klKtZHRAfG3s" colab_type="text" # ### Import all the necessary packages # Here we have imported the basic packages that are required to do basic processing. Feel free to use any library that you think can be useful here. 
# + _uuid="39f7afa7-dce7-4db7-9a37-29b56845a928" _cell_guid="1958231a-20a7-4fbb-ab4e-6422838f6d8a" id="-JszlLjxcmya" colab_type="code" colab={} import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns # %matplotlib inline from matplotlib import style style.use('ggplot') # + [markdown] id="f_EFcuOSgBGD" colab_type="text" # ###Load Data # + _uuid="a75c997f-1fe8-4ffa-af75-be913ed573a9" _cell_guid="a4617fbc-5b2f-4a93-92c2-f4d20a4a1e0a" id="JQr7uyJAcmyi" colab_type="code" colab={} #load the train and test data test = pd.read_csv('mention-data-path') train = pd.read_csv('mention-data-path') # + [markdown] id="naGQpGoJh0a3" colab_type="text" # ###Checking Out The Data # + _uuid="b6ccc833-1706-4291-a3d0-7630f893c7a8" _cell_guid="2d4cc03e-3214-4903-b04b-4689c0d1e349" id="mopAIDJvcmyo" colab_type="code" colab={} #write code to check the head of train data # + _uuid="438d1681-f4e9-4477-9b8a-f758c1902bea" _cell_guid="668db8cc-6276-40d7-b505-745bb685b913" id="0zE_zp5kcmyv" colab_type="code" colab={} #Write code to check head of train data # + _uuid="04adfed9-ade0-478a-8ad7-336d2ca3f90e" _cell_guid="fb19bedf-6959-424e-85bb-743f202d0606" id="DznirfmAcmy4" colab_type="code" colab={} #check the shape of the given dataset #write code to check the shape of train data #write code to check the shape of test data # + _uuid="bd1556ef-e22d-43e3-b153-ddf714704496" _cell_guid="8408627d-46fb-4d37-8a84-fc5fea689ffc" id="GdiSXysQcmy-" colab_type="code" colab={} #write code to check the columns in train data # + [markdown] id="-z10BhItcmzF" colab_type="text" # ## Exploratory Data Analysis # + [markdown] id="M7IhoZRQkoCF" colab_type="text" # The purpose of EDA is to find out interesting insights and irregularities in our Dataset. We will look at Each feature and try to find out interesting facts and patterns from them. And see whether there is any relationship between the variables or not. 
# + [markdown] id="5qAbG0d1cmzG" colab_type="text" # Merge the train and test data as there are no target labels. We will perform our EDA and Pre-processing on merged data. Then we will divide training and testing # + _uuid="4194e136-3c1b-4f4f-81c9-fe385f48b759" _cell_guid="57677c7f-7eca-4f9e-89b0-601d238fa735" id="CT1pOSNocmzH" colab_type="code" colab={} #merge train and test data merge = [train,test] merged_data = #write code to merge the data # write code to check the shape of merged_data # + [markdown] id="fkpaFO3TcmzM" colab_type="text" # ### Check number of uniqueIds to see if there's any duplicate record in our dataset # + id="yGBfvbtXcmzN" colab_type="code" colab={} #write code to check the number of unique ids # + [markdown] id="roKmEDVpcmzU" colab_type="text" # ### Check information of the merged data # + id="bojYC67VcmzX" colab_type="code" colab={} #write code to check the information about merged data # + [markdown] id="jhjX32Gzcmzd" colab_type="text" # ### Check the Description # + id="nsDMm3xJcmze" colab_type="code" colab={} #write code to check the description of merged data '''Note - the description should also include all the categorical variables''' # + [markdown] id="YCzPxwq5cmzk" colab_type="text" # ### Check the Number of null values in each column # + id="ApRzC7M9cmzm" colab_type="code" colab={} #Write code to check null values in merged data # + [markdown] id="cAs_hl2moB-I" colab_type="text" # ### Check number of unique values in drugName and condition # + _uuid="8efe468d-c24d-42fa-b187-4e7cbef98067" _cell_guid="348f25b3-2f96-4e6c-9239-8e262b3ca921" id="XcFPC38icmzs" colab_type="code" colab={} #check number of unique values in drugName #check number of unique values in condition # + [markdown] id="zWo4HuOzcmz6" colab_type="text" # ### Check the top 20 conditions # + id="HDq40lymcmz7" colab_type="code" colab={} #plot a bargraph to check top 20 conditions plt.figure(figsize=(12,6)) #write your code to here # + [markdown] id="AbneidWBcm0C" 
colab_type="text" # ### Plot the bottom 20 conditions # + id="eA4P-gAicm0E" colab_type="code" colab={} #plot a bargraph to check bottom 20 conditions plt.figure(figsize=(12,6)) # write your code to check the bottom-20 conditions # + [markdown] id="0OcLFlQzcm0K" colab_type="text" # ### Check top 20 drugName # + id="UYJgT5p9cm0L" colab_type="code" colab={} #plot a bargraph to check top 20 drugName plt.figure(figsize=(12,6)) #write your code here # + [markdown] id="iaaBJHhfcm0Q" colab_type="text" # ### Check bottom 20 drugName # + id="gaoMxrF0cm0R" colab_type="code" colab={} #plot a bargraph to check top 20 drugName plt.figure(figsize=(12,6)) #write your code here # + [markdown] id="Js1xa2ePcm0X" colab_type="text" # ### Checking Ratings Distribution # + _uuid="631c7e75-ad6f-43d8-95f2-b613758ea6b5" _cell_guid="0c0bd489-5356-47b2-bfcd-4471fe17b588" id="v5HPfi0kcm0X" colab_type="code" colab={} #Write your code to get the value counts in descending order and reset the index as rating and counts # + _uuid="a305ebe8-08f2-4117-8122-efbca8f25045" _cell_guid="f374ff6c-3b26-4bce-837c-deeb3ef60efd" id="9zYfs3Ukcm0f" colab_type="code" colab={} # plot a bar chart to check the distribution of ratings # + [markdown] id="gs2HxyZycm0l" colab_type="text" # ### Check the distribution of usefulCount # + id="W1wOp6COcm0m" colab_type="code" colab={} #Write code to plot a distplot of usefulCount # + id="QAAD3ehQcm0v" colab_type="code" colab={} # Write code to plot a boxplot of usefulCount to see five number summary # + [markdown] id="jqj2LJ7xcm00" colab_type="text" # ### Check number of Drugs per condition # + _uuid="a416f8d1-f057-47c5-be82-37dd97633573" _cell_guid="63dbb417-f980-416f-afef-89b1a06985f6" id="rBveUSnScm01" colab_type="code" colab={} #lets check the number of drugs/condition #write code to check the number of drugs present per condition # + [markdown] _uuid="70a9564b-c106-476b-bdf2-5d0ad084f604" _cell_guid="0019994c-97fc-4eef-8052-29ee8b323379" id="eKlx6myLcm07" 
colab_type="text" # ##### Let's look at ''3 <_/span_> user found this comment helpful' in condtions # + _uuid="59915372-5594-45b7-81f0-6e4cb9241244" _cell_guid="ec1e82ad-f344-41e4-9dac-2b4bb8c26373" id="SG8dE5uCcm09" colab_type="code" colab={} span_data = #write code to get all the records which has conditions values following the pattern('</span>') #print span_data noisy_data_ = #Write code to check percentage span_data out of total records #print percentage of nosiy_data # + _uuid="a8bd5809-dbdb-4a3b-853c-d1cf34bd38d1" _cell_guid="ca811552-9985-46a2-b34a-b16d744f4aab" id="7vEA8jNpcm1D" colab_type="code" colab={} #Write code to drop the noisy data # + [markdown] _uuid="a733971c-4bc6-4f8e-976c-e0ef9f1e4342" _cell_guid="cf78388a-fb11-4333-a4ef-293f17777223" id="KEMbq8LJcm1H" colab_type="text" # ### Now let's look at the not listed/other # + _uuid="0fc68de2-2e80-4bfb-8e49-336b1b9661b5" _cell_guid="d79390e0-756f-4a66-bbda-45dac1a31579" id="u6sPIqehcm1I" colab_type="code" colab={} #Write code to check the percentage of 'not listed / othe' conditions in our dataset # + _uuid="98df698b-0529-446a-bc73-59433d85e586" _cell_guid="f4646fc8-8247-47e0-a037-8874a8e29893" id="iFKG5kUAcm1R" colab_type="code" colab={} #Write code to drop the records where Condition == ''not listed / othe'' # + [markdown] id="kj-JaZBRcm1b" colab_type="text" # ### Now Check number of drugs present per condition after removing noise # + _uuid="b109b30c-d3c0-491a-a077-a0683ba0cbac" _cell_guid="68be6ce0-e2f5-41ae-bca6-40a17f41803f" id="5PLu8Ekncm1e" colab_type="code" colab={} #lets check top-20 condition with higher number of drugs #write your code here to plot a bargraph to see the number of drugs per condition (top-20) # + [markdown] id="QEzBZCflcm1n" colab_type="text" # ### Check bottom 20 drugs per conditions # + _uuid="611efcc6-44b7-414d-b916-d4ebf5b1bced" _cell_guid="462e8087-ad2b-4209-a8e8-ad538caebcc4" id="tQdp4zHZcm1p" colab_type="code" colab={} #Write code to check the number of drugs per 
condtion bottom-20 # + [markdown] id="w6gUk0Cvcm1y" colab_type="text" # ### Now let's check if a single drug can be used for Multiple conditions # + _uuid="e988c512-fd86-4ec5-9c8f-0922e93fc53a" _cell_guid="54903d7b-e41a-46e0-b060-92e99dadd407" id="7uxdlxV6cm1z" colab_type="code" colab={} #let's check if a single drug is used for multiple conditions drug_multiple_cond = #Write code to get the drugName and for number of conditions it is used for print(drug_multiple_cond) # + [markdown] id="NhXKiI4rcm15" colab_type="text" # ### Check the number of drugs with rating 10 # + _uuid="a6ec5c5a-7138-400b-8967-33f96165b420" _cell_guid="61189e32-fe2d-49fe-9f23-d07bde7e07af" id="ddPUv8J8cm16" colab_type="code" colab={} #Write code to check the Number of drugs with rating 10. # + [markdown] id="rTqXlet7cm2H" colab_type="text" # ### Check number of drugs with rating 10 # + _uuid="1c614afc-779a-44c3-9df5-f7004c4a6f5b" _cell_guid="c283c949-2738-478d-898f-c26254f78d73" id="SFk_S3cRcm2I" colab_type="code" colab={} #Check top 20 drugs with rating=10/10 #Write code to check top-20 drugName with rating-10 # + [markdown] id="_eq-CLzzcm2a" colab_type="text" # ### Top 10 drugs with 1/10 Rating # + _uuid="76dc7ad7-a7b8-4cf6-a15e-12da70a1e98f" _cell_guid="ee58eaca-ab67-4923-a785-170dc603a4d3" id="fgYv_ilhcm2b" colab_type="code" colab={} #check top 20 drugs with 1/10 rating #Write your code to check the top-20 drugs with rating 1/10 # + [markdown] id="1Kzuzb09cm2m" colab_type="text" # ### Now we will look at the Date column # + _uuid="f4f672e4-f608-4687-9a3d-eca4dbdd2fef" _cell_guid="1e2bfea6-502c-4d99-87de-3b0b91a709fe" id="bUjmCjnNcm2n" colab_type="code" colab={} # convert date to datetime and create year andd month features merged_data['date'] = pd.to_datetime(merged_data['date']) merged_data['year'] = merged_data['date'].dt.year #create year merged_data['month'] = merged_data['date'].dt.month #create month # + [markdown] id="ZCw3puzHcm2r" colab_type="text" # ### Check Number of reviews 
per year # + id="YCyPBLrtcm2r" colab_type="code" colab={} #plot number of reviews year wise count_reviews = merged_data['year'].value_counts().sort_index() #plot a bargraph to check number of reviews per year # + [markdown] id="QwTXKojLcm25" colab_type="text" # ### Check average rating per year # + id="2j-cIxT5cm26" colab_type="code" colab={} #check average rating per year yearly_mean_rating = merged_data.groupby('year')['rating'].mean() #Write code to plot a bargraph showing average rating per year # + [markdown] id="w2vyLlI2cm3E" colab_type="text" # ### Per year drug count and Condition count # + id="9QTt3mrFcm3E" colab_type="code" colab={} year_wise_condition = merged_data.groupby('year')['condition'].nunique() #plot a bargraph to check the condtions per year # + id="P_ATgB1wcm3L" colab_type="code" colab={} #check drugs year wise year_wise_drug = merged_data.groupby('year')['drugName'].nunique() #plot a bargraph to check the drugName per year # + [markdown] id="nEpTmp5rcm3V" colab_type="text" # ## Data Pre-Processing # + [markdown] id="3IH8bI913OcH" colab_type="text" # Data Pre-processing is a vital part in model building. **"Garbage In Garbage Out"**, we all have heard this statement. But what does it mean. It means if we feed in garbage in our data like missing values, and different features which doesn't have any predictive power and provides the same information in our model. Our model will be just making a random guess and it won't be efficient enough for us to use it for any predictions. # + [markdown] id="v6WRL0tm4SlF" colab_type="text" # We will remove those unwanted features and noise from our data. We also know that we can only feed in numerical values in our model but here we have numerical as well as categorical features as well. We will transform those categorical features into numric values. 
# + id="vC4AiVv9cm3W" colab_type="code" colab={} # Write code to check the null values # + id="e0RuG0S1cm3p" colab_type="code" colab={} #Write code to drop the null values # + [markdown] id="tU76Kej5cm3s" colab_type="text" # ### Pre-Processing Reviews # + id="_X3Jy9HKcm3u" colab_type="code" outputId="d48554b9-9c87-4a12-fe66-bb4b044a9e0d" colab={} #check first three reviews for i in merged_data['review'][0:3]: print(i,'\n') # + [markdown] id="t8ybYT9pcm37" colab_type="text" # ### Steps for reviews pre-processing. # * **Remove HTML tags** # * Using BeautifulSoup from bs4 module to remove the html tags. We have already removed the html tags with pattern "64</_span_>...", we will use get_text() to remove the html tags if there are any. # * **Remove Stop Words** # * Remove the stopwords like "a", "the", "I" etc. # * **Remove symbols and special characters** # * We will remove the special characters from our reviews like '#' ,'&' ,'@' etc. # * **Tokenize** # * We will tokenize the words. We will split the sentences with spaces e.g "I might come" --> "I", "might", "come" # * **Stemming** # * Remove the suffixes from the words to get the root form of the word e.g 'Wording' --> "Word" # + id="sUlPPZxocm38" colab_type="code" colab={} #import the libraries for pre-processing from bs4 import BeautifulSoup import nltk import re from nltk.corpus import stopwords from nltk.stem.snowball import SnowballStemmer stops = set(stopwords.words('english')) #english stopwords stemmer = SnowballStemmer('english') #SnowballStemmer def review_to_words(raw_review): # 1. Delete HTML review_text = #Write code to delete all the html tags # 2. Make a space letters_only = #Write code to set spaces between words # 3. lower letters words = #write code to lower all the reviews and split it # 5. Stopwords meaningful_words = #Write code to remove stopwords # 6. Stemming stemming_words = #write code to apply stemming on meaningful_words # 7. 
space join words return( ' '.join(stemming_words)) # + id="-XKsRJ6xcm4O" colab_type="code" colab={} #apply review_to_words function on reviews # + [markdown] id="ow0D8sS2cm4a" colab_type="text" # ### Now we will create our target variable "Sentiment" from rating # + id="UEZq_H6acm4b" colab_type="code" colab={} #create sentiment feature from ratings #if rating > 5 sentiment = 1 (positive) #if rating < 5 sentiment = 0 (negative) #Write your code here # + [markdown] id="wj4FRLv7cm4g" colab_type="text" # ## Building Model # + id="ey3Sjckecm4h" colab_type="code" colab={} #import all the necessary packages from sklearn.model_selection import train_test_split #import train_test_split from sklearn.feature_extraction.text import TfidfVectorizer #import TfidfVectorizer from sklearn.metrics import confusion_matrix #import confusion_matrix from sklearn.naive_bayes import MultinomialNB #import MultinomialNB from sklearn.ensemble import RandomForestClassifier #import RandomForestClassifier # + [markdown] id="W4q5AtMDcm4u" colab_type="text" # ### TfidfVectorizer (Term frequency - Inverse document frequency) # We all know that we cannot pass raw text features in our model. We have to convert them into numeric values. We will use TfidfVectorizer to convert our reviews in Vectors.\ # # **TF - Term Frequency** :- # # How often a term t occurs in a document d. 
# # TF = (_Number of occurences of a word in document_) / (_Number of words in that document_) # # **Inverse Document Frequency** # # IDF = log(Number of sentences / Number of sentence containing word) # # **Tf - Idf = Tf * Idf** # # + id="QtaEWDJPcm4v" colab_type="code" colab={} # Creates TF-IDF vectorizer and transforms the corpus vectorizer = TfidfVectorizer() reviews_corpus = #fit the vectorizer on reviews reviews_corpus.shape # + [markdown] id="Mhk23gVncm49" colab_type="text" # ### **Store Dependent feature in sentiment and split the Data into train and test** # + id="-Np32Fsrcm4-" colab_type="code" colab={} #dependent feature sentiment = #Write code to store target feature i.e sentiment in sentiment variable #write code to check the shape # + id="EryFSJ6Lcm5C" colab_type="code" colab={} #split the data in train and test X_train,X_test,Y_train,Y_test = #Write code to split data into training and testing (test_size = 0.33) #check shape of training set #check shape of testing set # + [markdown] id="K2llP8TCcm5F" colab_type="text" # ### Apply Multinomial Naive Bayes # + id="0aok9ovXcm5G" colab_type="code" colab={} #fit the model and predicct the output clf = MultinomialNB() #fit the training data pred = #predict the sentiment for test data #Write code to check accuracy #print confusion matrix # + [markdown] id="H0Edz8uscm5L" colab_type="text" # ### Apply RandomForest # + id="lVXXSF6Fcm5M" colab_type="code" colab={} #fit the model and predicct the output clf = RandomForestClassifier() #Write code to fit training data pred = # Predict the target labels #Write code to check accuracy #print confusion matrix # + [markdown] id="z9NrJLuGm0KC" colab_type="text" # ##Parameter Tuning # + id="kf79Ssu2cm5P" colab_type="code" colab={} #try different sets of parameters like n_estimators , max_depth, min_samples_leaf etc and choose the best set of parameters. 
# + [markdown] id="bPXRAreZ24ot" colab_type="text" # ## Conclusion # Write down your interpretations about your model and insights here. # + id="uSD_7mglP7WJ" colab_type="code" colab={}
Drug Sentiment Analysis/Drugs_Sentiment_Analysis_Template.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Implementation of the language models # + hide_input=true from fastai.gen_doc.nbdoc import * from fastai.text import * from fastai.text.models import * # - # [`text.models`](/text.models.html#text.models) module fully implements the encoder for an [AWD-LSTM](https://arxiv.org/pdf/1708.02182.pdf), the [transformer model](https://arxiv.org/abs/1706.03762) and the [transformer XL model](https://arxiv.org/abs/1901.02860). They can then plugged in with a decoder to make a language model, or some classifying layers to make a text classifier. # ## Language model modules # + hide_input=false show_doc(AWD_LSTM, title_level=3) # - # The main idea of the article is to use a [RNN](http://www.pnas.org/content/79/8/2554) with dropout everywhere, but in an intelligent way. There is a difference with the usual dropout, which is why you’ll see a [`RNNDropout`](/text.models.awd_lstm.html#RNNDropout) module: we zero things, as is usual in dropout, but we always zero the same thing according to the sequence dimension (which is the first dimension in pytorch). This ensures consistency when updating the hidden state through the whole sentences/articles. # # This being given, there are a total four different dropouts in the encoder of the AWD-LSTM: # # - the first one, embedding dropout, is applied when we look the ids of our tokens inside the embedding matrix (to transform them from numbers to a vector of float). We zero some lines of it, so random ids are sent to a vector of zeros instead of being sent to their embedding vector. This is the `embed_p` parameter. # - the second one, input dropout, is applied to the result of the embedding with dropout. 
We forget random pieces of the embedding matrix (but as stated in the last paragraph, the same ones in the sequence dimension). This is the `input_p` parameter. # - the third one is the weight dropout. It’s the trickiest to implement as we randomly replace by 0s some weights of the hidden-to-hidden matrix inside the RNN: this needs to be done in a way that ensure the gradients are still computed and the initial weights still updated. This is the `weight_p` parameter. # - the fourth one is the hidden dropout. It’s applied to the output of one of the layers of the RNN before it’s used as input of the next layer (again same coordinates are zeroed in the sequence dimension). It isn’t applied to the last output (which will get its own dropout in the decoder).This is the `hidden_p` parameter. # # The other attributes are `vocab_sz` for the number of tokens in your vocabulary, `emb_sz` for the embedding size, `n_hid` for the hidden size of your inner LSTMs (or QRNNs), `n_layers` the number of layers and `pad_token` for the index of an eventual padding token (1 by default in fastai). # # The flag `qrnn=True` replace the inner LSTMs by [QRNNs](https://arxiv.org/abs/1611.01576). # + hide_input=true show_doc(AWD_LSTM.reset) # + hide_input=false show_doc(Transformer, title_level=3) # - # The main idea of this article is to use regular neural net for NLP instead of an RNN, but with lots of attention layers. Intuitively, those attention layers tell the model to pay more interest to this or that world when trying to predict its output. # # It starts from embeddings from `vocab_sz` (number of tokens) to `d_model` (which is basically the hidden size throughout the model), and it will look at inputs of size batch_size by `ctx_len` (for context length). 
We add a positional encoding to the embeddings (since a regular neural net has no idea of the order of words), either learned or coming from [`PositionalEncoding`](/text.models.transformer.html#PositionalEncoding) depending on `learned_pos_enc`. We then have a dropout of `embed_p` followed by `n_layers` blocks of [`MultiHeadAttention`](/text.models.transformer.html#MultiHeadAttention) followed by [`feed_forward`](/text.models.transformer.html#feed_forward). # # In the attention we use `n_heads` with each a hidden state of `d_head` (will default to `d_model//n_heads`). If `mask=True`, a mask will make sure no attention is paid to future tokens (which would be cheating when training a language model). If `scale=True`, the attention scores are scaled by a factor `1 / math.sqrt(d_head)`. A dropout of `attn_p` is applied to the attention scores, then the final result get applied a dropout of `resid_p` before being summed to the original input (residual connection before the layer norm). # # In feed forward, we have two linear layers from `d_model` to `d_inner` and then back. Those have `bias` if that flag is `True` and a dropout of `ff_p` is applied, after each if `double_drop=True`, or just at the end otherwise. `act` is used in the middle as a non-linearity. # + hide_input=false show_doc(TransformerXL, title_level=3) # - # TransformerXL is a transformer architecture with a sort of hidden state formed by the results of the intermediate layers on previous tokens. Its size is determined by `mem_len`. By using this context, those models are capable of learning longer dependencies and can also be used for faster text generation at inference: a regular transformer model would have to reexamine the whole of sequence of indexes generated so far, whereas we can feed the new tokens one by one to a transformer XL (like we do with a regular RNN). 
# + hide_input=false show_doc(TransformerXL.reset) # - # ## Decoders # + hide_input=false show_doc(LinearDecoder, title_level=3) # - # Create a the decoder to go on top of an [`RNNCore`](/text.models.awd_lstm.html#RNNCore) encoder and create a language model. `n_hid` is the dimension of the last hidden state of the encoder, `n_out` the size of the output. Dropout of `output_p` is applied. If a `tie_encoder` is passed, it will be used for the weights of the linear layer, that will have `bias` or not. # + hide_input=false show_doc(PoolingLinearClassifier, title_level=3) # - # The last output, `MaxPooling` of all the outputs and `AvgPooling` of all the outputs are concatenated, then blocks of [`bn_drop_lin`](/layers.html#bn_drop_lin) are stacked, according to the values in [`layers`](/layers.html#layers) and `drops`. # ## Basic NLP modules # On top of the pytorch or the fastai [`layers`](/layers.html#layers), the language models use some custom layers specific to NLP. # + hide_input=true show_doc(EmbeddingDropout, title_level=3) # - # Each row of the embedding matrix has a probability `embed_p` of being replaced by zeros while the others are rescaled accordingly. enc = nn.Embedding(100, 7, padding_idx=1) enc_dp = EmbeddingDropout(enc, 0.5) tst_input = torch.randint(0,100,(8,)) enc_dp(tst_input) # + hide_input=true show_doc(RNNDropout, title_level=3) # - dp = RNNDropout(0.3) tst_input = torch.randn(3,3,7) tst_input, dp(tst_input) # + hide_input=true show_doc(WeightDropout, title_level=3) # - # Applies dropout of probability `weight_p` to the layers in `layer_names` of `module` in training mode. A copy of those weights is kept so that the dropout mask can change at every batch. module = nn.LSTM(5, 2) dp_module = WeightDropout(module, 0.4) getattr(dp_module.module, 'weight_hh_l0') # It's at the beginning of a forward pass that the dropout is applied to the weights. 
tst_input = torch.randn(4,20,5) h = (torch.zeros(1,20,2), torch.zeros(1,20,2)) x,h = dp_module(tst_input,h) getattr(dp_module.module, 'weight_hh_l0') # + hide_input=true show_doc(PositionalEncoding, title_level=3) # + hide_input=true show_doc(DecoderLayer, title_level=3) # + hide_input=true show_doc(MultiHeadAttention, title_level=3) # + hide_input=true show_doc(MultiHeadRelativeAttention, title_level=3) # + hide_input=true show_doc(SequentialRNN, title_level=3) # + hide_input=true show_doc(SequentialRNN.reset) # - # Call the `reset` function of [`self.children`](/torch_core.html#children) (if they have one). # + hide_input=true show_doc(dropout_mask) # - tst_input = torch.randn(3,3,7) dropout_mask(tst_input, (3,7), 0.3) # Such a mask is then expanded in the sequence length dimension and multiplied by the input to do an [`RNNDropout`](/text.models.awd_lstm.html#RNNDropout). # + hide_input=true show_doc(feed_forward) # - # ## Undocumented Methods - Methods moved below this line will intentionally be hidden # + hide_input=false show_doc(WeightDropout.forward) # + hide_input=false show_doc(EmbeddingDropout.forward) # + hide_input=false show_doc(RNNDropout.forward) # - show_doc(WeightDropout.reset) show_doc(PoolingLinearClassifier.forward) show_doc(LinearDecoder.forward) # ## New Methods - Please document or move to the undocumented section
docs_src/text.models.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python (augur)
#     language: python
#     name: augur
# ---

# +
import augur

# import everything from githubapi.py and ghtorrent.py so we can
# just copy and paste our function later
import json
import re
from dateutil.parser import parse
import pandas as pd
import github
import numpy as np
import sqlalchemy as s
import datetime
import requests
import time

# %matplotlib inline

# create an Augur application so we can test our function
# FIX: was `augur.Application()('../augur.cfg')`, which constructed an
# Application and then *called the instance* -- pass the config path to
# the constructor instead.
augur_app = augur.Application('../augur.cfg')
# FIX: was `augurApp.github()` / `augurApp.ghtorrent()` -- `augurApp` was
# never defined (NameError); the variable above is `augur_app`.
github = augur_app.github()
ghtorrent = augur_app.ghtorrent()
# -

def code_reviews(self, owner, repo=None):
    """Fetch open/closed pull requests for ``owner/repo`` and count their reviews.

    Parameters
    ----------
    owner : str
        GitHub organisation or user that owns the repository.
    repo : str, optional
        Repository name (the GitHub API URL requires it; ``None`` yields a 404).

    Returns
    -------
    pandas.DataFrame
        One row per pull request with columns ``pullNum``, ``state``,
        ``createdAt`` and ``num_reviews``.

    Notes
    -----
    Performs one API call for the PR listing plus one per pull request for
    its reviews, authenticated with ``self.GITHUB_API_KEY``. Only the first
    page of pull requests is fetched (GitHub paginates at 30 per page).
    """
    url = 'https://api.github.com/repos/{}/{}/pulls'.format(owner, repo)
    # FIX: renamed from `json` -- the old name shadowed the imported
    # ``json`` module.
    payload = requests.get(url, auth=('user', self.GITHUB_API_KEY)).json()

    dicts = []
    pullNums = []
    for item in payload:
        info = {}
        # repoID
        info['pullNum'] = item['number']
        info['state'] = item['state']
        info['createdAt'] = item['created_at']
        dicts.append(info)
        pullNums.append(item['number'])

    countReviews = []
    # Iterate the PR numbers directly instead of indexing by position.
    for pull_num in pullNums:
        url2 = 'https://api.github.com/repos/{}/{}/pulls/{}/reviews'.format(owner, repo, pull_num)
        j = requests.get(url2, auth=('user', self.GITHUB_API_KEY)).json()
        countReviews = np.append(countReviews, len(j))

    return pd.DataFrame(dicts).join(pd.DataFrame(data=countReviews, columns=['num_reviews']))

# +
# add our new function to the class
augur.GitHubAPI.code_reviews = code_reviews

# test our function on the initialized instance
ld = github.code_reviews('rails', 'rails')
# -

ld

ld['num_reviews'].plot()

# FIX: removed `ld['deletions'].plot()` -- the frame built by
# code_reviews() has no 'deletions' column, so that line raised KeyError.

# +
# ## GraphQL Explorer note
# FIX: the raw GraphQL query below was pasted directly into a code cell,
# which is a Python syntax error; keep it as a string so the notebook runs.
GRAPHQL_REVIEWS_QUERY = """
{
  repository(owner: "OSSHealth", name: "augur") {
    pullRequests(first: 100, after: "Y3Vyc29yOnYyOpHOCy9BZQ==") {
      edges {
        cursor
        node {
          number
          createdAt
          reviews(first: 100) {
            edges {
              node {
                createdAt
                author {
                  login
                }
                createdAt
              }
            }
          }
        }
        cursor
      }
    }
  }
}
"""
notebooks/code-reviews.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # # Neural Net # # ## Import # import the necessary packages from keras.models import Sequential from keras.layers.convolutional import Convolution2D from keras.layers.convolutional import MaxPooling2D from keras.layers.core import Activation from keras.layers.core import Flatten from keras.layers.core import Dense # ## Setting Up a Convolutional Neural Network class CNNet: @staticmethod def build(width, height, depth, classes, weightsPath=None): # initialize the model model = Sequential() # first set of CONV => RELU => POOL model.add(Convolution2D(20, 5, 5, border_mode="same", input_shape=(depth, height, width))) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) # second set of CONV => RELU => POOL model.add(Convolution2D(50, 5, 5, border_mode="same")) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) # set of FC => RELU layers model.add(Flatten()) model.add(Dense(500)) model.add(Activation("relu")) # softmax classifier model.add(Dense(classes)) model.add(Activation("softmax")) # if a weights path is supplied (inicating that the model was # pre-trained), then load the weights if weightsPath is not None: model.load_weights(weightsPath) # return the constructed network architecture return model #
source/deep_learning/face_rating.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# #### Imputing Values
#
# You now have some experience working with missing values, and imputing based on common methods. Now, it is your turn to put your skills to work in being able to predict for rows even when they have NaN values.
#
# First, let's read in the necessary libraries, and get the results together from what you achieved in the previous attempt.

# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import ImputingValues as t
import seaborn as sns
# %matplotlib inline

df = pd.read_csv('./survey_results_public.csv')
df.head()

#Only use quant variables and drop any rows with missing values
num_vars = df[['Salary', 'CareerSatisfaction', 'HoursPerWeek', 'JobSatisfaction', 'StackOverflowSatisfaction']]
df_dropna = num_vars.dropna(axis=0)

#Split into explanatory and response variables
X = df_dropna[['CareerSatisfaction', 'HoursPerWeek', 'JobSatisfaction', 'StackOverflowSatisfaction']]
y = df_dropna['Salary']

#Split into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .30, random_state=42)

lm_model = LinearRegression(normalize=True) # Instantiate
lm_model.fit(X_train, y_train) #Fit

#Predict and score the model
y_test_preds = lm_model.predict(X_test)

"The r-squared score for your model was {} on {} values.".format(r2_score(y_test, y_test_preds), len(y_test))
# -

# #### Question 1
#
# **1.** As you may remember from an earlier analysis, there are many more salaries to predict than the values shown from the above code. One of the ways we can start to make predictions on these values is by imputing items into the **X** matrix instead of dropping them.
#
# Using the **num_vars** dataframe drop the rows with missing values of the response (Salary) - store this new dataframe in **drop_sal_df**, then impute the values for all the other missing values with the mean of the column - store this in **fill_df**.

# +
drop_sal_df = num_vars.dropna(subset=['Salary']) #Drop the rows with missing salaries

# test look
drop_sal_df.head()
# -

#Check that you dropped all the rows that have salary missing
t.check_sal_dropped(drop_sal_df)

# +
# Column-wise mean imputation: each NaN is replaced by its column's mean.
fill_mean = lambda col: col.fillna(col.mean())

fill_df = drop_sal_df.apply(fill_mean, axis=0) #Fill all missing values with the mean of the column.

# test look
fill_df.head()
# -

#Check your salary dropped, mean imputed dataframe matches the solution
t.check_fill_df(fill_df)

# #### Question 2
#
# **2.** Using **fill_df**, predict Salary based on all of the other quantitative variables in the dataset. You can use the template above to assist in fitting your model:
#
# * Split the data into explanatory and response variables
# * Split the data into train and test (using seed of 42 and test_size of .30 as above)
# * Instantiate your linear model using normalized data
# * Fit your model on the training data
# * Predict using the test data
# * Compute a score for your model fit on all the data, and show how many rows you predicted for
#
# Use the tests to assure you completed the steps correctly.

# +
#Split into explanatory and response variables
x = fill_df[['CareerSatisfaction', 'HoursPerWeek', 'JobSatisfaction', 'StackOverflowSatisfaction']]
y = fill_df['Salary']

#Split into train and test
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.30, random_state=42)

#Predict and score the model
lm_model = LinearRegression(normalize=True)
lm_model.fit(x_train, y_train)
y_test_preds = lm_model.predict(x_test)

#Rsquared and y_test
rsquared_score = r2_score(y_test, y_test_preds) #r2_score
length_y_test = len(y_test) #num in y_test

"The r-squared score for your model was {} on {} values.".format(rsquared_score, length_y_test)
# -

# Pass your r2_score, length of y_test to the below to check against the solution
t.r2_y_test_check(rsquared_score, length_y_test)

# This model still isn't great. Let's see if we can't improve it by using some of the other columns in the dataset.
lessons/CRISP_DM/Imputing Values.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:analysis]
#     language: python
#     name: conda-env-analysis-py
# ---

# # Calculate Equilibrium Climate Sensitivity for CMIP6 data using Gregory regression

# ## Goal: Use Gregory regression to calculate ECS $\Delta T$ [Gregory et al 2004](https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2003GL018747)
#
# ### Using CESM2 from the CMIP analysis platform repository on glade

# +
# %matplotlib inline
import numpy as np
import warnings
warnings.filterwarnings('ignore')

import xarray as xr
from matplotlib import pyplot as plt
from scipy import stats

plt.rcParams['figure.figsize'] = (8,5)
# -

# ### Open the surface temperature file for abrupt4xCO2

var="ts"
file="/glade/collections/cmip/CMIP6/CMIP/NCAR/CESM2/abrupt-4xCO2/r1i1p1f1/Amon/"+var+"/gn/latest/"+var+"_Amon_CESM2_abrupt-4xCO2_r1i1p1f1_gn_000101-015012.nc"
ds = xr.open_dataset(file)

# ### Get the cell area to calculate the area weights

areads = xr.open_dataset("/glade/collections/cmip/CMIP6/CMIP/NCAR/CESM2/abrupt-4xCO2/r1i1p1f1/fx/areacella/gn/latest/areacella_fx_CESM2_abrupt-4xCO2_r1i1p1f1_gn.nc")
# Normalized area weights; the sum below is a sanity check that they total 1.
weight=areads.areacella / areads.areacella.sum(dim=('lon','lat'))
weight.sum(dim=('lon','lat'))

# ### Calculate the global mean

# Area-weighted global-mean surface temperature (weights tiled along time).
tsGm=(np.tile(weight,[len(ds.time),1,1])*ds.ts).sum(dim=('lat','lon'))
tsGm.plot()

# ### Calculate the annual mean

tsGm_annual = tsGm.resample(time='A',label='left',loffset='6M').mean(dim='time')
tsGm.plot()
tsGm_annual.plot(marker='o')

# ### Get and calculate net TOA fluxes

# OLR
var="rlut"
filep="/glade/collections/cmip/CMIP6/CMIP/NCAR/CESM2/abrupt-4xCO2/r1i1p1f1/Amon/"+var+"/gn/latest/"+var+"_Amon_CESM2_abrupt-4xCO2_r1i1p1f1_gn_000101-015012.nc"
dsp = xr.open_dataset(filep)
olrGm=(np.tile(weight,[len(dsp.time),1,1])*dsp.rlut).sum(dim=('lat','lon'))

# SW d
var="rsdt"
filep="/glade/collections/cmip/CMIP6/CMIP/NCAR/CESM2/abrupt-4xCO2/r1i1p1f1/Amon/"+var+"/gn/latest/"+var+"_Amon_CESM2_abrupt-4xCO2_r1i1p1f1_gn_000101-015012.nc"
dsp = xr.open_dataset(filep)
swdGm=(np.tile(weight,[len(dsp.time),1,1])*dsp.rsdt).sum(dim=('lat','lon'))

# SW u
var="rsut"
filep="/glade/collections/cmip/CMIP6/CMIP/NCAR/CESM2/abrupt-4xCO2/r1i1p1f1/Amon/"+var+"/gn/latest/"+var+"_Amon_CESM2_abrupt-4xCO2_r1i1p1f1_gn_000101-015012.nc"
dsp = xr.open_dataset(filep)
swuGm=(np.tile(weight,[len(dsp.time),1,1])*dsp.rsut).sum(dim=('lat','lon'))

olrGm_annual = olrGm.resample(time='A',label='left',loffset='6M').mean(dim='time')
swdGm_annual = swdGm.resample(time='A',label='left',loffset='6M').mean(dim='time')
swuGm_annual = swuGm.resample(time='A',label='left',loffset='6M').mean(dim='time')

# Net downward TOA flux: incoming SW minus reflected SW minus outgoing LW.
N = swdGm_annual - swuGm_annual - olrGm_annual
N.plot()

# +
# Create a figure
fig = plt.figure(figsize=(10, 6))

# Ask, out of a 1x1 grid, the first axes.
ax = fig.add_subplot(1, 1, 1)

# Plot times as x-variable and temperatures as y-variable
ax.plot(tsGm_annual,N,marker='o',linestyle='None',color='k')
ax.set_xlabel('Temperature [K]')
ax.set_ylabel('Net TOA flux [W/m2]')
ax.set_title('CESM2', fontdict={'size':16})
# -

# ### Now get the baseline data from the preindustrial control sims

experiment='piControl'
var="ts"
ds_ts_pi_all = xr.open_mfdataset("/glade/collections/cmip/CMIP6/CMIP/NCAR/CESM2/"+experiment+"/r1i1p1f1/Amon/"+var+"/gn/latest/"+var+"_Amon_CESM2_"+experiment+"_r1i1p1f1_gn_*.nc")

# Baseline climatology: time-mean over the last 30 years of the control run.
endtimes=ds_ts_pi_all.time[-(12*30):]
ts_pi=ds_ts_pi_all.ts.sel(time=slice(endtimes[0],endtimes[-1])).mean(dim=('time'))
ts_pi_gm=(ts_pi*weight).sum(dim=('lat','lon'))
ts_pi_gm.values

experiment='piControl'
var="rlut"
ds_olr_pi_all = xr.open_mfdataset("/glade/collections/cmip/CMIP6/CMIP/NCAR/CESM2/"+experiment+"/r1i1p1f1/Amon/"+var+"/gn/latest/"+var+"_Amon_CESM2_"+experiment+"_r1i1p1f1_gn_*.nc")

olr_pi=ds_olr_pi_all.rlut.sel(time=slice(endtimes[0],endtimes[-1])).mean(dim=('time'))
olr_pi_gm=(olr_pi*weight).sum(dim=('lat','lon'))
olr_pi_gm.values

var="rsut"
ds_rsut_pi_all = xr.open_mfdataset("/glade/collections/cmip/CMIP6/CMIP/NCAR/CESM2/"+experiment+"/r1i1p1f1/Amon/"+var+"/gn/latest/"+var+"_Amon_CESM2_"+experiment+"_r1i1p1f1_gn_*.nc")

var="rsdt"
ds_rsdt_pi_all = xr.open_mfdataset("/glade/collections/cmip/CMIP6/CMIP/NCAR/CESM2/"+experiment+"/r1i1p1f1/Amon/"+var+"/gn/latest/"+var+"_Amon_CESM2_"+experiment+"_r1i1p1f1_gn_*.nc")

# +
rsut_pi=ds_rsut_pi_all.rsut.sel(time=slice(endtimes[0],endtimes[-1])).mean(dim=('time'))
rsut_pi_gm=(rsut_pi*weight).sum(dim=('lat','lon'))
rsut_pi_gm.values

rsdt_pi=ds_rsdt_pi_all.rsdt.sel(time=slice(endtimes[0],endtimes[-1])).mean(dim=('time'))
rsdt_pi_gm=(rsdt_pi*weight).sum(dim=('lat','lon'))
rsdt_pi_gm.values
# -

N_pi = rsdt_pi_gm - rsut_pi_gm - olr_pi_gm
N_pi.values

# +
# Create a figure
fig = plt.figure(figsize=(10, 6))

# Ask, out of a 1x1 grid, the first axes.
ax = fig.add_subplot(1, 1, 1)

# Plot times as x-variable and temperatures as y-variable
ax.plot(tsGm_annual-ts_pi_gm,N-N_pi,marker='o',linestyle='None',color='k')
ax.set_xlabel('Temperature Change [K]')
ax.set_ylabel('Change in Net TOA flux [W/m2]')
ax.set_title('CESM2', fontdict={'size':16})
# -

dt = tsGm_annual - ts_pi_gm
dN = N - N_pi

# ### Add linear regression
# Two methods, both from [Jen Kay's class](https://github.com/jenkayco/ATOC7500ObjectiveDataAnalysis/blob/master/ATOC7500_applicationlab2_AR1_regression_AO.ipynb)

### Calculate the correlation statistics - slow way, but convenient
slope, intercept, r_value, p_value, std_err = stats.linregress(dt,dN)
print('scipy.stats.linregress slope: ',round(slope,3))
print('scipy.stats.linregress intercept: ',round(intercept,3))
print('scipy.stats.linregress r_value: ',round(r_value,3))

# from https://plot.ly/matplotlib/linear-fits/
xi = np.array([0,dt.max()])
line = slope*xi+intercept

# ECS is the x-intercept of the Gregory regression (where net TOA flux = 0).
ECS = -intercept / slope
print(ECS)

# ### ECS for 2xCO2, for comparison to 5.3 K

# Assume 2xCO2 F=3.8 W/m2 : NOTE - this is NOT the current standard practice!
ECS2x = -3.8 / slope
print(ECS2x)

# Divide 4xCO2 ECS by 2 - this IS the current standard practice!
ECS2x = ECS / 2
print(ECS2x)

# +
# Create a figure
fig = plt.figure(figsize=(10, 6))

# Ask, out of a 1x1 grid, the first axes.
ax = fig.add_subplot(1, 1, 1)

# Plot times as x-variable and temperatures as y-variable
ax.plot(tsGm_annual-ts_pi_gm,N-N_pi,marker='o',linestyle='None',color='k')
ax.set_xlabel('Temperature Change [K]')
ax.set_ylabel('Change in Net TOA flux [W/m2]')
ax.set_title('CESM2', fontdict={'size':16})

# heres the regression line
plt.plot([0,ECS],[intercept,0])
xlims=plt.xlim()
plt.xlim(0,xlims[1])
ylims=plt.ylim()
plt.ylim(0,ylims[1])
# -

# # ECS (to 4xCO2)

ECS
cmip6dpdt_pendergrass/cesm2gregory_dpdt_abrupt4xCO2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# + deletable=true editable=true
import os

import ipywidgets as widgets
import tensorflow as tf
from IPython import display

from dragnn.protos import spec_pb2
from dragnn.python import graph_builder
from dragnn.python import spec_builder

from dragnn.python import load_dragnn_cc_impl  # This loads the actual op definitions
from dragnn.python import render_parse_tree_graphviz
from dragnn.python import visualization
from google.protobuf import text_format
from syntaxnet import load_parser_ops  # This loads the actual op definitions
from syntaxnet import sentence_pb2
from syntaxnet.ops import gen_parser_ops
from tensorflow.python.platform import tf_logging as logging


def load_model(base_dir, master_spec_name, checkpoint_name):
    """Build a DRAGNN annotation graph and restore its checkpoint.

    Args:
        base_dir: directory containing the master spec and checkpoint files.
        master_spec_name: filename of the text-format MasterSpec proto.
        checkpoint_name: filename of the TensorFlow checkpoint to restore.

    Returns:
        A closure ``annotate_sentence(sentence)`` that runs the restored
        graph on one serialized sentence and returns (annotations, traces).
        The closure keeps the tf.Graph and tf.Session alive.
    """
    # Read the master spec
    master_spec = spec_pb2.MasterSpec()
    with open(os.path.join(base_dir, master_spec_name), "r") as f:
        text_format.Merge(f.read(), master_spec)
    spec_builder.complete_master_spec(master_spec, None, base_dir)
    logging.set_verbosity(logging.WARN)  # Turn off TensorFlow spam.

    # Initialize a graph
    graph = tf.Graph()
    with graph.as_default():
        hyperparam_config = spec_pb2.GridPoint()
        builder = graph_builder.MasterBuilder(master_spec, hyperparam_config)
        # This is the component that will annotate test sentences.
        annotator = builder.add_annotation(enable_tracing=True)
        builder.add_saver()  # "Savers" can save and load models; here, we're only going to load.

    sess = tf.Session(graph=graph)
    with graph.as_default():
        #sess.run(tf.global_variables_initializer())
        #sess.run('save/restore_all', {'save/Const:0': os.path.join(base_dir, checkpoint_name)})
        builder.saver.restore(sess, os.path.join(base_dir, checkpoint_name))

    def annotate_sentence(sentence):
        # Run annotation inside the restored graph/session.
        with graph.as_default():
            return sess.run([annotator['annotations'], annotator['traces']],
                            feed_dict={annotator['input_batch']: [sentence]})
    return annotate_sentence

segmenter_model = load_model("data/en/segmenter", "spec.textproto", "checkpoint")
parser_model = load_model("data/en", "parser_spec.textproto", "checkpoint")

# + deletable=true editable=true
def annotate_text(text):
    """Segment and parse raw text; return (parsed Sentence proto, trace)."""
    sentence = sentence_pb2.Sentence(
        text=text,
        token=[sentence_pb2.Token(word=text, start=-1, end=-1)]
    )

    # preprocess
    with tf.Session(graph=tf.Graph()) as tmp_session:
        char_input = gen_parser_ops.char_token_generator([sentence.SerializeToString()])
        preprocessed = tmp_session.run(char_input)[0]
    segmented, _ = segmenter_model(preprocessed)

    annotations, traces = parser_model(segmented[0])
    assert len(annotations) == 1
    assert len(traces) == 1
    return sentence_pb2.Sentence.FromString(annotations[0]), traces[0]

annotate_text("John is eating pizza with a fork"); None  # just make sure it works

# + [markdown] deletable=true editable=true
# # Interactive parse tree explorer
# Run the cell below, and then enter text in the interactive widget.

# + deletable=true editable=true
def _parse_tree_explorer():
    """Render an input widget whose submissions are parsed and drawn as a tree."""
    # put stuff in a function to not pollute global scope
    text = widgets.Text("John is eating pizza with anchovies")  # Also try: John is eating pizza with a fork
    display.display(text)
    html = widgets.HTML()
    display.display(html)
    def handle_submit(sender):
        del sender  # unused
        parse_tree, trace = annotate_text(text.value)
        html.value = u"""
        <div style="max-width: 100%">{}</div>
        <style type="text/css">svg {{ max-width: 100%; }}</style>
        """.format(render_parse_tree_graphviz.parse_tree_graph(parse_tree))
    text.on_submit(handle_submit)
_parse_tree_explorer()

# + [markdown] deletable=true editable=true
# # Interactive trace explorer
# Run the cell below, and then enter text in the interactive widget.

# + deletable=true editable=true
def _trace_explorer():
    """Render an input widget whose submissions show the parser's trace."""
    # put stuff in a function to not pollute global scope
    text = widgets.Text("John is eating pizza with anchovies")
    display.display(text)
    output = visualization.InteractiveVisualization()
    display.display(display.HTML(output.initial_html()))
    def handle_submit(sender):
        del sender  # unused
        parse_tree, trace = annotate_text(text.value)
        display.display(display.HTML(output.show_trace(trace)))
    text.on_submit(handle_submit)
_trace_explorer()
syntaxnet/examples/dragnn/interactive_text_analyzer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# World population (in billions) sampled at each decade mark.
year = [1950, 1960, 1970, 1980, 1990, 2000, 2010, 2020]
population = [2.53, 2.8, 3, 4.1, 5.5, 6.1, 7, 7.9]

# Show the most recent decade and its population value.
for series in (year, population):
    print(series[-1])

# Bring in pyplot for charting.
import matplotlib.pyplot as plt

# Draw population against year as a line chart and render it.
plt.plot(year, population)
plt.show()
introduction/matlab_plot/jupyter/1.line_plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.6 64-bit (''test2'': conda)' # language: python # name: python37664bittest2conda430366e329e54cb68cfb37f49729e20f # --- # # Classification Shap # + from BorutaShap import BorutaShap, load_data X, y = load_data(data_type='classification') X.head() # + # no model selected default is Random Forest, if classification is False it is a Regression problem Feature_Selector = BorutaShap(importance_measure='shap', classification=True) Feature_Selector.fit(X=X, y=y, n_trials=100, random_state=0) # - # Returns Boxplot of features Feature_Selector.plot(X_size=12, figsize=(12,8), y_scale='log', which_features='all') # as 3 attributes are still undecided we have two choices either increase the n_trials or use the TentativeRoughFix() # method which compares the median values of the max shadow feature and the undecided features. Feature_Selector.TentativeRoughFix() # Returns Boxplot of features Feature_Selector.plot(X_size=12, figsize=(12,8), y_scale='log', which_features='all') # Returns a subset of the original data with the selected features subset = Feature_Selector.Subset() subset.head() # # Classification Shap Sampling # + # no model selected default is Random Forest, if classification is False it is a Regression problem Feature_Selector = BorutaShap(importance_measure='shap', classification=True) Feature_Selector.fit(X=X, y=y, n_trials=100, random_state=0, sample=True) # - # Returns Boxplot of features Feature_Selector.plot(X_size=12, figsize=(12,8), y_scale='log', which_features='all') # as 4 attributes are still undecided we have two choices either increase the n_trials or use the TentativeRoughFix() # method which compares the median values of the max shadow feature and the undecided features. 
Feature_Selector.TentativeRoughFix() # Returns Boxplot of features Feature_Selector.plot(X_size=12, figsize=(12,8), y_scale='log', which_features='all') # # Classification Gini # + # no model selected default is Random Forest, if classification is False it is a Regression problem Feature_Selector = BorutaShap(importance_measure='gini', classification=True) Feature_Selector.fit(X=X, y=y, n_trials=100, random_state=0) # - # Returns Boxplot of features Feature_Selector.plot(X_size=12, figsize=(12,8), y_scale='log', which_features='all') # as 2 attributes are still undecided we have two choices either increase the n_trials or use the TentativeRoughFix() # method which compares the median values of the max shadow feature and the undecided features. Feature_Selector.TentativeRoughFix() # Returns Boxplot of features Feature_Selector.plot(X_size=12, figsize=(12,8), y_scale='log', which_features='all')
Examples/Classification.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # Kernel Density Estimation and histograms
# * Histograms are non-parametric method (i.e. they do not make assumptions on the underlying distribution of the data) to study discrete data.
#
# * KDE is a method to interpret those data by applying a kernel of various sizes and adding the overlapping points. We can use: boxy, Gaussian etc kernels.
#
# * The appropriate bin-size of the histograms is given by:
#     * Freedman-Diaconis rule (good for univariate, outliers | bad for uniform and multivariate)
#     * Scott+92 (assuming normal distribution, bad with outliers)
#     * Shimazaki+07 (assumed Poissonian distribution)
#     * Knuth+06 (uses Bayesian inference)
#
# Below we have an example from https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/

# +
# Importing Libraries
from sklearn.neighbors import KernelDensity
from scipy.stats import gaussian_kde
from statsmodels.nonparametric.kde import KDEUnivariate
from statsmodels.nonparametric.kernel_density import KDEMultivariate
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats.distributions import norm
from sklearn.grid_search import GridSearchCV
# %matplotlib inline

import warnings
warnings.filterwarnings('ignore')

print "Package Versions:"
import sklearn; print "  scikit-learn:", sklearn.__version__
import scipy; print "  scipy:", scipy.__version__
import statsmodels; print "  statsmodels:", statsmodels.__version__

# +
# Define various KDE functions

def kde_statsmodels_u(x, x_grid, bandwidth=0.2, **kwargs):
    """Univariate Kernel Density Estimation with Statsmodels"""
    kde = KDEUnivariate(x)
    kde.fit(bw=bandwidth, **kwargs)
    return kde.evaluate(x_grid)

def kde_statsmodels_m(x, x_grid, bandwidth=0.2, **kwargs):
    """Multivariate Kernel Density Estimation with Statsmodels"""
    kde = KDEMultivariate(x, var_type='c', bw=bandwidth * np.ones_like(x),
                          **kwargs)
    return kde.pdf(x_grid)

def kde_scipy(x, x_grid, bandwidth=0.2, **kwargs):
    """Kernel Density Estimation with Scipy"""
    # Note that scipy weights its bandwidth by the covariance of the
    # input data.  To make the results comparable to the other methods,
    # we divide the bandwidth by the sample standard deviation here.
    kde = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)
    return kde.evaluate(x_grid)

def kde_sklearn(x, x_grid, bandwidth=0.2, **kwargs):
    """Kernel Density Estimation with Scikit-learn"""
    kde_skl = KernelDensity(bandwidth=bandwidth, **kwargs)
    kde_skl.fit(x[:, np.newaxis])
    # score_samples() returns the log-likelihood of the samples
    log_pdf = kde_skl.score_samples(x_grid[:, np.newaxis])
    return np.exp(log_pdf)

kde_funcs = [kde_statsmodels_u, kde_scipy, kde_sklearn]
kde_funcnames = ['Statsmodels-U', 'Scipy', 'Scikit-learn']

# kde_funcs = [kde_statsmodels_u, kde_statsmodels_m, kde_scipy, kde_sklearn]
# kde_funcnames = ['Statsmodels-U', 'Statsmodels-M', 'Scipy', 'Scikit-learn']

# +
# The grid we'll use for plotting
x_grid = np.linspace(-4.5, 3.5, 1000)

# Draw points from a bimodal distribution in 1D
np.random.seed(0)
x = np.concatenate([norm(-1, 1.).rvs(400),
                    norm(1, 0.3).rvs(100)])

# True mixture density (0.8/0.2 weights) for visual comparison below.
pdf_true = (0.8 * norm(-1, 1).pdf(x_grid)
            + 0.2 * norm(1, 0.3).pdf(x_grid))

# Plot the three kernel density estimates
fig, ax = plt.subplots(1, 3, sharey=True,
                       figsize=(13, 3))
# fig, ax = plt.subplots(1, 4, sharey=True,
#                        figsize=(13, 3))
fig.subplots_adjust(wspace=0)

for i in range(len(kde_funcs)):
    pdf = kde_funcs[i](x, x_grid, bandwidth=0.2)
    ax[i].plot(x_grid, pdf, color='blue', alpha=0.5, lw=3)
    ax[i].fill(x_grid, pdf_true, ec='gray', fc='gray', alpha=0.4)
    ax[i].set_title(kde_funcnames[i])
    ax[i].set_xlim(-4.5, 3.5)

from IPython.display import HTML
HTML("<font color='grey'>Gray = True underlying distribution</font><br>"
     "<font color='blue'>Blue = KDE model distribution (500 pts)</font>")
# -

# The selection of bandwidth is an important piece of KDE. For the same input data, different bandwidths can produce very different results:

fig, ax = plt.subplots()
for bandwidth in [0.1, 0.3, 1.0,10]:
    ax.plot(x_grid, kde_sklearn(x, x_grid, bandwidth=bandwidth),
            label='bw={0}'.format(bandwidth), linewidth=3, alpha=0.5)
ax.hist(x, 30, fc='gray', histtype='stepfilled', alpha=0.3, normed=True)
ax.set_xlim(-4.5, 3.5)
ax.legend(loc='upper left')

# So to select the right bandwidth we use the cross validation within Scikit-learn which is straightforward with the GridSearchCV meta-estimator:

# +
grid = GridSearchCV(KernelDensity(),
                    {'bandwidth': np.linspace(0.1, 1.0, 30)},
                    cv=20) # 20-fold cross-validation
grid.fit(x[:, None])
print grid.best_params_

kde = grid.best_estimator_
pdf = np.exp(kde.score_samples(x_grid[:, None]))

fig, ax = plt.subplots()
ax.plot(x_grid, pdf, linewidth=3, alpha=0.5, label='bw=%.2f' % kde.bandwidth)
ax.hist(x, 30, fc='gray', histtype='stepfilled', alpha=0.3, normed=True)
ax.legend(loc='upper left')
ax.set_xlim(-4.5, 3.5);
# -

# # Example
# Check out this site for comparison of KDE implementations in python:
# https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/

# +
# Load in stellar radius data from the previous catalog of exoplanets
# NOTE(review): columns 12,13 are assumed to be (R_planet, R_star) per the
# axis labels below -- confirm against the table's ReadMe.
x = np.genfromtxt('data/J_ApJ_770_69/table2.dat', delimiter = '|', usecols = (12,13))

plt.hist2d(x[:,0],x[:,1],bins=10)
plt.xlabel('R_planet')
plt.ylabel('R_star')
plt.show()
# -
kde_histograms.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/michelucci/oreilly-london-ai/blob/master/day2/Transfer%20Learning/Transfer_learning_with_keras_with_files.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="mEFIpv0gq-Ng" colab_type="text" # # Transfer Learning with Keras # ## Using Files # # (C) 2019 <NAME> # # www.toelt.ai # # Reference: https://towardsdatascience.com/a-comprehensive-hands-on-guide-to-transfer-learning-with-real-world-applications-in-deep-learning-212bf3b2f27a # + id="E_NNB_7Oq-Ni" colab_type="code" colab={} import tensorflow as tf from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions import numpy as np # + id="DCdKBPAdq-Nm" colab_type="code" colab={} import numpy as np import os import shutil import glob np.random.seed(42) # + id="09gyHXh-q-Np" colab_type="code" colab={} import matplotlib.pyplot as plt from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array, array_to_img # %matplotlib inline # + [markdown] id="nUvmdvXZq-Nr" colab_type="text" # ## Load the data # + [markdown] id="Qb2HGJZlrMhh" colab_type="text" # **NOTE:** The 2,000 images used in this exercise are excerpted from the ["Dogs vs. Cats" dataset](https://www.kaggle.com/c/dogs-vs-cats/data) available on Kaggle, which contains 25,000 images. Here, we use a subset of the full dataset to decrease training time for educational purposes. 
[Source of the next few cells: google] # + id="j0NH_T_frFLz" colab_type="code" outputId="a2c5349b-1a3f-4fd7-cb06-5b76caa55339" colab={"base_uri": "https://localhost:8080/", "height": 204} # !wget --no-check-certificate \ # https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip \ # -O /tmp/cats_and_dogs_filtered.zip # + id="cEDu8in5rHy7" colab_type="code" colab={} import os import zipfile local_zip = '/tmp/cats_and_dogs_filtered.zip' zip_ref = zipfile.ZipFile(local_zip, 'r') zip_ref.extractall('/tmp') zip_ref.close() # + id="TqwZUCviuxAW" colab_type="code" outputId="daf2f0a2-bd0c-4539-c8b7-ff45b3f05880" colab={"base_uri": "https://localhost:8080/", "height": 63} # !ls /tmp # + [markdown] id="CPAmaDfprXO1" colab_type="text" # The contents of the .zip are extracted to the base directory `/tmp/cats_and_dogs_filtered`, which contains `train` and `validation` subdirectories for the training and validation datasets , which in turn each contain `cats` and `dogs` subdirectories. 
Let's define each of these directories: # + id="nLZq17KR1SZb" colab_type="code" outputId="ce448104-41dc-460e-f4f6-ad8d8c121734" colab={"base_uri": "https://localhost:8080/", "height": 63} # !ls /tmp/cats_and_dogs_filtered/train # + id="B2pXFkqTrZzX" colab_type="code" colab={} base_dir = '/tmp/cats_and_dogs_filtered' train_dir = os.path.join(base_dir, 'train') validation_dir = os.path.join(base_dir, 'validation') # Directory with our training cat pictures train_cats_dir = os.path.join(train_dir, 'cats') # Directory with our training dog pictures train_dogs_dir = os.path.join(train_dir, 'dogs') # Directory with our validation cat pictures validation_cats_dir = os.path.join(validation_dir, 'cats') # Directory with our validation dog pictures validation_dogs_dir = os.path.join(validation_dir, 'dogs') # + id="miJJZ6VGrd6o" colab_type="code" outputId="dca380c3-6416-4da0-d3fa-2995115ed97b" colab={"base_uri": "https://localhost:8080/", "height": 76} train_cat_fnames = os.listdir(train_cats_dir) print (train_cat_fnames[:10]) train_dog_fnames = os.listdir(train_dogs_dir) train_dog_fnames.sort() print (train_dog_fnames[:10]) validation_cat_fnames = os.listdir(validation_cats_dir) validation_dog_fnames = os.listdir(validation_dogs_dir) # + id="hyiiiki8rlZS" colab_type="code" outputId="e826e3f6-6dbd-4f3c-9c13-15ce0ed82e3c" colab={"base_uri": "https://localhost:8080/", "height": 85} print ('total training cat images:', len(os.listdir(train_cats_dir))) print ('total training dog images:', len(os.listdir(train_dogs_dir))) print ('total validation cat images:', len(os.listdir(validation_cats_dir))) print ('total validation dog images:', len(os.listdir(validation_dogs_dir))) # + id="HzM93plMsIyT" colab_type="code" outputId="943295da-1419-4b17-f379-bef65989bc4b" colab={"base_uri": "https://localhost:8080/", "height": 34} cat_train = os.listdir(train_cats_dir) dog_train = os.listdir(train_dogs_dir) cat_val = os.listdir(validation_cats_dir) dog_val = os.listdir(validation_dogs_dir) # 
Example: let's print the name of some of the files print(cat_train[0:3]) # + id="b3H8CXbz1hx0" colab_type="code" colab={} train_files = np.concatenate([[train_dogs_dir+'/' + x for x in train_dog_fnames], [train_cats_dir+'/' + x for x in train_cat_fnames]]) validation_files = np.concatenate([[validation_dogs_dir+'/' + x for x in validation_dog_fnames], [validation_cats_dir+'/' + x for x in validation_cat_fnames]]) # + id="hCyml-aw2Hom" colab_type="code" outputId="7a56b623-ca4b-430c-9739-008f900f37e9" colab={"base_uri": "https://localhost:8080/", "height": 51} print(train_files.shape) print(validation_files.shape) # + id="SkP9lbcY97co" colab_type="code" outputId="03c5a922-39df-4ab2-a3b0-19a660823130" colab={"base_uri": "https://localhost:8080/", "height": 68} train_files[0:3] # + id="B-Vnlo7s3MAv" colab_type="code" colab={} train_dir = '/tmp/tr/' validation_dir = '/tmp/va/' os.mkdir(train_dir) if not os.path.isdir(train_dir) else None os.mkdir(validation_dir) if not os.path.isdir(validation_dir) else None for fn in train_files: shutil.copy(fn, train_dir) for fn in validation_files: shutil.copy(fn, validation_dir) # + [markdown] id="Pjacz2blq-N0" colab_type="text" # Let's now read the files # + id="y4I7AdhMq-N1" colab_type="code" outputId="06a1e8f6-0af5-4845-f4d2-5946a7859401" colab={"base_uri": "https://localhost:8080/", "height": 34} IMG_DIM = (150, 150) train_files = glob.glob(train_dir + '/*') train_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in train_files] train_imgs = np.array(train_imgs) train_labels = [fn.split('/')[3].split('.')[0].strip() for fn in train_files] # MAC '/', WINDOWS '\\' validation_files = glob.glob(validation_dir + '/*') validation_imgs = [img_to_array(load_img(img, target_size=IMG_DIM)) for img in validation_files] validation_imgs = np.array(validation_imgs) validation_labels = [fn.split('/')[3].split('.')[0].strip() for fn in validation_files] print('Train dataset shape:', train_imgs.shape, '\tValidation dataset 
shape:', validation_imgs.shape) # + id="noDfwOtj-8Qt" colab_type="code" outputId="ce8c175b-5b88-4e88-d5e9-66fcc396152a" colab={"base_uri": "https://localhost:8080/", "height": 34} train_labels[0:3] # + id="9apYKetM-Hq_" colab_type="code" outputId="f86ea5ff-14eb-4ee0-c5ea-e7c1182bb44c" colab={"base_uri": "https://localhost:8080/", "height": 184} train_imgs_scaled = train_imgs.astype('float32') validation_imgs_scaled = validation_imgs.astype('float32') train_imgs_scaled /= 255 validation_imgs_scaled /= 255 print(train_imgs[3].shape) array_to_img(train_imgs[3]) # + id="xENlj0LntqE3" colab_type="code" outputId="fb062326-ea3e-4a62-813f-f760eea8c8e2" colab={"base_uri": "https://localhost:8080/", "height": 34} batch_size = 30 num_classes = 2 epochs = 10 input_shape = (150, 150, 3) # encode text category labels from sklearn.preprocessing import LabelEncoder le = LabelEncoder() le.fit(train_labels) train_labels_enc = le.transform(train_labels) validation_labels_enc = le.transform(validation_labels) print(train_labels[10:15], train_labels_enc[10:15]) # + [markdown] id="fDdQPMgWq-N9" colab_type="text" # # CNN Model # + id="vyeq5UVLq-N9" colab_type="code" outputId="11633e93-6d8b-4f8c-9d4a-bc7f69aec2e3" colab={"base_uri": "https://localhost:8080/", "height": 581} from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout from tensorflow.keras.models import Sequential from tensorflow.keras import optimizers model = Sequential() model.add(Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=(150, 150, 3))) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, kernel_size=(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(128, kernel_size=(3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(Dense(1, activation='sigmoid')) model.compile(loss='binary_crossentropy', optimizer=optimizers.RMSprop(), metrics=['accuracy']) 
model.summary() # + id="jO-fbQz4uGT9" colab_type="code" outputId="dd90b323-b30e-46bc-bdd3-a21ef9b2656a" colab={"base_uri": "https://localhost:8080/", "height": 374} history = model.fit(x=train_imgs_scaled, y=train_labels_enc, validation_data=(validation_imgs_scaled, validation_labels_enc), batch_size=batch_size, epochs=epochs, verbose=1) # + [markdown] id="pzBnFK8QwPOu" colab_type="text" # Note that we are extremly overfitting here! Check how the training accuracy is almost 100% while the validation accuracy is around 73%. Overfitting is typically one of the main problems when training big neural networks. # + id="OfJQ5guiq-OA" colab_type="code" outputId="9cac430f-db2c-4a64-8ca3-6217d31aa5b9" colab={"base_uri": "https://localhost:8080/", "height": 308} f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) t = f.suptitle('Basic CNN Performance', fontsize=12) f.subplots_adjust(top=0.85, wspace=0.3) epoch_list = list(range(1,10+1)) ax1.plot(epoch_list, history.history['acc'], label='Train Accuracy') ax1.plot(epoch_list, history.history['val_acc'], label='Validation Accuracy') ax1.set_xticks(np.arange(0, 10+1, 5)) ax1.set_ylabel('Accuracy Value') ax1.set_xlabel('Epoch') ax1.set_title('Accuracy') l1 = ax1.legend(loc="best") ax2.plot(epoch_list, history.history['loss'], label='Train Loss') ax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss') ax2.set_xticks(np.arange(0, 10+1, 5)) ax2.set_ylabel('Loss Value') ax2.set_xlabel('Epoch') ax2.set_title('Loss') l2 = ax2.legend(loc="best") # + colab_type="code" id="I6OX4rTbwrUH" colab={} input_shape = (150, 150, 3) # + id="TnB7kM8Oq-OF" colab_type="code" outputId="f2318312-0ef5-4d93-d7bd-3f3d73a5f709" colab={"base_uri": "https://localhost:8080/", "height": 728} from tensorflow.keras.applications import vgg16 from tensorflow.keras.models import Model import tensorflow.keras as keras vgg = vgg16.VGG16(include_top=False, weights='imagenet', input_shape=input_shape) output = vgg.layers[-1].output output = 
keras.layers.Flatten()(output) vgg_model = Model(vgg.input, output) vgg_model.trainable = False for layer in vgg_model.layers: layer.trainable = False import pandas as pd pd.set_option('max_colwidth', -1) layers = [(layer, layer.name, layer.trainable) for layer in vgg_model.layers] pd.DataFrame(layers, columns=['Layer Type', 'Layer Name', 'Layer Trainable']) # + id="nxQvCenIy7Zc" colab_type="code" outputId="280bb469-c0a4-4770-c1dc-93e46d03edca" colab={"base_uri": "https://localhost:8080/", "height": 303} bottleneck_feature_example = vgg.predict(train_imgs_scaled[0:1]) print(bottleneck_feature_example.shape) plt.imshow(bottleneck_feature_example[0][:,:,0]) # + [markdown] id="KprJxEqBy8mU" colab_type="text" # As you may notice all parameters are still trainable. We need to correct this. # + id="28DBYk04zBZO" colab_type="code" outputId="bfd05869-98f8-4ee8-ed91-b933cd08d1d1" colab={"base_uri": "https://localhost:8080/", "height": 68} # %%time def get_bottleneck_features(model, input_imgs): features = model.predict(input_imgs, verbose=0) return features train_features_vgg = get_bottleneck_features(vgg_model, train_imgs_scaled) validation_features_vgg = get_bottleneck_features(vgg_model, validation_imgs_scaled) print('Train Bottleneck Features:', train_features_vgg.shape, '\tValidation Bottleneck Features:', validation_features_vgg.shape) # + id="WJSyMCzMzFO_" colab_type="code" outputId="ea202afe-8045-49a6-c458-c7f18288855c" colab={"base_uri": "https://localhost:8080/", "height": 323} from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, InputLayer from tensorflow.keras.models import Sequential from tensorflow.keras import optimizers input_shape = vgg_model.output_shape[1] model = Sequential() model.add(InputLayer(input_shape=(input_shape,))) model.add(Dense(512, activation='relu', input_dim=input_shape)) model.add(Dropout(0.3)) model.add(Dense(512, activation='relu')) model.add(Dropout(0.3)) model.add(Dense(1, activation='sigmoid')) 
model.compile(loss='binary_crossentropy', optimizer=optimizers.Adam(lr =1e-4), #optimizer=optimizers.RMSprop(lr=1e-4), metrics=['accuracy']) model.summary() # + id="KeVOsQ9zzF5a" colab_type="code" outputId="f364960a-4a0d-4626-f127-ca0054c5e128" colab={"base_uri": "https://localhost:8080/", "height": 374} history = model.fit(x=train_features_vgg, y=train_labels_enc, validation_data=(validation_features_vgg, validation_labels_enc), batch_size=batch_size, epochs=epochs, verbose=1) # + [markdown] id="t9X1Of4z_2Z_" colab_type="text" # # Create a complete network and use it for training # + id="lh6i3u5t_5--" colab_type="code" colab={} from tensorflow.keras.layers import Dense,GlobalAveragePooling2D from tensorflow.keras.models import Model # + id="bYD7VFZC_74Y" colab_type="code" colab={} base_model=VGG16(include_top=False, weights='imagenet') #imports the VGG16 model and discards the last layer. x=base_model.output x=GlobalAveragePooling2D()(x) x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results. 
preds=Dense(2,activation='softmax')(x) #final layer with softmax activation # + id="C815GN3-_9bE" colab_type="code" colab={} model=Model(inputs=base_model.input,outputs=preds) # + id="Ll4ddOvD__3Y" colab_type="code" outputId="a701f67d-769b-45b0-9cc9-0598632b9d75" colab={"base_uri": "https://localhost:8080/", "height": 901} model.summary() # + id="iJeIDqbNABJa" colab_type="code" colab={} for layer in model.layers: layer.trainable=False # or if we want to set the first 20 layers of the network to be non-trainable for layer in model.layers[:20]: layer.trainable=False for layer in model.layers[20:]: layer.trainable=True # + id="HHfiDkohAC6Z" colab_type="code" outputId="3c68b510-d617-4739-a34a-f28a2cbe5a57" colab={"base_uri": "https://localhost:8080/", "height": 34} len(model.layers) # + id="w0vaMIiCAEFo" colab_type="code" colab={} model.compile(optimizer='Adam',loss='sparse_categorical_crossentropy',metrics=['accuracy']) # + id="x1w7yFVhAFnm" colab_type="code" outputId="b4d43dcc-3715-4e44-d06f-b7cf73163864" colab={"base_uri": "https://localhost:8080/", "height": 901} model.summary() # + id="9QD0Rec2AHdf" colab_type="code" outputId="b7905aa1-4850-4b28-eb59-a7cdf6cc78b9" colab={"base_uri": "https://localhost:8080/", "height": 374} history = model.fit(x=train_imgs_scaled, y=train_labels_enc, validation_data=(validation_imgs_scaled, validation_labels_enc), batch_size=batch_size, epochs=epochs, verbose=1) # + id="05VX8s7qAJ9G" colab_type="code" outputId="2c6a780c-b35f-4b1c-aaeb-5ff5cf994beb" colab={"base_uri": "https://localhost:8080/", "height": 308} f, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) t = f.suptitle('Basic CNN Performance', fontsize=12) f.subplots_adjust(top=0.85, wspace=0.3) epoch_list = list(range(1,10+1)) ax1.plot(epoch_list, history.history['acc'], label='Train Accuracy') ax1.plot(epoch_list, history.history['val_acc'], label='Validation Accuracy') ax1.set_xticks(np.arange(0, 10+1, 5)) ax1.set_ylabel('Accuracy Value') ax1.set_xlabel('Epoch') 
ax1.set_title('Accuracy') ax1.set_ylim(0,1) l1 = ax1.legend(loc="best") ax2.plot(epoch_list, history.history['loss'], label='Train Loss') ax2.plot(epoch_list, history.history['val_loss'], label='Validation Loss') ax2.set_xticks(np.arange(0, 10+1, 5)) ax2.set_ylabel('Loss Value') ax2.set_xlabel('Epoch') ax2.set_title('Loss') ax1.set_ylim(0,1) l2 = ax2.legend(loc="best") # + id="zXnccjumDDvR" colab_type="code" colab={}
day2/Transfer Learning/Transfer_learning_with_keras_with_files.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Send an Email with smtplib # + import smtplib def send_email(host, subject, to_addr, from_addr, from_addr_pwd, body_text): """ Send an email """ BODY = "\r\n".join(( "From: %s" % from_addr, "To: %s" % to_addr, "Subject: %s" % subject , "", body_text )) server = smtplib.SMTP(host) server.login(from_addr, from_addr_pwd) server.sendmail(from_addr, [to_addr], BODY) server.quit() if __name__ == "__main__": host = "smtp.126.com" subject = "Test email from Python" to_addr = "<EMAIL>" from_addr = "<EMAIL>" from_addr_pwd = "<PASSWORD>" body_text = "Python rules them all!" send_email(host, subject, to_addr, from_addr, from_addr_pwd,body_text)
python/python101/built-in-packages/06-email.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="LikIUYuS_DyQ" # # VGG # # This notebook is an implement of [___Very Deep Convolutional Networks for Large-Scale Image Recognition___](https://arxiv.org/pdf/1409.1556.pdf) by Simonyan et al. The original model was trained for ImageNet dataset, but in this notebook we fine-tuned it for Cifar 10 dataset, which is a relatively smaller dataset and is better to store on Google Colab. # + [markdown] id="MfJAV5slGQq_" # We first need to install and import all the dependent libraries in the session. # + colab={"base_uri": "https://localhost:8080/"} id="aVhDJW4GGVUT" outputId="1c896629-9879-4f32-a81d-9fac2ce320f1" # !pip install tensorflow tensorflow-datasets matplotlib sklearn import tensorflow as tf from tensorflow.keras.layers import * from tensorflow.keras.regularizers import l2 import tensorflow_datasets as tfds from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import numpy as np # + [markdown] id="WK2DZExoAPVk" # This is function that constructs a VGG model. We provide VGG with layers 11, 13, 16, and 19, which are provided in the paper. The structure of the model is almost same with the original paper, but the kernel size and strides are adjusted to fit the smaller pictures of Cifar 10. We define two variables, weight_decay and dropout_rate, as the hyperparameters of the model for kernel regularization and dropout layer, respectively. 
# + id="71DVAiu4KFjz" def add_layer(model, num_filters, num_layers, weight_decay, dropout_rate): for i in range(num_layers): model.add(Conv2D(filters = num_filters, kernel_size = (3, 3), activation = "relu", padding = "same", kernel_regularizer = l2(weight_decay))) model.add(BatchNormalization()) if i < num_layers - 1: model.add(Dropout(rate = dropout_rate)) model.add(MaxPooling2D(pool_size = 2, strides = 2)) def createVGG(type, weight_decay, dropout_rate): if type == 11: params = [1, 1, 2, 2, 2] elif type == 13: params = [2, 2, 2, 2, 2] elif type == 16: params = [2, 2, 3, 3, 3] elif type == 19: params = [2, 2, 4, 4, 4] else: raise Exception("The parameter is not valid!") model = tf.keras.Sequential() model.add(InputLayer(input_shape = (32, 32, 3))) add_layer(model, 64, params[0], weight_decay, dropout_rate) add_layer(model, 128, params[1], weight_decay, dropout_rate) add_layer(model, 256, params[2], weight_decay, dropout_rate) add_layer(model, 512, params[3], weight_decay, dropout_rate) add_layer(model, 512, params[4], weight_decay, dropout_rate) model.add(Flatten()); model.add(Dense(512, activation = "relu", kernel_regularizer = l2(weight_decay))) model.add(BatchNormalization()) model.add(Dropout(dropout_rate)) model.add(Dense(10, activation = "softmax")) return model # + [markdown] id="NLaTochAFfm8" # In this part of the program, we get the Cifar 10 dataset using tensorflow dataset and separate it into training set and test set. 
# + colab={"base_uri": "https://localhost:8080/", "height": 415, "referenced_widgets": ["e1332cd4063a452fa823a3ee830b4b3d", "<KEY>", "64c753d51ecd4f85964e75e9e28d8abe", "<KEY>", "f344fc35e0334927ae63f6d19ce8a14c", "<KEY>", "<KEY>", "db0cd1f7707e403ea0294dacaf74052b", "<KEY>", "634d1a0b4134454f8a1074b30b24e7fb", "<KEY>", "587a3fe4bce1433c81019a63fdee5cc9", "0e0f7463d2e341b1ae31a2e31865a7d7", "<KEY>", "<KEY>", "4f50ee8c24bb4edea872b5c633e2bf97", "<KEY>", "<KEY>", "b34be41555ba46a79cafe83bd00423b5", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "da3fb3a1733f47b984caefc97ad60021", "<KEY>", "815fe5f29b924c368a3933f6aa907fe0", "a94fa5dcf9714bef8b0758f5eda6214f", "2312dc7e7a104511aba9ba9fdd7de52f", "d4e5446420754da99711ac56402d715f", "<KEY>", "<KEY>", "<KEY>", "9efe30c36f2d4d6e97492d7d37756ec3", "<KEY>", "a06516e8e35349e99308d66e08e4ed37", "e22b3a6751474ff886cd9fe8e9b9dba8", "<KEY>", "<KEY>", "6a73855567ba4b9b82e37b086ef8c585", "<KEY>", "<KEY>", "1e5231316920493f86caf5d57683297f", "<KEY>", "72e71dfb506a4488bd8854da4e2aace0", "df0737f434604a468ff62fc58382f328", "a4f7087c60ac48e6833c1b024609290a", "192c78e0b76e45b18d25fcc6ba2ed452", "64e0665bbeea472b92ad8710c5d479d9", "dfa5abf1117349ed8df0bb2b2846b5ca", "<KEY>", "dadfadefb9d84147a29782ce45d1625d", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "868d6ed9e87848838638f545a26cf5d8", "33d5f001fa304e6792ed12734ea57dc6", "adbbb76c168c4e888ffd8d07dfedd513", "3553a3630e56455699f8444f582e8565", "476df2c1a9c448ab828eda1bdcea08fe", "<KEY>", "89218eae8f644dc48d6af7f213ce036e", "af8b97b59d5442f48b49a8f79febe94b", "0d03308964784a4ab4eebec692dc5f9c", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "9075cea7eee744c4917327e5b93be1c3", "7f92e52b76844e209b45675cf2fb809a", "becebf24553047aab07d14339196b6be", "<KEY>", "<KEY>", "<KEY>", "a33675fe28e1448098e508a267dffbf2", "<KEY>", "f5272a996dff4413bce7398ddfaed8c8"]} id="B_ecmr8NFtIM" outputId="db156fc7-135a-4c7f-ff7b-042e26ef64a8" # Set random state for reproducibility random_state = 4 def get_data(): x, y = tfds.as_numpy( 
tfds.load("cifar10", split = "train", shuffle_files = True, as_supervised = True, batch_size = -1)) return train_test_split(x, y, test_size = .2, random_state = random_state) train_x, test_x, train_y, test_y = get_data() # + [markdown] id="L7dpVq72RSei" # This part trains the VGG model on Cifar 10 dataset. We tested several sets of hyperparameters and adopted one with the best validation loss. We then store the best weights of each training epochs on drive so that we can continue training even if the session disconnects. We also store searching results and training weights in case the process takes too much time or the session crashes accidentally. We show the result of the training process with a graph about the training and validation accuracy for each epoch. # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="FaLmu9n5ROzk" outputId="301efe82-f842-45a4-ee07-e0e9a1435f53" # Set a checkpoint to save weights cp = tf.keras.callbacks.ModelCheckpoint("/weights", monitor = "loss", verbose = 1, save_best_only = True, mode = "auto") model = createVGG(19, 1.5, .3) model.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = 1e-5), loss = tf.keras.losses.SparseCategoricalCrossentropy(), metrics = ["accuracy"]) # We can use the existing data if the training process has started # model.load_weights("/weights") history = model.fit(train_x, train_y, epochs = 200, validation_split = .2, batch_size = 64) plt.plot(history.history['accuracy']) plt.plot(history.history['val_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc = 'upper left') plt.show() # + [markdown] id="nUhdLp-cB1_G" # Here we test our model on test set and show how VGG predicts on sample images in the test set. 
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="ok-a_tSrKW8w" outputId="5befb0ea-2656-44cf-ecf7-0bbf267da146"
# Human-readable class names for the ten Cifar 10 categories, in label order.
# (The stray backtick that preceded this assignment — a syntax error in the
# exported script — has been removed.)
labels = ["airplane", "automobile", "bird", "cat", "deer",
          "dog", "frog", "horse", "ship", "truck"]

# Predicted class index = argmax over the model's softmax outputs.
pred = np.argmax(model.predict(test_x), axis = 1)
print("Test Accuracy: {:.2%}".format(np.sum(pred == test_y) / len(test_y)))

# Show the first nine test images with their true and predicted labels.
sample_data = test_x[: 9]
sample_label = test_y[: 9]
fig = plt.figure(figsize = (10, 40))
for i in range(len(sample_data)):
    ax = fig.add_subplot(911 + i)
    ax.imshow(test_x[i])
    ax.set_title("Labelled as " + labels[int(sample_label[i])]
                 + ", classified as " + labels[int(pred[i])])
cv/VGG/.ipynb_checkpoints/VGG-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Object-based filtering of pixel classifications <img align="right" src="../../Supplementary_data/dea_logo.jpg"> # # * [**Sign up to the DEA Sandbox**](https://docs.dea.ga.gov.au/setup/sandbox.html) to run this notebook interactively from a browser # * **Compatibility:** Notebook currently compatible with the `DEA Sandbox` environment # # ## Background # # Geographic Object-Based Image Analysis (GEOBIA), aims to group pixels together into meaningful image-objects. There are two advantages to a GEOBIA workflow: one, we can reduce the 'salt and pepper' effect typical of classifying pixels; and two, we can increase the computational efficiency of our workflow by grouping pixels into fewer, larger, but more meaningful objects. A review of the emerging trends in GEOBIA can be found in [Chen et al. (2017)](https://www.tandfonline.com/doi/abs/10.1080/15481603.2018.1426092). # ## Description # # In this notebook, we take the pixel-based classifications generated in the `4_Classify_satellite_data.ipynb` notebook, and filter the classifications by image-objects. To do this, we first need to conduct image segmentation using the function `rsgislib.segmentation.runShepherdSegmentation`. This image segmentation algorithm is fast and scalable. The image segmentation is conducted on the `NDVI` layer output in the previous notebook. # To filter the pixel observations, we assign to each segment the majority (mode) pixel classification using the `scipy.ndimage.measurements import _stats` module. # # 1. Convert the NDVI layer to a `.kea` file format (a requirement for the Remote Sensing and GIS Software Library, RSGISLib) # 2. Run the image segmentation # 3. Calculate the **mode** statistic for each segment # 4. 
Write the new object-based classification to disk as a COG # 5. An advanced section that demonstrates running a tiled, parallel image segmentation (useful if segmenting a very large GeoTIFF) # *** # ## Getting started # # To run this analysis, run all the cells in the notebook, starting with the "Load packages" cell. # ### Load Packages # + import os import sys import gdal import shutil import xarray as xr import numpy as np import subprocess as sp import matplotlib.pyplot as plt from odc.io.cgroups import get_cpu_quota from datacube.utils.cog import write_cog from rsgislib.segmentation import segutils from scipy.ndimage.measurements import _stats sys.path.append('../../Scripts') from dea_classificationtools import HiddenPrints import warnings warnings.filterwarnings("ignore") # - # ## Analysis Parameters # # * `pred_tif`: The path and name of the prediction GeoTIFF output in the previous notebook. # * `tif_to_seg`: The geotiff to use as an input to the image segmentation, in the default example this was an NDVI layer output in the last notebook. # * `min_seg_size`: An integer which specifies the minimum number of pixels within a segment; segments with fewer than then minimum number of pixels are merged with adjacent segments. # * `numClusters`: An integer which specifies the number of clusters within the KMeans clustering. A good default is 60. # * `results`: A folder location to store the classified GeoTIFFs. 
# + pred_tif = 'results/prediction.tif' tif_to_seg = 'results/NDVI.tif' min_seg_size = 100 # in number of pixels numClusters = 60 # number of k-means clusters results = 'results/' # - # ## Generate an object-based classification # ### Convert to `.kea` format # + # Inputs to image seg kea_file = tif_to_seg[:-4] + '.kea' segmented_kea_file = tif_to_seg[:-4] + '_segmented.kea' # Convert tiff to kea gdal.Translate(destName=kea_file, srcDS=tif_to_seg, format='KEA', outputSRS='EPSG:6933') # - # ### Run image segmentation # + # %%time # Store temp files somewhere tmp = 'tmp/' if not os.path.exists(tmp): os.mkdir(tmp) # Run image seg with HiddenPrints(): segutils.runShepherdSegmentation(inputImg=kea_file, outputClumps=segmented_kea_file, tmpath=tmp, numClusters=numClusters, minPxls=min_seg_size) # - # ### Open segments and pixel-based predictions segments = xr.open_rasterio(segmented_kea_file).squeeze().values pred = xr.open_rasterio(pred_tif).squeeze().drop_vars('band') # ### Calculate mode # # Within each segment, the majority classification is calculated and assigned to that segment. count, _sum = _stats(pred, labels=segments, index=segments) mode = _sum > (count / 2) mode = xr.DataArray(mode, coords=pred.coords, dims=pred.dims, attrs=pred.attrs).astype(np.int16) # ### Clean up intermediate files shutil.rmtree(tmp) os.remove(kea_file) os.remove(segmented_kea_file) # ### Write result to disk write_cog(mode, results + 'prediction_object_.tif', overwrite=True) # ### Plot result # # Below we plot the the pixel-based classification alongside the newly created object-based classification. You can see the 'salt and pepper' effect of individual pixels being classified as crop has been removed in the object based classification, resulting in a 'cleaner' classification. 
fig, axes = plt.subplots(1, 2, sharey=True, figsize=(16, 8)) pred.plot(ax=axes[0], add_colorbar=False) mode.plot(ax=axes[1], add_colorbar=False) axes[0].set_title('Pixel-based Classification') axes[1].set_title('Object-based Classification (mode)') plt.tight_layout() # *** # ## Optional: Tiled, parallel image segmentation # Image segmentation at large scales can be both time and memory intensive. The module `dea_tools.segmentation.performTiledSegmentation` builds upon the image segmentation algorithm developed by Shepherd et al. (2019) (implemented in the package RSGISLib) to run image segmentation across multiple CPUs. A full description of their approach can be found in _Clewey et al. (2014) A Python-Based Open Source System for Geographic Object-Based Image Analysis (GEOBIA) Utilizing Raster Attribute Tables_. The code below demonstrates how to use the `dea_tools.segmentation.performTiledSegmentation` module to conduct a tiled, parallel image segmentation. # # The tiling approach is based on the bounding coordinates of the GeoTIFF. If a GeoTIFF is irregularly shaped such that a tile(s) contains none of the input GeoTIFF, then the segmentation will fail. If this occurs, check the `<>S1Tiles.shp` file output during stage 1 of the algorithm. Overlay this file on top of your input GeoTIFF to check if there are empty tiles. At the moment, the only solution is to change the extent of the GeoTIFF to be more regularly shaped. The `validDataTileFraction` variable will handle tiles that contain a small fraction of the input GeoTIFF, tiles containing less than the specified fraction are merged with a neighbouring tile. 
The image below shows an example of the tiling approach with merged tiles: # # <img align="center" src="../../Supplementary_data/Scalable_machine_learning/tilingApproach.png" width="35%"> # # Below, we will conduct the same analysis as we did in the first example above, but this time the image segmentation will be conducted using the `dea_tools.segmentation.performTiledSegmentation()` function. For the default example, this will be slower than the serial version, however, when conducting image segmentation over very large GeoTIFFs, this option will be preferred. # Import the parallel segementation module sys.path.append('../../Scripts') from dea_segmentation import performTiledSegmentation # ## Analysis Parameters # * `validDataTileFraction`: The fraction of a tile that should contain valid data. Below this threshold, a tile will be merged with its neighbour. e.g. `0.3` # * `tile_width, tile_height`: The tile size parameters in number of pixels # + # New parameters to add validDataTileFraction = 0.2 tile_width, tile_height = 1000, 1000 # Previous parameters we added above, reposting here pred_tif = 'results/prediction.tif' tif_to_seg = 'results/NDVI.tif' min_seg_size = 100 results = 'results/' # - # ### Automatically find the number of CPUs ncpus = round(get_cpu_quota()) print('ncpus = ' + str(ncpus)) # ## Tiled, parallel image segmentation # ### Convert `.tif` to `.kea` # + # Store temp files somewhere tmp = 'tmp/' if not os.path.exists(tmp): os.mkdir(tmp) # Inputs to image seg kea_file = tif_to_seg[:-4] + '.kea' segmented_kea_file = tif_to_seg[:-4] + '_segmented.kea' # Convert tiff to kea gdal.Translate(destName=kea_file, srcDS=tif_to_seg, format='KEA', outputSRS='EPSG:6933') # - # ### Run the parallel, tiled segmentation # # This will take a couple of minutes to run. 
# + # Run the segmentation with HiddenPrints(): performTiledSegmentation(kea_file, segmented_kea_file, tmpDIR=tmp, numClusters=numClusters, validDataThreshold=validDataTileFraction, tileWidth=tile_width, tileHeight=tile_height, minPxls=min_seg_size, ncpus=ncpus) # Remove tmp folder shutil.rmtree(tmp) # - # ### Open segments and pixel-based predictions segments = xr.open_rasterio(segmented_kea_file).squeeze().values pred = xr.open_rasterio(pred_tif).squeeze().drop_vars('band') # ### Calculate mode count, _sum = _stats(pred, labels=segments, index=segments) mode = _sum > (count / 2) mode = xr.DataArray(mode, coords=pred.coords, dims=pred.dims, attrs=pred.attrs).astype(np.int16) # ### Clean up intermediate files os.remove(kea_file) os.remove(segmented_kea_file) # ### Plot the result mode.plot(size=6); # ## Recommended next steps # # This is the last notebook in the `Scalable Machine Learning on the ODC` workflow! To revist any of the other notebooks, use the links below. # # 1. [Extracting training data from the ODC](1_Extract_training_data.ipynb) # 2. [Inspecting training data](2_Inspect_training_data.ipynb) # 3. [Evaluate, optimize, and fit a classifier](3_Evaluate_optimize_fit_classifier.ipynb) # 4. [Classifying satellite data](4_Classify_satellite_data.ipynb) # 5. **Object-based filtering of pixel classifications (this notebook)** # # # *** # # ## Additional information # # **License:** The code in this notebook is licensed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0). # Digital Earth Australia data is licensed under the [Creative Commons by Attribution 4.0](https://creativecommons.org/licenses/by/4.0/) license. 
# # **Contact:** If you need assistance, please post a question on the [Open Data Cube Slack channel](http://slack.opendatacube.org/) or on the [GIS Stack Exchange](https://gis.stackexchange.com/questions/ask?tags=open-data-cube) using the `open-data-cube` tag (you can view previously asked questions [here](https://gis.stackexchange.com/questions/tagged/open-data-cube)). # If you would like to report an issue with this notebook, you can file one on [Github](https://github.com/GeoscienceAustralia/dea-notebooks). # # **Last modified:** March 2021 # # ## Tags # Browse all available tags on the DEA User Guide's [Tags Index](https://docs.dea.ga.gov.au/genindex.html) # + raw_mimetype="text/restructuredtext" active="" # **Tags**: :index:`image segmentation`, :index:`GEOBIA`
Real_world_examples/Scalable_machine_learning/5_Object-based_filtering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernel_info: # name: python3 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # %matplotlib inline from matplotlib import style style.use('fivethirtyeight') import matplotlib.pyplot as plt import numpy as np import pandas as pd import datetime as dt # # Reflect Tables into SQLAlchemy ORM # Python SQL toolkit and Object Relational Mapper import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func engine = create_engine("sqlite:///Resources/hawaii.sqlite") # + # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect=True) # - # We can view all of the classes that automap found Base.classes.keys() # Save references to each table measurements_table = Base.classes.measurement stations_table = Base.classes.station # Create our session (link) from Python to the DB session = Session(bind=engine) # # Exploratory Climate Analysis # + # Design a query to retrieve the last 12 months of precipitation data and plot the results # Calculate the date 1 year ago from the last data point in the database final_date = '0000-00-00' for row in session.query(measurements_table): if (final_date < row.date): final_date = row.date final_date = dt.datetime.strptime(final_date, '%Y-%m-%d') final_date = final_date.replace(year=final_date.year - 1) print(f"First Allowed Date: {final_date.strftime('%m/%d/%Y')}") # Perform a query to retrieve the data and precipitation scores data = session.query( measurements_table.date, measurements_table.prcp ).filter(measurements_table.date >= final_date.strftime('%Y-%m-%d')) # Save the query results as a Pandas DataFrame and set the index to the date column Precip = pd.DataFrame(data, columns=['Date', 'Precipitation']).dropna() 
Precip.set_index('Date', inplace=True)

# # Sort the dataframe by date
Precip = Precip.sort_index()

# Confirm the cleaned frame holds what we expect before plotting
print(f"Total valid precipitation points found: {Precip.count().max()}")
print(f"Total distinct dates collected: {Precip.groupby('Date').count().count().min()}")

Precip.head(10)
# -

# Use Pandas Plotting with Matplotlib to plot the data.
# Both charts cover the same one-year window, so build the label once.
date_window = (f"{final_date.strftime('%m/%d/%Y')} - "
               f"{final_date.replace(year=final_date.year + 1).strftime('%m/%d/%Y')}")

daily = Precip.groupby('Date')

daily.sum().plot(
    title=f"Total Recorded Precipitation {date_window}",
    color='green',
    rot=90
)
plt.show()

daily.mean().plot(
    title=f"Average Recorded Precipitation {date_window}",
    color='brown',
    rot=90
)
plt.show()

# ##### Disclaimer: It's not clear if this is data is best represented by the average precipitation recorded or the total precipitation recorded on each day; so I plotted both. I also grouped the data based on the station the data was taken from.

# Use Pandas to calculate the summary statistics for the precipitation data
print(f"Summary stats based on data as individual points:\n{Precip['Precipitation'].describe()}\n")
print(f"Summary stats based on average for each date data was taken from:\n{Precip.groupby('Date').mean()['Precipitation'].describe()}\n")
print(f"Summary stats based on total precipitation per date:\n{Precip.groupby('Date').sum()['Precipitation'].describe()}")

# +
# Design a query to show how many stations are available in this dataset?
station_query = session.query(stations_table.station)
print(f"There are {station_query.count()} stations shared between the two tables.")

# +
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
active_stations = session.query(
    measurements_table.station,
    func.count()
).group_by(
    measurements_table.station
).order_by(
    -func.count()
).all()

for row in active_stations:
    print(f"Station {row[0]} has {row[1]} recorded events")

# +
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
highest_id = active_stations[0][0]

# FIX: the original issued three separate queries (min, max, avg) against the
# same rows; one aggregate query does all three in a single round trip.
temp_min, temp_avg, temp_max = session.query(
    func.min(measurements_table.tobs),
    func.avg(measurements_table.tobs),
    func.max(measurements_table.tobs)
).filter(measurements_table.station == highest_id).one()

print(f"For station {highest_id}:\n-the highest temp is {temp_max}F;\n-the lowest temp is {temp_min}F; and\n-the average temp is {temp_avg}F.")

# +
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram

# Recalculate the starting date for the last 12 months of this particular
# station's data collection.
# FIX: ask the database for MAX(date) for this station instead of scanning
# every one of its rows through a Python loop.
final_date = session.query(func.max(measurements_table.date)).\
    filter(measurements_table.station == highest_id).scalar()

final_date = dt.datetime.strptime(final_date, '%Y-%m-%d')
final_date = final_date.replace(year=final_date.year - 1)
print(f"First Allowed Date: {final_date.strftime('%m/%d/%Y')}")

# Now we can get the data and plot the histogram.
# FIX: the original added GROUP BY date, which silently keeps a single
# arbitrary reading per day; a histogram of observations should count them all.
temp_obs = session.query(measurements_table.date, measurements_table.tobs).\
    filter(measurements_table.date >= final_date.strftime('%Y-%m-%d')).\
    filter(measurements_table.station == highest_id).all()

Temp_Obs = pd.DataFrame(temp_obs, columns=['date', 'Temp (F)'])
Temp_Obs.plot.hist(
    title=f"Temperatures Recorded During the Last Year for Station {highest_id}",
    color='orange',
    width=1.5,
    bins=12
)
plt.show()
# -

# ## Bonus Challenge Assignment

# +
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
def calc_temps(start_date, end_date):
    """TMIN, TAVG, and TMAX for a list of dates.

    Args:
        start_date (string): A date string in the format %Y-%m-%d
        end_date (string): A date string in the format %Y-%m-%d

    Returns:
        TMIN, TAVG, and TMAX
    """
    return session.query(func.min(measurements_table.tobs),
                         func.avg(measurements_table.tobs),
                         func.max(measurements_table.tobs)).\
        filter(measurements_table.date >= start_date).\
        filter(measurements_table.date <= end_date).all()


# function usage example
print(calc_temps('2012-02-28', '2012-03-05'))
# -

# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax
# for your trip using the previous year's data for those same dates.
tmin, tavg, tmax = calc_temps('2011-02-28', '2011-03-05')[0]
print(f"min temp: {tmin}\navg temp: {tavg}\nmax temp: {tmax}")

# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
# NOTE(review): the bar's x position is also `tavg`; harmless for a single bar
# but probably meant to be a constant — confirm against the rendered plot.
avg_bar = plt.bar(
    tavg,
    height=tavg,
    color='salmon',
    yerr=tmax - tmin
)
plt.title("Trip Avg Temp")
plt.ylabel("Temp (F)")
plt.tight_layout()
plt.show()

# +
# Calculate the total amount of rainfall per weather station for your trip dates
# using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name,
# latitude, longitude, and elevation

# FIX: the original query selected the temperature column (`tobs`) and applied
# no aggregate, so each station reported one arbitrary temperature reading
# instead of its total rainfall.  Sum the precipitation column instead.
rain_sum = session.query(
    measurements_table.station,
    func.sum(measurements_table.prcp)
).\
    filter(measurements_table.date >= '2011-03-05').\
    filter(measurements_table.date <= '2012-02-28').\
    group_by(measurements_table.station).\
    order_by(func.sum(measurements_table.prcp).desc()).all()

print("Total Rainfall Between 2011-03-05 and 2012-02-28 per Station")
for tup in rain_sum:
    print(f"{tup[0]}: {tup[1]}")

# +
# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)

def daily_normals(date):
    """Daily Normals.

    Args:
        date (str): A date string in the format '%m-%d'

    Returns:
        A list of tuples containing the daily normals, tmin, tavg, and tmax
    """
    sel = [func.min(measurements_table.tobs),
           func.avg(measurements_table.tobs),
           func.max(measurements_table.tobs)]
    return session.query(*sel).filter(func.strftime("%m-%d", measurements_table.date) == date).all()


daily_normals("01-01")
# -

# +
# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`

# Set the start and end date of the trip
start_date = dt.datetime.strptime('2011-03-05', '%Y-%m-%d')
end_date = dt.datetime.strptime('2012-02-28', '%Y-%m-%d')

# Use the start and end date to create a range of dates
date_range = session.query(measurements_table.date).\
    filter(measurements_table.date >= start_date.strftime('%Y-%m-%d')).\
    filter(measurements_table.date <= end_date.strftime('%Y-%m-%d')).\
    group_by(measurements_table.date).all()
date_range = list(np.ravel(date_range))

# Strip off the year and save a list of %m-%d strings
query_dates = [dt.datetime.strptime(d, '%Y-%m-%d').strftime('%m-%d') for d in date_range]

# Loop through the list of %m-%d strings and calculate the normals for each date
normals = [list(np.ravel(daily_normals(date))) for date in query_dates]

print(f"Entry Count: {len(normals)}")
# -

# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
Plot_Data = pd.DataFrame(
    normals,
    columns=['Temp Min', 'Temp Avg', 'Temp Max'],
    index=pd.Index(data=date_range, name='Date')
)
Plot_Data.head()

# Plot the daily normals as an area plot with `stacked=False`
Plot_Data.plot.area(stacked=False, rot=20)
plt.ylabel('Temp (F)')
plt.tight_layout()
plt.show()
climate.ipynb
/ ---
/ jupyter:
/   jupytext:
/     text_representation:
/       extension: .q
/       format_name: light
/       format_version: '1.5'
/     jupytext_version: 1.14.4
/   kernelspec:
/     display_name: SQL
/     language: sql
/     name: SQL
/ ---

/ + azdata_cell_guid="c42b62c8-adff-4efe-a447-bb7fab3899cd"
-- Create a new table called '[TWITTER]' in schema '[SOURCES]'.
-- Drop any existing copy first so the script can be re-run safely.
IF OBJECT_ID('[sources].[TWITTER]', 'U') IS NOT NULL
    DROP TABLE [sources].[TWITTER]
GO

-- Create the table in the specified schema
CREATE TABLE [sources].[TWITTER]
(
    [ID]       INT          NOT NULL PRIMARY KEY,  -- Primary Key column
    [NAME]     NVARCHAR(50) NOT NULL,
    [LOCATION] NVARCHAR(50) NOT NULL
    -- Specify more columns here
);
GO
queries/Notebook-0.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Processor temperature
#
# We have a temperature sensor in the processor of our company's server. We want to analyze the data provided to determine whether we should change the cooling system for a better one. It is expensive and as a data analyst we cannot make decisions without a basis.
#
# We provide the temperatures measured throughout the 24 hours of a day in a list-type data structure composed of 24 integers:
# ```
# temperatures_C = [33,66,65,0,59,60,62,64,70,76,80,69,80,83,68,79,61,53,50,49,53,48,45,39]
# ```
#
# ## Goals
#
# 1. Treatment of lists
# 2. Use of loop or list comprehension
# 3. Calculation of the mean, minimum and maximum.
# 4. Filtering of lists.
# 5. Interpolate an outlier.
# 6. Logical operators.
# 7. Print

# ## Temperature graph
# To facilitate understanding, the temperature graph is shown below. You do not have to do anything in this section. The test starts in **Problem**.

# +
# import
import matplotlib.pyplot as plt
# %matplotlib inline

# axis x, axis y
# NOTE(review): this series differs from the `temperatures_C` list quoted in
# the intro (indices 11 and 14: 81/90 here vs 69/68 there) — confirm which
# series is the intended input.
y = [33,66,65,0,59,60,62,64,70,76,80,81,80,83,90,79,61,53,50,49,53,48,45,39]
x = list(range(len(y)))

# plot
plt.plot(x, y)
plt.axhline(y=70, linewidth=1, color='r')
plt.xlabel('hours')
plt.ylabel('Temperature ºC')
plt.title('Temperatures of our server throughout the day')
# -

# ## Problem
#
# If the sensor detects more than 4 hours with temperatures greater than or equal to 70ºC or any temperature above 80ºC or the average exceeds 65ºC throughout the day, we must give the order to change the cooling system to avoid damaging the processor.
#
# We will guide you step by step so you can make the decision by calculating some intermediate steps:
#
# 1. Minimum temperature
# 2. Maximum temperature
# 3. Temperatures equal to or greater than 70ºC
# 4. Average temperatures throughout the day.
# 5. If there was a sensor failure at 03:00 and we did not capture the data, how would you estimate the value that we lack? Correct that value in the list of temperatures.
# 6. Bonus: Our maintenance staff is from the United States and does not understand the international metric system. Pass temperatures to Degrees Fahrenheit.
#
# Formula: F = 1.8 * C + 32
#
# web: https://en.wikipedia.org/wiki/Conversion_of_units_of_temperature

# +
# assign a variable to the list of temperatures
temperature_list = y

# 1. Calculate the minimum of the list and print the value using print()
minimum = min(temperature_list)
print("minimum temp. = ", minimum)

# 2. Calculate the maximum of the list and print the value using print()
maximum = max(temperature_list)
print("maximum temp. = ", maximum)

# 3. Items in the list that are equal to or greater than 70ºC
# FIX: the problem statement asks for temperatures *equal to or greater than*
# 70ºC; the original used a strict `>` and silently dropped 70 itself.
greater_70 = []
for i in temperature_list:
    if i >= 70:
        greater_70.append(i)
print("List of temp. above 70C = ", greater_70)

# 4. Calculate the mean temperature throughout the day and print the result
mean = sum(temperature_list)/len(temperature_list)
print("mean temp. = ", mean)

# 5.1 Solve the fault in the sensor by estimating a value: the average of the
# neighbouring readings at 02:00 and 04:00
estimated_temp_at_3 = (temperature_list[2] + temperature_list[4])/2
print("estimated temp. at 3 = ", estimated_temp_at_3)

# 5.2 Update of the estimated value at 03:00 on the list
# FIX: the original used `remove(0)` + `insert(3, ...)`, which deletes the
# first 0 found *anywhere* in the list; assign directly to index 3, the
# faulty reading we actually want to correct.
temperature_list[3] = estimated_temp_at_3
print("updated temp list = ", temperature_list)

# Bonus: convert the list of ºC to ºFarenheit
F_list = []
for i in temperature_list:
    F_list.append(1.8*i+32)
print("list of temp. in F = ", F_list)
# -

# ## Take the decision
# Remember that if the sensor detects more than 4 hours with temperatures greater than or equal to 70ºC or any temperature higher than 80ºC or the average was higher than 65ºC throughout the day, we must give the order to change the cooling system to avoid the danger of damaging the equipment:
# * more than 4 hours with temperatures greater than or equal to 70ºC
# * some temperature higher than 80ºC
# * average was higher than 65ºC throughout the day
# If any of these three is met, the cooling system must be changed.

# +
# Print True or False depending on whether you would change the cooling system or not
change_cooling = False

# any temperature above 80ºC forces the change
for i in temperature_list:
    if i > 80:
        change_cooling = True

# more than 4 hours at or above 70ºC forces the change
above_70 = 0
for i in temperature_list:
    if i >= 70:
        above_70 += 1
if above_70 > 4:
    change_cooling = True

# a daily average above 65ºC forces the change
avg_temp = sum(temperature_list)/len(temperature_list)
if avg_temp > 65:
    change_cooling = True

print("Change the cooling system:", change_cooling, "!")
# -

# ## Future improvements
# 1. We want the hours (not the temperatures) whose temperature exceeds 70ºC
# 2. Condition that those hours are more than 4 consecutive and consecutive, not simply the sum of the whole set. Is this condition met?
# 3. Average of each of the lists (ºC and ºF). How they relate?
# 4. Standard deviation of each of the lists. How they relate?

# 1. We want the hours (not the temperatures) whose temperature exceeds 70ºC
# FIX: the original only re-counted the temperatures; the question asks for
# the *hours* (list positions) at which the threshold is reached.
hours_above_70 = [hour for hour, temp in enumerate(temperature_list) if temp >= 70]
print("Hours whose temperature reached 70ºC or more:", hours_above_70)
print("The temperature was above 70ºC for", len(hours_above_70), "hours.")

# 2. Condition that those hours are more than 4 consecutive and consecutive, not simply the sum of the whole set. Is this condition met?
# FIX: the original loop compared the *last three* list entries on every
# iteration, so it never measured a consecutive run.  Track the longest
# streak of hours above 70ºC instead.
streak = 0
longest_streak = 0
for temp in temperature_list:
    streak = streak + 1 if temp > 70 else 0
    longest_streak = max(longest_streak, streak)
Condition = "This condition is met" if longest_streak > 4 else "Condition is not met"
print(Condition)

# +
# 3. Average of each of the lists (ºC and ºF). How they relate?
average_C = sum(temperature_list)/len(temperature_list)
print("average in C = ", average_C)

average_F = sum(F_list)/len(F_list)
print("average in F = ", average_F)

# The averages obey the same linear conversion as the data themselves:
# avg_F == 1.8 * avg_C + 32.
print("We cann use the formular to calculate the average ºF from the average ºC", "(Average ºC * 1.8)+ 32 =", average_C*1.8+32)

# +
# 4. Standard deviation of each of the lists. How they relate?

# Sample standard deviation of the Celsius list (n - 1 denominator).
numerator = 0
for i in temperature_list:
    numerator += (i - average_C)**2
denominator = len(temperature_list) - 1
se_C = (numerator/denominator)**0.5
print("SE of temp. list in C = ", se_C)

# Sample standard deviation of the Fahrenheit list.
numerator = 0
for i in F_list:
    numerator += (i - average_F)**2
denominator = len(F_list) - 1
se_F = (numerator/denominator)**0.5
print("SE of temp. list in F = ", se_F)

# FIX: the original printed `average_C*1.8+32` here — the *average*, while the
# sentence talks about the standard deviation.  Use `se_C` so the printed
# number actually demonstrates the claim.  (The real relation is
# se_F == 1.8 * se_C: the +32 offset cancels out of a deviation.)
print("We cannot use the formular to calculate the SE of ºF from the SE of ºC", "(SE of ºC * 1.8)+ 32 is", se_C*1.8+32,". This is not equal to the real SE of ºF")
# -
temperature/temperature.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # name: python2 # --- # + [markdown] colab_type="text" id="tghWegsjhpkt" # ##### Copyright &copy; 2018 The TensorFlow Authors. # + colab={} colab_type="code" id="rSGJWC5biBiG" #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # + [markdown] colab_type="text" id="YuSYVbwEYNHw" # # TensorFlow Data Validation # ***An Example of a Key Component of TensorFlow Extended*** # + [markdown] colab_type="text" id="rLsMb4vqY244" # Note: You can run this example right now in a Jupyter-style notebook, no setup required! 
Just click "Run in Google Colab" # # <div class="devsite-table-wrapper"><table class="tfo-notebook-buttons" align="left"> # <td><a target="_blank" href="https://www.tensorflow.org/tfx/tutorials/data_validation/chicago_taxi"> # <img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a></td> # <td><a target="_blank" href="https://colab.sandbox.google.com/github/tensorflow/tfx/blob/master/docs/tutorials/data_validation/chicago_taxi.ipynb"> # <img src="https://www.tensorflow.org/images/colab_logo_32px.png">Run in Google Colab</a></td> # <td><a target="_blank" href="https://github.com/tensorflow/tfx/blob/master/docs/tutorials/data_validation/chicago_taxi.ipynb"> # <img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a></td> # </table></div> # + [markdown] colab_type="text" id="mPt5BHTwy_0F" # This example colab notebook illustrates how TensorFlow Data Validation (TFDV) can be used to investigate and visualize your dataset. That includes looking at descriptive statistics, inferring a schema, checking for and fixing anomalies, and checking for drift and skew in our dataset. It's important to understand your dataset's characteristics, including how it might change over time in your production pipeline. It's also important to look for anomalies in your data, and to compare your training, evaluation, and serving datasets to make sure that they're consistent. # # We'll use data from the [Taxi Trips dataset](https://data.cityofchicago.org/Transportation/Taxi-Trips/wrvz-psew) released by the City of Chicago. # # Note: This site provides applications using data that has been modified for use from its original source, www.cityofchicago.org, the official website of the City of Chicago. The City of Chicago makes no claims as to the content, accuracy, timeliness, or completeness of any of the data provided at this site. The data provided at this site is subject to change at any time. 
It is understood that the data provided at this site is being used at one’s own risk. # # [Read more](https://cloud.google.com/bigquery/public-data/chicago-taxi) about the dataset in [Google BigQuery](https://cloud.google.com/bigquery/). Explore the full dataset in the [BigQuery UI](https://bigquery.cloud.google.com/dataset/bigquery-public-data:chicago_taxi_trips). # # Key Point: As a modeler and developer, think about how this data is used and the potential benefits and harm a model's predictions can cause. A model like this could reinforce societal biases and disparities. Is a feature relevant to the problem you want to solve or will it introduce bias? For more information, read about [ML fairness](https://developers.google.com/machine-learning/fairness-overview/). # + [markdown] colab_type="text" id="Fnm6Mj3vTGLm" # The columns in the dataset are: # <table> # <tr><td>pickup_community_area</td><td>fare</td><td>trip_start_month</td></tr> # # <tr><td>trip_start_hour</td><td>trip_start_day</td><td>trip_start_timestamp</td></tr> # <tr><td>pickup_latitude</td><td>pickup_longitude</td><td>dropoff_latitude</td></tr> # <tr><td>dropoff_longitude</td><td>trip_miles</td><td>pickup_census_tract</td></tr> # <tr><td>dropoff_census_tract</td><td>payment_type</td><td>company</td></tr> # <tr><td>trip_seconds</td><td>dropoff_community_area</td><td>tips</td></tr> # </table> # + [markdown] colab_type="text" id="RptgLn2RYuK3" # ## Load the files # After checking our Python version we'll download our dataset in a zip file, using HTTPS and a Google Cloud server. 
# + colab={} colab_type="code" id="K4QXVIM7iglN"
from __future__ import print_function
import sys, os
import tempfile, urllib, zipfile

# Confirm that we're using Python 2.
# FIX: compare the version number with `==`, not `is` — `is` tests object
# identity and only happens to work for small cached integers in CPython.
assert sys.version_info.major == 2, 'Oops, not running Python 2'

# Set up some globals for our file paths
BASE_DIR = tempfile.mkdtemp()
DATA_DIR = os.path.join(BASE_DIR, 'data')
OUTPUT_DIR = os.path.join(BASE_DIR, 'chicago_taxi_output')
TRAIN_DATA = os.path.join(DATA_DIR, 'train', 'data.csv')
EVAL_DATA = os.path.join(DATA_DIR, 'eval', 'data.csv')
SERVING_DATA = os.path.join(DATA_DIR, 'serving', 'data.csv')

# Download the zip file from GCP and unzip it.
# FIX: the original opened the archive twice and closed only the second,
# throw-away handle, leaking the one that performed the extraction; the local
# was also named `zip`, shadowing the builtin.
zip_path, headers = urllib.urlretrieve('https://storage.googleapis.com/tfx-colab-datasets/chicago_data.zip')
archive = zipfile.ZipFile(zip_path)
archive.extractall(BASE_DIR)
archive.close()

print("Here's what we downloaded:")
# !ls -lR {os.path.join(BASE_DIR, 'data')}

# + [markdown] colab_type="text" id="4qBFH1ARcSNk"
# ## Install TFDV
#
# This will pull in all the dependencies, which will take a minute. Please ignore the warnings.

# + colab={} colab_type="code" id="gE-J0fCb431_"
# !pip install -q tensorflow_data_validation
import tensorflow_data_validation as tfdv
print('TFDV version: {}'.format(tfdv.version.__version__))

# + [markdown] colab_type="text" id="H0sFmiTbT8-x"
# ## Compute and visualize statistics
#
# First we'll use [`tfdv.generate_statistics_from_csv`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/generate_statistics_from_csv) to compute statistics for our training data. (ignore the snappy warnings)
#
# TFDV can compute descriptive [statistics](https://github.com/tensorflow/metadata/blob/v0.6.0/tensorflow_metadata/proto/v0/statistics.proto) that provide a quick overview of the data in terms of the features that are present and the shapes of their value distributions.
#
# Internally, TFDV uses [Apache Beam](https://beam.apache.org/)'s data-parallel processing framework to scale the computation of statistics over large datasets.
# For applications that wish to integrate deeper with TFDV (e.g., attach statistics generation at the end of a data-generation pipeline), the API also exposes a Beam PTransform for statistics generation.

# + colab={} colab_type="code" id="EE481oMbT-H0"
train_stats = tfdv.generate_statistics_from_csv(data_location=TRAIN_DATA)

# + [markdown] colab_type="text" id="JhXQSxJ2dB_6"
# Now let's use [`tfdv.visualize_statistics`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/visualize_statistics), which uses [Facets](https://pair-code.github.io/facets/) to create a succinct visualization of our training data:
#
# * Notice that numeric features and categorical features are visualized separately, and that charts are displayed showing the distributions for each feature.
# * Notice that features with missing or zero values display a percentage in red as a visual indicator that there may be issues with examples in those features. The percentage is the percentage of examples that have missing or zero values for that feature.
# * Notice that there are no examples with values for `pickup_census_tract`. This is an opportunity for dimensionality reduction!
# * Try clicking "expand" above the charts to change the display
# * Try hovering over bars in the charts to display bucket ranges and counts
# * Try switching between the log and linear scales, and notice how the log scale reveals much more detail about the `payment_type` categorical feature
# * Try selecting "quantiles" from the "Chart to show" menu, and hover over the markers to show the quantile percentages

# + colab={} colab_type="code" id="U3tUKgh7Up3x"
tfdv.visualize_statistics(train_stats)

# + [markdown] colab_type="text" id="KVR02-y4V0uM"
# ## Infer a schema
#
# Now let's use [`tfdv.infer_schema`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/infer_schema) to create a schema for our data. A schema defines constraints for the data that are relevant for ML.
Example constraints include the data type of each feature, whether it's numerical or categorical, or the frequency of its presence in the data. For categorical features the schema also defines the domain - the list of acceptable values. Since writing a schema can be a tedious task, especially for datasets with lots of features, TFDV provides a method to generate an initial version of the schema based on the descriptive statistics. # # Getting the schema right is important because the rest of our production pipeline will be relying on the schema that TFDV generates to be correct. The schema also provides documentation for the data, and so is useful when different developers work on the same data. Let's use [`tfdv.display_schema`](https://www.tensorflow.org/tfx/data_validation/api_docs/python/tfdv/display_schema) to display the inferred schema so that we can review it. # + colab={} colab_type="code" id="6LLkRJThVr9m" schema = tfdv.infer_schema(statistics=train_stats) tfdv.display_schema(schema=schema) # + [markdown] colab_type="text" id="ZVa3EXE8WEDE" # ## Check evaluation data for errors # # So far we've only been looking at the training data. It's important that our evaluation data is consistent with our training data, including that it uses the same schema. It's also important that the evaluation data includes examples of roughly the same ranges of values for our numerical features as our training data, so that our coverage of the loss surface during evaluation is roughly the same as during training. The same is true for categorical features. Otherwise, we may have training issues that are not identified during evaluation, because we didn't evaluate part of our loss surface. # # * Notice that each feature now includes statistics for both the training and evaluation datasets. # * Notice that the charts now have both the training and evaluation datasets overlaid, making it easy to compare them. 
# * Notice that the charts now include a percentages view, which can be combined with log or the default linear scales. # * Notice that the mean and median for `trip_miles` are different for the training versus the evaluation datasets. Will that cause problems? # * Wow, the max `tips` is very different for the training versus the evaluation datasets. Will that cause problems? # * Click expand on the Numeric Features chart, and select the log scale. Review the `trip_seconds` feature, and notice the difference in the max. Will evaluation miss parts of the loss surface? # + colab={} colab_type="code" id="j_P0RLYlV6XG" # Compute stats for evaluation data eval_stats = tfdv.generate_statistics_from_csv(data_location=EVAL_DATA) # Compare evaluation data with training data tfdv.visualize_statistics(lhs_statistics=eval_stats, rhs_statistics=train_stats, lhs_name='EVAL_DATASET', rhs_name='TRAIN_DATASET') # + [markdown] colab_type="text" id="ycRRa4leHp84" # ## Check for evaluation anomalies # # Does our evaluation dataset match the schema from our training dataset? This is especially important for categorical features, where we want to identify the range of acceptable values. # # Key Point: What would happen if we tried to evaluate using data with categorical feature values that were not in our training dataset? What about numeric features that are outside the ranges in our training dataset? # + colab={} colab_type="code" id="T7uGVeL2WOam" # Check eval data for errors by validating the eval data stats using the previously inferred schema. anomalies = tfdv.validate_statistics(statistics=eval_stats, schema=schema) tfdv.display_anomalies(anomalies) # + [markdown] colab_type="text" id="dzxx1gBpJIBa" # ## Fix evaluation anomalies in the schema # # Oops! It looks like we have some new values for `company` in our evaluation data, that we didn't have in our training data. We also have a new value for `payment_type`. 
These should be considered anomalies, but what we decide to do about them depends on our domain knowledge of the data. If an anomaly truly indicates a data error, then the underlying data should be fixed. Otherwise, we can simply update the schema to include the values in the eval dataset. # # Key Point: How would our evaluation results be affected if we did not fix these problems? # # Unless we change our evaluation dataset we can't fix everything, but we can fix things in the schema that we're comfortable accepting. That includes relaxing our view of what is and what is not an anomaly for particular features, as well as updating our schema to include missing values for categorical features. TFDV has enabled us to discover what we need to fix. # # Let's make those fixes now, and then review one more time. # + colab={} colab_type="code" id="legN2nXLWZAc" # Relax the minimum fraction of values that must come from the domain for feature company. company = tfdv.get_feature(schema, 'company') company.distribution_constraints.min_domain_mass = 0.9 # Add new value to the domain of feature payment_type. payment_type_domain = tfdv.get_domain(schema, 'payment_type') payment_type_domain.value.append('Prcard') # Validate eval stats after updating the schema updated_anomalies = tfdv.validate_statistics(eval_stats, schema) tfdv.display_anomalies(updated_anomalies) # + [markdown] colab_type="text" id="xNo72YP9LN98" # Hey, look at that! We verified that the training and evaluation data are now consistent! Thanks TFDV ;) # + [markdown] colab_type="text" id="KZ1P4ucHJj5o" # ## Schema Environments # + [markdown] colab_type="text" id="qb179jczJppA" # We also split off a 'serving' dataset for this example, so we should check that too. By default all datasets in a pipeline should use the same schema, but there are often exceptions. For example, in supervised learning we need to include labels in our dataset, but when we serve the model for inference the labels will not be included. 
In some cases introducing slight schema variations is necessary. # # **Environments** can be used to express such requirements. In particular, features in schema can be associated with a set of environments using `default_environment`, `in_environment` and `not_in_environment`. # # For example, in this dataset the `tips` feature is included as the label for training, but it's missing in the serving data. Without environment specified, it will show up as an anomaly. # + colab={} colab_type="code" id="wSZfbnifJuTA" serving_stats = tfdv.generate_statistics_from_csv(SERVING_DATA) serving_anomalies = tfdv.validate_statistics(serving_stats, schema) tfdv.display_anomalies(serving_anomalies) # + [markdown] colab_type="text" id="FDYHvZ09LfkT" # We'll deal with the `tips` feature below. We also have an INT value in our trip seconds, where our schema expected a FLOAT. By making us aware of that difference, TFDV helps uncover inconsistencies in the way the data is generated for training and serving. It's very easy to be unaware of problems like that until model performance suffers, sometimes catastrophically. It may or may not be a significant issue, but in any case this should be cause for further investigation. # # In this case, we can safely convert INT values to FLOATs, so we want to tell TFDV to use our schema to infer the type. Let's do that now. # + colab={} colab_type="code" id="OhtYF8aAczpd" options = tfdv.StatsOptions(schema=schema, infer_type_from_schema=True) serving_stats = tfdv.generate_statistics_from_csv(SERVING_DATA, stats_options=options) serving_anomalies = tfdv.validate_statistics(serving_stats, schema) tfdv.display_anomalies(serving_anomalies) # + [markdown] colab_type="text" id="bJjh5rigc5xy" # Now we just have the `tips` feature (which is our label) showing up as an anomaly ('Column dropped'). Of course we don't expect to have labels in our serving data, so let's tell TFDV to ignore that. 
# + colab={} colab_type="code" id="bnbnw8H6Lp2M" # All features are by default in both TRAINING and SERVING environments. schema.default_environment.append('TRAINING') schema.default_environment.append('SERVING') # Specify that 'tips' feature is not in SERVING environment. tfdv.get_feature(schema, 'tips').not_in_environment.append('SERVING') serving_anomalies_with_env = tfdv.validate_statistics( serving_stats, schema, environment='SERVING') tfdv.display_anomalies(serving_anomalies_with_env) # + [markdown] colab_type="text" id="yteMr3AGMYEp" # ## Check for drift and skew # + [markdown] colab_type="text" id="8Ftd5k6AMkPV" # In addition to checking whether a dataset conforms to the expectations set in the schema, TFDV also provides functionalities to detect drift and skew. TFDV performs this check by comparing the statistics of the different datasets based on the drift/skew comparators specified in the schema. # # ### Drift # # Drift detection is supported for categorical features and between consecutive spans of data (i.e., between span N and span N+1), such as between different days of training data. We express drift in terms of [L-infinity distance](https://en.wikipedia.org/wiki/Chebyshev_distance), and you can set the threshold distance so that you receive warnings when the drift is higher than is acceptable. Setting the correct distance is typically an iterative process requiring domain knowledge and experimentation. # + [markdown] colab_type="text" id="eBFuLpXb6qSp" # ### Skew # # TFDV can detect three different kinds of skew in your data - schema skew, feature skew, and distribution skew. # # #### Schema Skew # # Schema skew occurs when the training and serving data do not conform to the same schema. Both training and serving data are expected to adhere to the same schema. Any expected deviations between the two (such as the label feature being only present in the training data but not in serving) should be specified through environments field in the schema. 
# # #### Feature Skew # # Feature skew occurs when the feature values that a model trains on are different from the feature values that it sees at serving time. For example, this can happen when: # # * A data source that provides some feature values is modified between training and serving time # * There is different logic for generating features between training and serving. For example, if you apply some transformation only in one of the two code paths. # # #### Distribution Skew # # Distribution skew occurs when the distribution of the training dataset is significantly different from the distribution of the serving dataset. One of the key causes for distribution skew is using different code or different data sources to generate the training dataset. Another reason is a faulty sampling mechanism that chooses a non-representative subsample of the serving data to train on. # + colab={} colab_type="code" id="wEUsZm_rOd1Q" # Add skew comparator for 'payment_type' feature. payment_type = tfdv.get_feature(schema, 'payment_type') payment_type.skew_comparator.infinity_norm.threshold = 0.01 # Add drift comparator for 'company' feature. company=tfdv.get_feature(schema, 'company') company.drift_comparator.infinity_norm.threshold = 0.001 skew_anomalies = tfdv.validate_statistics(train_stats, schema, previous_statistics=eval_stats, serving_statistics=serving_stats) tfdv.display_anomalies(skew_anomalies) # + [markdown] colab_type="text" id="1GzbbsPgf0Bg" # In this example we do see some drift, but it is well below the threshold that we've set. # + [markdown] colab_type="text" id="wJ5saC9eWvHx" # ## Freeze the schema # # Now that the schema has been reviewed and curated, we will store it in a file to reflect its "frozen" state. 
# + colab={} colab_type="code" id="ydkL4DkIWn18" from tensorflow.python.lib.io import file_io from google.protobuf import text_format file_io.recursive_create_dir(OUTPUT_DIR) schema_file = os.path.join(OUTPUT_DIR, 'schema.pbtxt') tfdv.write_schema_text(schema, schema_file) # !cat {schema_file} # + [markdown] colab_type="text" id="b8eC59yISdGB" # ## When to use TFDV # # It's easy to think of TFDV as only applying to the start of your training pipeline, as we did here, but in fact it has many uses. Here's a few more: # # * Validating new data for inference to make sure that we haven't suddenly started receiving bad features # * Validating new data for inference to make sure that our model has trained on that part of the decision surface # * Validating our data after we've transformed it and done feature engineering (probably using [TensorFlow Transform](https://www.tensorflow.org/tfx/transform/)) to make sure we haven't done something wrong
docs/tutorials/data_validation/chicago_taxi.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import tempfile import warnings warnings.filterwarnings("ignore") class Utils: @staticmethod def load_data(path, index_col=0): df = pd.read_csv(path, index_col=0) return df @staticmethod def get_training_data(df): training_data = pd.DataFrame(df["2014-01-01":"2018-01-01"]) X = training_data.drop(columns="power") y = training_data["power"] return X, y @staticmethod def get_validation_data(df): validation_data = pd.DataFrame(df["2018-01-01":"2019-01-01"]) X = validation_data.drop(columns="power") y = validation_data["power"] return X, y @staticmethod def print_pandas_dataset(d, n=5): """ Given a Pandas dataFrame show the dimensions sizes :param d: Pandas dataFrame :return: None """ print("rows = %d; columns=%d" % (d.shape[0], d.shape[1])) print(d.head(n))
model_registery/notebooks/utils_class.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Transfer Learning with ONNX # # The notebooks retrieve the already converted onnx model for [SqueezeNet](https://github.com/onnx/models/tree/master/vision/classification/squeezenet) and uses it in a pipeline created with [scikit-learn](https://scikit-learn.org/stable/). from jyquickhelper import add_notebook_menu add_notebook_menu() # %matplotlib inline # ## Retrieve the ONNX model # + import os from pyensae.datasource import download_data src = ("https://s3.amazonaws.com/onnx-model-zoo/mobilenet/" "mobilenetv2-1.0/") model_file = "mobilenetv2-1.0.onnx" src = "https://s3.amazonaws.com/onnx-model-zoo/squeezenet/squeezenet1.1/" model_file = "squeezenet1.1.onnx" if not os.path.exists(model_file): print("Download '{0}'...".format(model_file)) download_data(model_file, website=src) print("Done.") # - # ## An image # + from PIL import Image img_path = "800px-Tour_Eiffel_Wikimedia_Commons_(cropped).jpg" image = Image.open(img_path) image.reduce(4) # - # ## ImageNet classes # # Next cell is only about the list of class names associated to the *ImageNet* competition. Better to jump to the next part. 
class_names = { 0: 'tench, Tinca tinca', 1: 'goldfish, Carassius auratus', 2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias', 3: 'tiger shark, Galeocerdo cuvieri', 4: 'hammerhead, hammerhead shark', 5: 'electric ray, crampfish, numbfish, torpedo', 6: 'stingray', 7: 'cock', 8: 'hen', 9: 'ostrich, Struthio camelus', 10: 'brambling, Fringilla montifringilla', 11: 'goldfinch, Carduelis carduelis', 12: 'house finch, linnet, Carpodacus mexicanus', 13: 'junco, snowbird', 14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea', 15: 'robin, American robin, Turdus migratorius', 16: 'bulbul', 17: 'jay', 18: 'magpie', 19: 'chickadee', 20: 'water ouzel, dipper', 21: 'kite', 22: 'bald eagle, American eagle, Haliaeetus leucocephalus', 23: 'vulture', 24: 'great grey owl, great gray owl, Strix nebulosa', 25: 'European fire salamander, Salamandra salamandra', 26: 'common newt, Triturus vulgaris', 27: 'eft', 28: 'spotted salamander, Ambystoma maculatum', 29: 'axolotl, mud puppy, Ambystoma mexicanum', 30: 'bullfrog, Rana catesbeiana', 31: 'tree frog, tree-frog', 32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui', 33: 'loggerhead, loggerhead turtle, Caretta caretta', 34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea', 35: 'mud turtle', 36: 'terrapin', 37: 'box turtle, box tortoise', 38: 'banded gecko', 39: 'common iguana, iguana, Iguana iguana', 40: 'American chameleon, anole, Anolis carolinensis', 41: 'whiptail, whiptail lizard', 42: 'agama', 43: 'frilled lizard, Chlamydosaurus kingi', 44: 'alligator lizard', 45: 'Gila monster, Heloderma suspectum', 46: 'green lizard, Lacerta viridis', 47: 'African chameleon, Chamaeleo chamaeleon', 48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis', 49: 'African crocodile, Nile crocodile, Crocodylus niloticus', 50: 'American alligator, Alligator mississipiensis', 51: 'triceratops', 52: 'thunder snake, worm snake, Carphophis 
amoenus', 53: 'ringneck snake, ring-necked snake, ring snake', 54: 'hognose snake, puff adder, sand viper', 55: 'green snake, grass snake', 56: 'king snake, kingsnake', 57: 'garter snake, grass snake', 58: 'water snake', 59: 'vine snake', 60: 'night snake, Hypsiglena torquata', 61: 'boa constrictor, Constrictor constrictor', 62: 'rock python, rock snake, Python sebae', 63: 'Indian cobra, Naja naja', 64: 'green mamba', 65: 'sea snake', 66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus', 67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus', 68: 'sidewinder, horned rattlesnake, Crotalus cerastes', 69: 'trilobite', 70: 'harvestman, daddy longlegs, Phalangium opilio', 71: 'scorpion', 72: 'black and gold garden spider, Argiope aurantia', 73: 'barn spider, Araneus cavaticus', 74: 'garden spider, Aranea diademata', 75: 'black widow, Latrodectus mactans', 76: 'tarantula', 77: 'wolf spider, hunting spider', 78: 'tick', 79: 'centipede', 80: 'black grouse', 81: 'ptarmigan', 82: 'ruffed grouse, partridge, Bonasa umbellus', 83: 'prairie chicken, prairie grouse, prairie fowl', 84: 'peacock', 85: 'quail', 86: 'partridge', 87: 'African grey, African gray, Psittacus erithacus', 88: 'macaw', 89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita', 90: 'lorikeet', 91: 'coucal', 92: 'bee eater', 93: 'hornbill', 94: 'hummingbird', 95: 'jacamar', 96: 'toucan', 97: 'drake', 98: 'red-breasted merganser, Mergus serrator', 99: 'goose', 100: 'black swan, Cygnus atratus', 101: 'tusker', 102: 'echidna, spiny anteater, anteater', 103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus', 104: 'wallaby, brush kangaroo', 105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus', 106: 'wombat', 107: 'jellyfish', 108: 'sea anemone, anemone', 109: 'brain coral', 110: 'flatworm, platyhelminth', 111: 'nematode, nematode worm, roundworm', 112: 'conch', 113: 'snail', 114: 'slug', 115: 'sea slug, 
nudibranch', 116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore', 117: 'chambered nautilus, pearly nautilus, nautilus', 118: 'Dungeness crab, Cancer magister', 119: 'rock crab, Cancer irroratus', 120: 'fiddler crab', 121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica', 122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus', 123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish', 124: 'crayfish, crawfish, crawdad, crawdaddy', 125: 'hermit crab', 126: 'isopod', 127: 'white stork, Ciconia ciconia', 128: 'black stork, Ciconia nigra', 129: 'spoonbill', 130: 'flamingo', 131: 'little blue heron, Egretta caerulea', 132: 'American egret, great white heron, Egretta albus', 133: 'bittern', 134: 'crane', 135: 'limpkin, Aramus pictus', 136: 'European gallinule, Porphyrio porphyrio', 137: 'American coot, marsh hen, mud hen, water hen, Fulica americana', 138: 'bustard', 139: 'ruddy turnstone, Arenaria interpres', 140: 'red-backed sandpiper, dunlin, Erolia alpina', 141: 'redshank, Tringa totanus', 142: 'dowitcher', 143: 'oystercatcher, oyster catcher', 144: 'pelican', 145: 'king penguin, Aptenodytes patagonica', 146: 'albatross, mollymawk', 147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus', 148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca', 149: 'dugong, Dugong dugon', 150: 'sea lion', 151: 'Chihuahua', 152: 'Japanese spaniel', 153: 'Maltese dog, Maltese terrier, Maltese', 154: 'Pekinese, Pekingese, Peke', 155: 'Shih-Tzu', 156: 'Blenheim spaniel', 157: 'papillon', 158: 'toy terrier', 159: 'Rhodesian ridgeback', 160: 'Afghan hound, Afghan', 161: 'basset, basset hound', 162: 'beagle', 163: 'bloodhound, sleuthhound', 164: 'bluetick', 165: 'black-and-tan coonhound', 166: 'Walker hound, Walker foxhound', 167: 'English foxhound', 168: 'redbone', 169: 'borzoi, Russian wolfhound', 170: 'Irish wolfhound', 171: 'Italian greyhound', 172: 
'whippet', 173: 'Ibizan hound, Ibizan Podenco', 174: 'Norwegian elkhound, elkhound', 175: 'otterhound, otter hound', 176: 'Saluki, gazelle hound', 177: 'Scottish deerhound, deerhound', 178: 'Weimaraner', 179: 'Staffordshire bullterrier, Staffordshire bull terrier', 180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier', 181: 'Bedlington terrier', 182: 'Border terrier', 183: 'Kerry blue terrier', 184: 'Irish terrier', 185: 'Norfolk terrier', 186: 'Norwich terrier', 187: 'Yorkshire terrier', 188: 'wire-haired fox terrier', 189: 'Lakeland terrier', 190: 'Sealyham terrier, Sealyham', 191: 'Airedale, Airedale terrier', 192: 'cairn, cairn terrier', 193: 'Australian terrier', 194: 'Dandie Dinmont, Dandie Dinmont terrier', 195: 'Boston bull, Boston terrier', 196: 'miniature schnauzer', 197: 'giant schnauzer', 198: 'standard schnauzer', 199: 'Scotch terrier, Scottish terrier, Scottie', 200: 'Tibetan terrier, chrysanthemum dog', 201: 'silky terrier, Sydney silky', 202: 'soft-coated wheaten terrier', 203: 'West Highland white terrier', 204: 'Lhasa, Lhasa apso', 205: 'flat-coated retriever', 206: 'curly-coated retriever', 207: 'golden retriever', 208: 'Labrador retriever', 209: 'Chesapeake Bay retriever', 210: 'German short-haired pointer', 211: 'vizsla, Hungarian pointer', 212: 'English setter', 213: 'Irish setter, red setter', 214: 'Gordon setter', 215: 'Brittany spaniel', 216: 'clumber, clumber spaniel', 217: 'English springer, English springer spaniel', 218: 'Welsh springer spaniel', 219: 'cocker spaniel, English cocker spaniel, cocker', 220: 'Sussex spaniel', 221: 'Irish water spaniel', 222: 'kuvasz', 223: 'schipperke', 224: 'groenendael', 225: 'malinois', 226: 'briard', 227: 'kelpie', 228: 'komondor', 229: 'Old English sheepdog, bobtail', 230: 'Shetland sheepdog, Shetland sheep dog, Shetland', 231: 'collie', 232: 'Border collie', 233: 'Bouvier des Flandres, Bouviers des Flandres', 234: 'Rottweiler', 235: 'German 
shepherd, German shepherd dog, German police dog, alsatian', 236: 'Doberman, Doberman pinscher', 237: 'miniature pinscher', 238: 'Greater Swiss Mountain dog', 239: 'Bernese mountain dog', 240: 'Appenzeller', 241: 'EntleBucher', 242: 'boxer', 243: 'bull mastiff', 244: 'Tibetan mastiff', 245: 'French bulldog', 246: 'Great Dane', 247: 'Saint Bernard, St Bernard', 248: 'Eskimo dog, husky', 249: 'malamute, malemute, Alaskan malamute', 250: 'Siberian husky', 251: 'dalmatian, coach dog, carriage dog', 252: 'affenpinscher, monkey pinscher, monkey dog', 253: 'basenji', 254: 'pug, pug-dog', 255: 'Leonberg', 256: 'Newfoundland, Newfoundland dog', 257: 'Great Pyrenees', 258: 'Samoyed, Samoyede', 259: 'Pomeranian', 260: 'chow, chow chow', 261: 'keeshond', 262: 'Brabancon griffon', 263: 'Pembroke, Pembroke Welsh corgi', 264: 'Cardigan, Cardigan Welsh corgi', 265: 'toy poodle', 266: 'miniature poodle', 267: 'standard poodle', 268: 'Mexican hairless', 269: 'timber wolf, grey wolf, gray wolf, Canis lupus', 270: 'white wolf, Arctic wolf, Canis lupus tundrarum', 271: 'red wolf, maned wolf, Canis rufus, Canis niger', 272: 'coyote, prairie wolf, brush wolf, Canis latrans', 273: 'dingo, warrigal, warragal, Canis dingo', 274: 'dhole, Cuon alpinus', 275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus', 276: 'hyena, hyaena', 277: 'red fox, Vulpes vulpes', 278: 'kit fox, Vulpes macrotis', 279: 'Arctic fox, white fox, Alopex lagopus', 280: 'grey fox, gray fox, Urocyon cinereoargenteus', 281: 'tabby, tabby cat', 282: 'tiger cat', 283: 'Persian cat', 284: 'Siamese cat, Siamese', 285: 'Egyptian cat', 286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor', 287: 'lynx, catamount', 288: 'leopard, Panthera pardus', 289: 'snow leopard, ounce, Panthera uncia', 290: 'jaguar, panther, Panthera onca, Felis onca', 291: 'lion, king of beasts, Panthera leo', 292: 'tiger, Panthera tigris', 293: 'cheetah, chetah, Acinonyx jubatus', 294: 'brown bear, bruin, Ursus 
arctos', 295: 'American black bear, black bear, Ursus americanus, Euarctos americanus', 296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus', 297: 'sloth bear, Melursus ursinus, Ursus ursinus', 298: 'mongoose', 299: 'meerkat, mierkat', 300: 'tiger beetle', 301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle', 302: 'ground beetle, carabid beetle', 303: 'long-horned beetle, longicorn, longicorn beetle', 304: 'leaf beetle, chrysomelid', 305: 'dung beetle', 306: 'rhinoceros beetle', 307: 'weevil', 308: 'fly', 309: 'bee', 310: 'ant, emmet, pismire', 311: 'grasshopper, hopper', 312: 'cricket', 313: 'walking stick, walkingstick, stick insect', 314: 'cockroach, roach', 315: 'mantis, mantid', 316: 'cicada, cicala', 317: 'leafhopper', 318: 'lacewing, lacewing fly', 319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", 320: 'damselfly', 321: 'admiral', 322: 'ringlet, ringlet butterfly', 323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus', 324: 'cabbage butterfly', 325: 'sulphur butterfly, sulfur butterfly', 326: 'lycaenid, lycaenid butterfly', 327: 'starfish, sea star', 328: 'sea urchin', 329: 'sea cucumber, holothurian', 330: 'wood rabbit, cottontail, cottontail rabbit', 331: 'hare', 332: 'Angora, Angora rabbit', 333: 'hamster', 334: 'porcupine, hedgehog', 335: 'fox squirrel, eastern fox squirrel, Sciurus niger', 336: 'marmot', 337: 'beaver', 338: 'guinea pig, Cavia cobaya', 339: 'sorrel', 340: 'zebra', 341: 'hog, pig, grunter, squealer, Sus scrofa', 342: 'wild boar, boar, Sus scrofa', 343: 'warthog', 344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius', 345: 'ox', 346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis', 347: 'bison', 348: 'ram, tup', 349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis', 350: 'ibex, Capra ibex', 351: 'hartebeest', 352: 'impala, Aepyceros melampus', 
353: 'gazelle', 354: 'Arabian camel, dromedary, Camelus dromedarius', 355: 'llama', 356: 'weasel', 357: 'mink', 358: 'polecat, fitch, foulmart, foumart, Mustela putorius', 359: 'black-footed ferret, ferret, Mustela nigripes', 360: 'otter', 361: 'skunk, polecat, wood pussy', 362: 'badger', 363: 'armadillo', 364: 'three-toed sloth, ai, Bradypus tridactylus', 365: 'orangutan, orang, orangutang, Pongo pygmaeus', 366: 'gorilla, Gorilla gorilla', 367: 'chimpanzee, chimp, Pan troglodytes', 368: 'gibbon, Hylobates lar', 369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus', 370: 'guenon, guenon monkey', 371: 'patas, hussar monkey, Erythrocebus patas', 372: 'baboon', 373: 'macaque', 374: 'langur', 375: 'colobus, colobus monkey', 376: 'proboscis monkey, Nasalis larvatus', 377: 'marmoset', 378: 'capuchin, ringtail, Cebus capucinus', 379: 'howler monkey, howler', 380: 'titi, titi monkey', 381: 'spider monkey, Ateles geoffroyi', 382: 'squirrel monkey, Saimiri sciureus', 383: 'Madagascar cat, ring-tailed lemur, Lemur catta', 384: 'indri, indris, Indri indri, Indri brevicaudatus', 385: 'Indian elephant, Elephas maximus', 386: 'African elephant, Loxodonta africana', 387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens', 388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca', 389: 'barracouta, snoek', 390: 'eel', 391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch', 392: 'rock beauty, Holocanthus tricolor', 393: 'anemone fish', 394: 'sturgeon', 395: 'gar, garfish, garpike, billfish, Lepisosteus osseus', 396: 'lionfish', 397: 'puffer, pufferfish, blowfish, globefish', 398: 'abacus', 399: 'abaya', 400: "academic gown, academic robe, judge's robe", 401: 'accordion, piano accordion, squeeze box', 402: 'acoustic guitar', 403: 'aircraft carrier, carrier, flattop, attack aircraft carrier', 404: 'airliner', 405: 'airship, dirigible', 406: 'altar', 407: 'ambulance', 408: 'amphibian, amphibious vehicle', 409: 'analog 
clock', 410: 'apiary, bee house', 411: 'apron', 412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin', 413: 'assault rifle, assault gun', 414: 'backpack, back pack, knapsack, packsack, rucksack, haversack', 415: 'bakery, bakeshop, bakehouse', 416: 'balance beam, beam', 417: 'balloon', 418: 'ballpoint, ballpoint pen, ballpen, Biro', 419: 'Band Aid', 420: 'banjo', 421: 'bannister, banister, balustrade, balusters, handrail', 422: 'barbell', 423: 'barber chair', 424: 'barbershop', 425: 'barn', 426: 'barometer', 427: 'barrel, cask', 428: 'barrow, garden cart, lawn cart, wheelbarrow', 429: 'baseball', 430: 'basketball', 431: 'bassinet', 432: 'bassoon', 433: 'bathing cap, swimming cap', 434: 'bath towel', 435: 'bathtub, bathing tub, bath, tub', 436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon', 437: 'beacon, lighthouse, beacon light, pharos', 438: 'beaker', 439: 'bearskin, busby, shako', 440: 'beer bottle', 441: 'beer glass', 442: 'bell cote, bell cot', 443: 'bib', 444: 'bicycle-built-for-two, tandem bicycle, tandem', 445: 'bikini, two-piece', 446: 'binder, ring-binder', 447: 'binoculars, field glasses, opera glasses', 448: 'birdhouse', 449: 'boathouse', 450: 'bobsled, bobsleigh, bob', 451: 'bolo tie, bolo, bola tie, bola', 452: 'bonnet, poke bonnet', 453: 'bookcase', 454: 'bookshop, bookstore, bookstall', 455: 'bottlecap', 456: 'bow', 457: 'bow tie, bow-tie, bowtie', 458: 'brass, memorial tablet, plaque', 459: 'brassiere, bra, bandeau', 460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty', 461: 'breastplate, aegis, egis', 462: 'broom', 463: 'bucket, pail', 464: 'buckle', 465: 'bulletproof vest', 466: 'bullet train, bullet', 467: 'butcher shop, meat market', 468: 'cab, hack, taxi, taxicab', 469: 'caldron, cauldron', 470: 'candle, taper, wax light', 471: 'cannon', 472: 'canoe', 473: 'can opener, tin opener', 474: 'cardigan', 475: 'car mirror', 476: 'carousel, 
carrousel, merry-go-round, roundabout, whirligig', 477: "carpenter's kit, tool kit", 478: 'carton', 479: 'car wheel', 480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM', 481: 'cassette', 482: 'cassette player', 483: 'castle', 484: 'catamaran', 485: 'CD player', 486: 'cello, violoncello', 487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone', 488: 'chain', 489: 'chainlink fence', 490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour', 491: 'chain saw, chainsaw', 492: 'chest', 493: 'chiffonier, commode', 494: 'chime, bell, gong', 495: 'china cabinet, china closet', 496: 'Christmas stocking', 497: 'church, church building', 498: 'cinema, movie theater, movie theatre, movie house, picture palace', 499: 'cleaver, meat cleaver, chopper', 500: 'cliff dwelling', 501: 'cloak', 502: 'clog, geta, patten, sabot', 503: 'cocktail shaker', 504: 'coffee mug', 505: 'coffeepot', 506: 'coil, spiral, volute, whorl, helix', 507: 'combination lock', 508: 'computer keyboard, keypad', 509: 'confectionery, confectionary, candy store', 510: 'container ship, containership, container vessel', 511: 'convertible', 512: 'corkscrew, bottle screw', 513: 'cornet, horn, trumpet, trump', 514: 'cowboy boot', 515: 'cowboy hat, ten-gallon hat', 516: 'cradle', 517: 'crane', 518: 'crash helmet', 519: 'crate', 520: 'crib, cot', 521: 'Crock Pot', 522: 'croquet ball', 523: 'crutch', 524: 'cuirass', 525: 'dam, dike, dyke', 526: 'desk', 527: 'desktop computer', 528: 'dial telephone, dial phone', 529: 'diaper, nappy, napkin', 530: 'digital clock', 531: 'digital watch', 532: 'dining table, board', 533: 'dishrag, dishcloth', 534: 'dishwasher, dish washer, dishwashing machine', 535: 'disk brake, disc brake', 536: 'dock, dockage, docking facility', 537: 'dogsled, dog sled, dog sleigh', 538: 'dome', 539: 'doormat, welcome mat', 540: 'drilling platform, offshore rig', 541: 'drum, 
membranophone, tympan', 542: 'drumstick', 543: 'dumbbell', 544: 'Dutch oven', 545: 'electric fan, blower', 546: 'electric guitar', 547: 'electric locomotive', 548: 'entertainment center', 549: 'envelope', 550: 'espresso maker', 551: 'face powder', 552: 'feather boa, boa', 553: 'file, file cabinet, filing cabinet', 554: 'fireboat', 555: 'fire engine, fire truck', 556: 'fire screen, fireguard', 557: 'flagpole, flagstaff', 558: 'flute, transverse flute', 559: 'folding chair', 560: 'football helmet', 561: 'forklift', 562: 'fountain', 563: 'fountain pen', 564: 'four-poster', 565: 'freight car', 566: 'French horn, horn', 567: 'frying pan, frypan, skillet', 568: 'fur coat', 569: 'garbage truck, dustcart', 570: 'gasmask, respirator, gas helmet', 571: 'gas pump, gasoline pump, petrol pump, island dispenser', 572: 'goblet', 573: 'go-kart', 574: 'golf ball', 575: 'golfcart, golf cart', 576: 'gondola', 577: 'gong, tam-tam', 578: 'gown', 579: 'grand piano, grand', 580: 'greenhouse, nursery, glasshouse', 581: 'grille, radiator grille', 582: 'grocery store, grocery, food market, market', 583: 'guillotine', 584: 'hair slide', 585: 'hair spray', 586: 'half track', 587: 'hammer', 588: 'hamper', 589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier', 590: 'hand-held computer, hand-held microcomputer', 591: 'handkerchief, hankie, hanky, hankey', 592: 'hard disc, hard disk, fixed disk', 593: 'harmonica, mouth organ, harp, mouth harp', 594: 'harp', 595: 'harvester, reaper', 596: 'hatchet', 597: 'holster', 598: 'home theater, home theatre', 599: 'honeycomb', 600: 'hook, claw', 601: 'hoopskirt, crinoline', 602: 'horizontal bar, high bar', 603: 'horse cart, horse-cart', 604: 'hourglass', 605: 'iPod', 606: 'iron, smoothing iron', 607: "jack-o'-lantern", 608: 'jean, blue jean, denim', 609: 'jeep, landrover', 610: 'jersey, T-shirt, tee shirt', 611: 'jigsaw puzzle', 612: 'jinrikisha, ricksha, rickshaw', 613: 'joystick', 614: 'kimono', 615: 'knee pad', 616: 'knot', 617: 'lab coat, 
laboratory coat', 618: 'ladle', 619: 'lampshade, lamp shade', 620: 'laptop, laptop computer', 621: 'lawn mower, mower', 622: 'lens cap, lens cover', 623: 'letter opener, paper knife, paperknife', 624: 'library', 625: 'lifeboat', 626: 'lighter, light, igniter, ignitor', 627: 'limousine, limo', 628: 'liner, ocean liner', 629: 'lipstick, lip rouge', 630: 'Loafer', 631: 'lotion', 632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system', 633: "loupe, jeweler's loupe", 634: 'lumbermill, sawmill', 635: 'magnetic compass', 636: 'mailbag, postbag', 637: 'mailbox, letter box', 638: 'maillot', 639: 'maillot, tank suit', 640: 'manhole cover', 641: 'maraca', 642: 'marimba, xylophone', 643: 'mask', 644: 'matchstick', 645: 'maypole', 646: 'maze, labyrinth', 647: 'measuring cup', 648: 'medicine chest, medicine cabinet', 649: 'megalith, megalithic structure', 650: 'microphone, mike', 651: 'microwave, microwave oven', 652: 'military uniform', 653: 'milk can', 654: 'minibus', 655: 'miniskirt, mini', 656: 'minivan', 657: 'missile', 658: 'mitten', 659: 'mixing bowl', 660: 'mobile home, manufactured home', 661: 'Model T', 662: 'modem', 663: 'monastery', 664: 'monitor', 665: 'moped', 666: 'mortar', 667: 'mortarboard', 668: 'mosque', 669: 'mosquito net', 670: 'motor scooter, scooter', 671: 'mountain bike, all-terrain bike, off-roader', 672: 'mountain tent', 673: 'mouse, computer mouse', 674: 'mousetrap', 675: 'moving van', 676: 'muzzle', 677: 'nail', 678: 'neck brace', 679: 'necklace', 680: 'nipple', 681: 'notebook, notebook computer', 682: 'obelisk', 683: 'oboe, hautboy, hautbois', 684: 'ocarina, sweet potato', 685: 'odometer, hodometer, mileometer, milometer', 686: 'oil filter', 687: 'organ, pipe organ', 688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO', 689: 'overskirt', 690: 'oxcart', 691: 'oxygen mask', 692: 'packet', 693: 'paddle, boat paddle', 694: 'paddlewheel, paddle wheel', 695: 'padlock', 696: 'paintbrush', 697: "pajama, pyjama, pj's, jammies", 698: 
'palace', 699: 'panpipe, pandean pipe, syrinx', 700: 'paper towel', 701: 'parachute, chute', 702: 'parallel bars, bars', 703: 'park bench', 704: 'parking meter', 705: 'passenger car, coach, carriage', 706: 'patio, terrace', 707: 'pay-phone, pay-station', 708: 'pedestal, plinth, footstall', 709: 'pencil box, pencil case', 710: 'pencil sharpener', 711: 'perfume, essence', 712: 'Petri dish', 713: 'photocopier', 714: 'pick, plectrum, plectron', 715: 'pickelhaube', 716: 'picket fence, paling', 717: 'pickup, pickup truck', 718: 'pier', 719: 'piggy bank, penny bank', 720: 'pill bottle', 721: 'pillow', 722: 'ping-pong ball', 723: 'pinwheel', 724: 'pirate, pirate ship', 725: 'pitcher, ewer', 726: "plane, carpenter's plane, woodworking plane", 727: 'planetarium', 728: 'plastic bag', 729: 'plate rack', 730: 'plow, plough', 731: "plunger, plumber's helper", 732: 'Polaroid camera, Polaroid Land camera', 733: 'pole', 734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria', 735: 'poncho', 736: 'pool table, billiard table, snooker table', 737: 'pop bottle, soda bottle', 738: 'pot, flowerpot', 739: "potter's wheel", 740: 'power drill', 741: 'prayer rug, prayer mat', 742: 'printer', 743: 'prison, prison house', 744: 'projectile, missile', 745: 'projector', 746: 'puck, hockey puck', 747: 'punching bag, punch bag, punching ball, punchball', 748: 'purse', 749: 'quill, quill pen', 750: 'quilt, comforter, comfort, puff', 751: 'racer, race car, racing car', 752: 'racket, racquet', 753: 'radiator', 754: 'radio, wireless', 755: 'radio telescope, radio reflector', 756: 'rain barrel', 757: 'recreational vehicle, RV, R.V.', 758: 'reel', 759: 'reflex camera', 760: 'refrigerator, icebox', 761: 'remote control, remote', 762: 'restaurant, eating house, eating place, eatery', 763: 'revolver, six-gun, six-shooter', 764: 'rifle', 765: 'rocking chair, rocker', 766: 'rotisserie', 767: 'rubber eraser, rubber, pencil eraser', 768: 'rugby ball', 769: 'rule, ruler', 770: 'running 
shoe', 771: 'safe', 772: 'safety pin', 773: 'saltshaker, salt shaker', 774: 'sandal', 775: 'sarong', 776: 'sax, saxophone', 777: 'scabbard', 778: 'scale, weighing machine', 779: 'school bus', 780: 'schooner', 781: 'scoreboard', 782: 'screen, CRT screen', 783: 'screw', 784: 'screwdriver', 785: 'seat belt, seatbelt', 786: 'sewing machine', 787: 'shield, buckler', 788: 'shoe shop, shoe-shop, shoe store', 789: 'shoji', 790: 'shopping basket', 791: 'shopping cart', 792: 'shovel', 793: 'shower cap', 794: 'shower curtain', 795: 'ski', 796: 'ski mask', 797: 'sleeping bag', 798: 'slide rule, slipstick', 799: 'sliding door', 800: 'slot, one-armed bandit', 801: 'snorkel', 802: 'snowmobile', 803: 'snowplow, snowplough', 804: 'soap dispenser', 805: 'soccer ball', 806: 'sock', 807: 'solar dish, solar collector, solar furnace', 808: 'sombrero', 809: 'soup bowl', 810: 'space bar', 811: 'space heater', 812: 'space shuttle', 813: 'spatula', 814: 'speedboat', 815: "spider web, spider's web", 816: 'spindle', 817: 'sports car, sport car', 818: 'spotlight, spot', 819: 'stage', 820: 'steam locomotive', 821: 'steel arch bridge', 822: 'steel drum', 823: 'stethoscope', 824: 'stole', 825: 'stone wall', 826: 'stopwatch, stop watch', 827: 'stove', 828: 'strainer', 829: 'streetcar, tram, tramcar, trolley, trolley car', 830: 'stretcher', 831: 'studio couch, day bed', 832: 'stupa, tope', 833: 'submarine, pigboat, sub, U-boat', 834: 'suit, suit of clothes', 835: 'sundial', 836: 'sunglass', 837: 'sunglasses, dark glasses, shades', 838: 'sunscreen, sunblock, sun blocker', 839: 'suspension bridge', 840: 'swab, swob, mop', 841: 'sweatshirt', 842: 'swimming trunks, bathing trunks', 843: 'swing', 844: 'switch, electric switch, electrical switch', 845: 'syringe', 846: 'table lamp', 847: 'tank, army tank, armored combat vehicle, armoured combat vehicle', 848: 'tape player', 849: 'teapot', 850: 'teddy, teddy bear', 851: 'television, television system', 852: 'tennis ball', 853: 'thatch, thatched roof', 854: 
'theater curtain, theatre curtain', 855: 'thimble', 856: 'thresher, thrasher, threshing machine', 857: 'throne', 858: 'tile roof', 859: 'toaster', 860: 'tobacco shop, tobacconist shop, tobacconist', 861: 'toilet seat', 862: 'torch', 863: 'totem pole', 864: 'tow truck, tow car, wrecker', 865: 'toyshop', 866: 'tractor', 867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi', 868: 'tray', 869: 'trench coat', 870: 'tricycle, trike, velocipede', 871: 'trimaran', 872: 'tripod', 873: 'triumphal arch', 874: 'trolleybus, trolley coach, trackless trolley', 875: 'trombone', 876: 'tub, vat', 877: 'turnstile', 878: 'typewriter keyboard', 879: 'umbrella', 880: 'unicycle, monocycle', 881: 'upright, upright piano', 882: 'vacuum, vacuum cleaner', 883: 'vase', 884: 'vault', 885: 'velvet', 886: 'vending machine', 887: 'vestment', 888: 'viaduct', 889: 'violin, fiddle', 890: 'volleyball', 891: 'waffle iron', 892: 'wall clock', 893: 'wallet, billfold, notecase, pocketbook', 894: 'wardrobe, closet, press', 895: 'warplane, military plane', 896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin', 897: 'washer, automatic washer, washing machine', 898: 'water bottle', 899: 'water jug', 900: 'water tower', 901: 'whiskey jug', 902: 'whistle', 903: 'wig', 904: 'window screen', 905: 'window shade', 906: 'Windsor tie', 907: 'wine bottle', 908: 'wing', 909: 'wok', 910: 'wooden spoon', 911: 'wool, woolen, woollen', 912: 'worm fence, snake fence, snake-rail fence, Virginia fence', 913: 'wreck', 914: 'yawl', 915: 'yurt', 916: 'web site, website, internet site, site', 917: 'comic book', 918: 'crossword puzzle, crossword', 919: 'street sign', 920: 'traffic light, traffic signal, stoplight', 921: 'book jacket, dust cover, dust jacket, dust wrapper', 922: 'menu', 923: 'plate', 924: 'guacamole', 925: 'consomme', 926: 'hot pot, hotpot', 927: 'trifle', 928: 'ice cream, icecream', 929: 'ice lolly, lolly, lollipop, popsicle', 930: 'French loaf', 931: 'bagel, beigel', 932: 
# +
import numpy


def preprocess(img, size=(224, 224)):
    """Convert a PIL-style image into a normalized NCHW float32 batch.

    Parameters
    ----------
    img : image object
        Anything with a ``resize((w, h))`` method returning an
        array-convertible image. Assumed to yield an H x W x C array with
        at least 3 channels (RGB or RGBA) -- TODO confirm callers never
        pass grayscale images, which would make the transpose fail.
    size : tuple of int, optional
        Target ``(width, height)`` passed to ``resize``. Defaults to the
        224x224 input expected by SqueezeNet; exposed as a parameter so
        the same helper works for models with other input sizes.

    Returns
    -------
    numpy.ndarray
        Array of shape ``(1, 3, size[1], size[0])``, dtype float32,
        values scaled from 8-bit into [0, 1].
    """
    resized = img.resize(size)
    # HWC -> CHW, then add a leading batch axis; ``:3`` keeps only the
    # first three channels (drops an alpha channel if present).
    chw = numpy.asarray(resized).transpose((2, 0, 1))
    batch = chw[numpy.newaxis, :3, :, :] / 255.0
    return batch.astype(numpy.float32)
# Inspect the preprocessed batch: shape and value range (expect
# (1, 3, 224, 224) with values in [0, 1]).
image_data.shape, image_data.min(), image_data.max()
# -

# Load the pretrained SqueezeNet with onnxruntime and list the graph's
# input and output names.
from onnxruntime import InferenceSession
sess = InferenceSession("squeezenet1.1.onnx")
([_.name for _ in sess.get_inputs()], [_.name for _ in sess.get_outputs()])

# Run inference; 'data' is the graph input name listed above.
pred = sess.run(None, {'data': image_data})
pred[0].shape, list(zip(pred[0][0, :5], names_list))

# Pair every raw score with its label, sort ascending, keep the 5 best.
named_pred = list(zip(pred[0][0, :], names_list))
named_pred.sort()
named_pred[-5:]

# ## Transfer Learning
#
# The class [OnnxTransformer](http://www.xavierdupre.fr/app/mlprodict/helpsphinx/mlprodict/sklapi/onnx_transformer.html?highlight=onnxtransformer#mlprodict.sklapi.onnx_transformer.OnnxTransformer) wraps a runtime into a class which follows scikit-learn API.

from onnxruntime import InferenceSession
sess = InferenceSession("squeezenet1.1.onnx")
list(sess.get_inputs())

image_data.shape

# +
# Wrap the raw ONNX bytes in a scikit-learn-compatible transformer.
# NOTE(review): ``model_file`` is defined earlier in the notebook --
# presumably the path to squeezenet1.1.onnx; confirm before reuse.
from mlprodict.sklapi import OnnxTransformer

with open(model_file, 'rb') as f:
    content = f.read()

tr = OnnxTransformer(content, runtime='onnxruntime1')
tr.fit(None)

onx_pred = tr.transform(image_data)
# NOTE(review): variable is named ``named_onx_red`` (sic) -- probably
# meant ``named_onx_pred``; kept as-is to avoid breaking notebook state.
named_onx_red = list(zip(onx_pred[0, :], names_list))
named_onx_red.sort()
named_onx_red[-5:]
# -

# Let's normalize the probabilities within a pipeline.

# +
# L1 normalization rescales the scores so they sum to 1 per sample.
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Normalizer

pipe = Pipeline([
    ('squeeze', tr),
    ('scaler', Normalizer(norm='l1'))
])
pipe.fit(image_data);
# -

onx_pred = pipe.transform(image_data)
named_onx_pred = list(zip(onx_pred[0, :], names_list))
named_onx_pred.sort()
named_onx_pred[-5:]

# ## Merge ONNX graphs in a pipeline

# The output probabilities are now normalized. What if we want to merge the ONNX got from the model zoo and the added normalizer...

from mlprodict.onnx_conv import to_onnx
new_onnx = to_onnx(pipe, image_data)

# +
# First conversion attempt: expected to fail because of an opset
# mismatch (see the note below).
from mlprodict.onnxrt import OnnxInference

try:
    oinf_merged = OnnxInference(new_onnx, runtime="onnxruntime1")
except RuntimeError as e:
    print(e)
# -

# The previous error is due to the fact the downloaded ONNX file was saved in a different opset. We need to reuse this one for the conversion of the whole pipeline.

# Show the opsets declared by the downloaded model.
tr.opsets

# We convert again.

# Pin the conversion to opset 12 so the merged graph matches the
# downloaded model's opset.
new_onnx = to_onnx(pipe, image_data, target_opset=12)
oinf_merged = OnnxInference(new_onnx, runtime="onnxruntime1")

res = oinf_merged.run({'X': image_data})
pred_prob = res["variable"]

tr.opsets

# Normalized probabilities from the merged graph, top five labels.
named_onx_pred_prob = list(zip(pred_prob[0, :], names_list))
named_onx_pred_prob.sort()
named_onx_pred_prob[-5:]

# +
# oinf_merged.to_sequence()
# Conv, Relu, DropOut, MaxPool, AveragePool

# +
# oinfpy = OnnxInference(new_onnx, runtime="python")
# res = oinfpy.run({'X': image_data})
# pred_prob = res["variable"]
# -
_doc/notebooks/transfer_learning.ipynb
# The first cage had balls ranging from 0 to 5 for the first digit, and the other had balls ranging from 0 to 9 for the second digit.
# +
# Pretty-printer for probabilities: shows the "one chance in N" odds
# alongside the percentage in both fixed and scientific notation.
def print_probability(p):
    """Print probability *p* as odds and as a percentage."""
    odds = 1 / p
    percent = p * 100
    message = f"\nOne chance in {odds:,.0f} or {percent:.2f}% ({percent:.2E}%)\n"
    print(message)
# +
# A function to convert Brazilian dates (DD/MM/YYYY) to ISO 8601 (YYYY-MM-DD)
def br2iso(date):
    """Rearrange a 'DD/MM/YYYY' date string into 'YYYY-MM-DD'."""
    day = date[0:2]
    month = date[3:5]
    year = date[-4:]
    return f"{year}-{month}-{day}"
print(f"Expected: {total_numbers}") print(f"Got: {len(all_numbers)}") print(f"Match? {total_numbers == len(all_numbers)}!") # + # A list with a lists for each drawing all_drawings = list() for i in range(total_drawings): drawing = all_numbers[(6*i):(6*i+6)] all_drawings.append(sorted(drawing)) # + # all_drawings should have the same size as total_drawings print(f"Expected: {total_drawings}") print(f"Got: {len(all_drawings)}") print(f"Match? {total_drawings == len(all_drawings)}!") # + # Some games of the all_drawings list for i in [1, 100, 500, 1000, 1500, 2000, total_drawings]: print(f"Drawing {i} was on {all_dates[i-1]} and the numbers were: {all_drawings[i-1]}\n") # + # The drawing statistics for all numbers import numpy as np all_times_np = np.array([v for v in times_drawn.values()]) all_times_mean = int(np.mean(all_times_np)) all_times_sd = np.std(all_times_np) print(f"Drawn times -- Mean: {all_times_mean} ({np.mean(all_times_np):.2f})") print(f"Drawn times -- Standard Deviation: {all_times_sd:.2f}") # + # The drawing statistics for each number for number in times_drawn.keys(): times = times_drawn[number] percent = times / total_numbers * 100 print(f"Number {number:02d} was drawn {times} times ({percent:.3f}%)", end=" ") average = times - all_times_mean print(f"[{abs(average):2} times", end=" ") if (average >= 0): print("above", end=" ") else: print(f"below", end=" ") print("average]") # + # A list of number's popularity, sorted by the times it was drawn popular = [p[0] for p in sorted(times_drawn.items(), key=lambda d: d[1], reverse=True)] print(f"Most popular number: {popular[0]:02d}; Drawn: {times_drawn[popular[0]]} times.") print(f"\nLeast popular number: {popular[-1]:02d}; Drawn: {times_drawn[popular[-1]]} times.") # - print("The 15 most popular numbers are:") for i in range(15): number = popular[i] times = times_drawn[number] average = times - all_times_mean print(f"{(i+1):2d}: Number {number:02d} was drawn {times} times; {average} drawings above 
average.") print("The 15 least popular numbers are:") for i in range(15): number = popular[(59-i)] times = times_drawn[number] average = all_times_mean - times print(f"{(60-i)}: Number {number:02d} was drawn {times} times; {average} drawings below average.") # + # Plot setup import matplotlib.pyplot as plt from matplotlib import style # %matplotlib inline # MATLAB colors blue = [0.0000, 0.4470, 0.7410] green = [0.4660, 0.6740, 0.1880] yellow = [0.9290, 0.6940, 0.1250] orange = [0.8500, 0.3250, 0.0980] black = [0.2500, 0.2500, 0.2500] # + # Bar plot for the number of times numbers were drawn drawings = {key : 0 for key in sorted(times_drawn.values())} for value in times_drawn.values(): drawings[value] += 1 x = [k for k in range(len(drawings.keys()))] y = [v for v in drawings.values()] style.use('fivethirtyeight') plt.figure(figsize=(15, 10)) plt.bar(x, y, color=[blue, green, yellow]) plt.xticks(x, [f"{k}" for k in drawings.keys()], rotation=50) plt.title("Drawings distribution") plt.xlabel("Times drawn") plt.ylabel("Number of times drawn") plt.show() # + # A function to plot drawings statistics def plot_bars(x, y, title): mean = np.mean(y) sd = np.std(y) print(f"Mean: {mean:.2f} <> SD: {sd:.2f}") style.use('fivethirtyeight') plt.figure(figsize=(15, 10)) plt.bar(x, y, color=[blue, green, yellow]) plt.xticks(np.append(np.array(1), np.arange(5,61,5))) xs = [(min(x) - 1), (1 + max(x))] plt.plot(xs, ([mean + sd] * 2), linewidth=1, color=black, label="Mean + SD") plt.plot(xs, ([mean] * 2), linewidth=2, color=orange, label="Mean") plt.plot(xs, ([mean - sd] * 2), linewidth=1, color=black, label="Mean - SD") plt.title(title) plt.xlabel("Numbers") plt.ylabel("Drawings") plt.legend(loc=4) plt.show() # + # A plot of all Mega-Sena drawings title = f"All {total_drawings} Mega-Sena drawings" x = [k for k in times_drawn.keys()] y = [v for v in times_drawn.values()] plot_bars(x, y, title) # + # A function to plot a range of Mega-Sena drawings def range_plot(x): lower = (x[0] - 1) 
upper = (x[1] - 1) if (lower < upper): start_date = all_dates[lower] end_date = all_dates[upper] num_drawings = upper - lower + 1 title = f"Statistics of {num_drawings} drawings: {start_date} - {end_date}" numbers = all_numbers[(lower*6):(upper*6+6)] drawings = {key: 0 for key in range(1,61)} for number in numbers: drawings[number] += 1 x = [k for k in drawings.keys()] y = [v for v in drawings.values()] plot_bars(x, y, title) # + # A plot of the drawings from 2017 to 2018 range_plot([1891, total_drawings]) # + # Interactive plot of the Mega-Sena drawings (default: 2017 - 2018) from ipywidgets import widgets, interact, Layout s = widgets.IntRangeSlider(value=[1891, total_drawings], min=1, max=total_drawings, layout=Layout(width='95%')) interact(range_plot, x=s); # + # A function to print statistics for each number of the drawing def print_drawing_stats(last_drawing, this_drawing): for number in this_drawing: times = times_drawn[number] print(f"Number {number:02d}; drawn {times} times;", end=" ") average = times - all_times_mean print(f"{abs(average):2}", end=" ") if (average >= 0): print("above", end=" ") else: print(f"below", end=" ") print("average;", end=" ") print(f"In last drawing: ", end="") if (number in last_drawing): print("Yes!", end=" ") else: print("No. 
", end=" ") popularity = popular.index(number) + 1 print(f"Popularity: {popularity:02d}", end="") if (popularity <= 15): print(" (top 15)") elif(popularity > 45): print(" (bottom 15)") else: print() # + # Statistics for the last five drawings for i in range(5): last_drawing = all_drawings[(i-6)] this_drawing = all_drawings[(i-5)] print(f"\nDrawing {total_drawings-4+i} - {all_dates[(total_drawings-5+i)]}") print_drawing_stats(last_drawing, this_drawing) # + # Plot the statistics for the last drawing last_date = all_dates[-1] last_drawing = all_drawings[-1] print(f"\nThe six numbers of the last drawing, #{total_drawings} on {last_date}, are: {last_drawing}.\n") print(f"Mean: {all_times_mean:.2f} <> SD: {all_times_sd:.2f}") style.use('fivethirtyeight') plt.figure(figsize=(15, 10)) xs = np.arange(6) plt.bar(xs, [times_drawn[number] for number in last_drawing], color=[blue, green, yellow]) plt.xticks(xs, [f"{number}" for number in last_drawing]) xs = [(min(xs) - 0.5), (0.5 + max(xs))] plt.plot(xs, ([all_times_mean + all_times_sd] * 2), linewidth=1, color=black, label="Mean + SD") plt.plot(xs, ([all_times_mean] * 2), linewidth=2, color=orange, label="Mean") plt.plot(xs, ([all_times_mean - all_times_sd] * 2), linewidth=1, color=black, label="Mean - SD") plt.legend(loc=4) plt.title(f"Statistics for drawing {total_drawings} on {last_date}") plt.xlabel("Numbers") plt.ylabel("Drawings") plt.show() # + # Was there ever a repeated drawing? drawings_st = set(tuple(d) for d in all_drawings) print(f"Total drawings: {len(all_drawings)}") print(f"Unique drawings: {len(drawings_st)}") print(f"Repeated drawings: {len(all_drawings) != len(drawings_st)}") # + # How many unique games are possible on the birthday range (01 to 31) cb = combinations(31, 6) print(f"There are {cb:,.0f} unique games in the range 01 to 31.") print(f"\nThat is {cb/c6*100:.2f}% of all {c6:,.0f} unique six number games.") # + # How many drawings were on the birthdays range? 
# +
# Until 2009 drawings used two spinning cages: one ball for the tens
# digit (0-5) and one for the units digit (0-9); the 0-0 pair stood for
# the number 60.
def draw():
    """Simulate one pre-2009 drawing.

    Returns a tuple ``(numbers, redraws)``: ``numbers`` is a sorted
    uint8 array of six unique values in 1..60, and ``redraws`` counts
    how often an already-drawn number forced the balls to be drawn
    again.
    """
    redraws = 0
    picked = []
    while len(picked) < 6:
        tens = np.random.choice(np.arange(0, 6, dtype=np.uint8))
        units = np.random.choice(np.arange(0, 10, dtype=np.uint8))
        number = (tens * 10) + units
        if number == 0:
            # The double-zero pair maps to ball 60.
            number = 60
        if number in picked:
            # Duplicate: count the redraw and spin again.
            redraws += 1
        else:
            picked.append(number)
    return np.array(sorted(picked), dtype=np.uint8), redraws
# +
# A function to calculate how many points a game scored
def get_points(drawing, game):
    """Count how many numbers of *game* appear in *drawing*.

    Parameters
    ----------
    drawing : iterable of int
        The six drawn numbers.
    game : iterable of int
        The numbers the contestant bet on (6 to 15 of them).

    Returns
    -------
    int
        The score: how many of the bet numbers were drawn.
    """
    # A set makes each membership test O(1) instead of scanning the
    # drawing once per bet number (the original was O(n*m)).
    drawn = set(drawing)
    return sum(1 for number in game if number in drawn)
# After all this I would say that Mega-Sena is a good investment option since with just one dollar you have your *teeny tiny* chance of winning millions.
MegaSena.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .r
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: R
#     language: R
#     name: ir
# ---

# Build a heterogeneous list: two strings, a numeric vector, and a
# 3x4 matrix filled with 12 values sampled from 1:15.
lst <- list('A','B',c(1,2,3,4),matrix(sample(1:15,12),nrow=3))

# Single-bracket indexing returns a one-element sub-list, not the
# element itself (use [[ ]] to extract the element directly).
lst[3]
lst[4]

# A list has class "list" regardless of what it stores.
class(lst)

# Rebuild the list as a small record and attach names to its fields.
# NOTE(review): "DOb" is an odd capitalisation -- probably meant "DOB";
# kept as-is since it is a runtime value.
lst <- list("<NAME>",22,"10/10/1996")
names(lst)<- c("Name","Age","DOb")

# Print the named list, then show its compact structure with str().
lst
str(lst)
lists.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import keras keras.__version__ # # Predicting house prices: a regression example # # This notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments. # # ---- # # # In our two previous examples, we were considering classification problems, where the goal was to predict a single discrete label of an # input data point. Another common type of machine learning problem is "regression", which consists of predicting a continuous value instead # of a discrete label. For instance, predicting the temperature tomorrow, given meteorological data, or predicting the time that a # software project will take to complete, given its specifications. # # Do not mix up "regression" with the algorithm "logistic regression": confusingly, "logistic regression" is not a regression algorithm, # it is a classification algorithm. # ## The Boston Housing Price dataset # # # We will be attempting to predict the median price of homes in a given Boston suburb in the mid-1970s, given a few data points about the # suburb at the time, such as the crime rate, the local property tax rate, etc. # # The dataset we will be using has another interesting difference from our two previous examples: it has very few data points, only 506 in # total, split between 404 training samples and 102 test samples, and each "feature" in the input data (e.g. the crime rate is a feature) has # a different scale. 
For instance some values are proportions, which take a values between 0 and 1, others take values between 1 and 12, # others between 0 and 100... # # Let's take a look at the data: # + from keras.datasets import boston_housing (train_data, train_targets), (test_data, test_targets) = boston_housing.load_data() # - train_data.shape test_data.shape # # As you can see, we have 404 training samples and 102 test samples. The data comprises 13 features. The 13 features in the input data are as # follow: # # 1. Per capita crime rate. # 2. Proportion of residential land zoned for lots over 25,000 square feet. # 3. Proportion of non-retail business acres per town. # 4. Charles River dummy variable (= 1 if tract bounds river; 0 otherwise). # 5. Nitric oxides concentration (parts per 10 million). # 6. Average number of rooms per dwelling. # 7. Proportion of owner-occupied units built prior to 1940. # 8. Weighted distances to five Boston employment centres. # 9. Index of accessibility to radial highways. # 10. Full-value property-tax rate per $10,000. # 11. Pupil-teacher ratio by town. # 12. 1000 * (Bk - 0.63) ** 2 where Bk is the proportion of Black people by town. # 13. % lower status of the population. # # The targets are the median values of owner-occupied homes, in thousands of dollars: train_targets # # The prices are typically between \$10,000 and \$50,000. If that sounds cheap, remember this was the mid-1970s, and these prices are not # inflation-adjusted. # ## Preparing the data # # # It would be problematic to feed into a neural network values that all take wildly different ranges. The network might be able to # automatically adapt to such heterogeneous data, but it would definitely make learning more difficult. 
# +
from keras import models
from keras import layers

def build_model():
    """Return a freshly compiled two-hidden-layer regression network.

    K-fold validation instantiates the same architecture several times,
    hence a factory function rather than a single shared model object.
    """
    network = models.Sequential()
    # Two small ReLU layers: with very little training data, a small
    # network is one way to limit overfitting.
    network.add(layers.Dense(64, activation='relu',
                             input_shape=(train_data.shape[1],)))
    network.add(layers.Dense(64, activation='relu'))
    # Single linear output unit: the standard setup for scalar
    # regression, leaving the predicted value unconstrained.
    network.add(layers.Dense(1))
    # MSE loss for regression; also track mean absolute error.
    network.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return network
# -
Here, because # the last layer is purely linear, the network is free to learn to predict values in any range. # # Note that we are compiling the network with the `mse` loss function -- Mean Squared Error, the square of the difference between the # predictions and the targets, a widely used loss function for regression problems. # # We are also monitoring a new metric during training: `mae`. This stands for Mean Absolute Error. It is simply the absolute value of the # difference between the predictions and the targets. For instance, a MAE of 0.5 on this problem would mean that our predictions are off by # \$500 on average. # ## Validating our approach using K-fold validation # # # To evaluate our network while we keep adjusting its parameters (such as the number of epochs used for training), we could simply split the # data into a training set and a validation set, as we were doing in our previous examples. However, because we have so few data points, the # validation set would end up being very small (e.g. about 100 examples). A consequence is that our validation scores may change a lot # depending on _which_ data points we choose to use for validation and which we choose for training, i.e. the validation scores may have a # high _variance_ with regard to the validation split. This would prevent us from reliably evaluating our model. # # The best practice in such situations is to use K-fold cross-validation. It consists of splitting the available data into K partitions # (typically K=4 or 5), then instantiating K identical models, and training each one on K-1 partitions while evaluating on the remaining # partition. The validation score for the model used would then be the average of the K validation scores obtained. 
# In terms of code, this is straightforward: # + import numpy as np k = 4 num_val_samples = len(train_data) // k num_epochs = 100 all_scores = [] for i in range(k): print('processing fold #', i) # Prepare the validation data: data from partition # k val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples] val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples] # Prepare the training data: data from all other partitions partial_train_data = np.concatenate( [train_data[:i * num_val_samples], train_data[(i + 1) * num_val_samples:]], axis=0) partial_train_targets = np.concatenate( [train_targets[:i * num_val_samples], train_targets[(i + 1) * num_val_samples:]], axis=0) # Build the Keras model (already compiled) model = build_model() # Train the model (in silent mode, verbose=0) model.fit(partial_train_data, partial_train_targets, epochs=num_epochs, batch_size=1, verbose=0) # Evaluate the model on the validation data val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0) all_scores.append(val_mae) # - all_scores np.mean(all_scores) # # As you can notice, the different runs do indeed show rather different validation scores, from 2.1 to 2.9. Their average (2.4) is a much more # reliable metric than any single of these scores -- that's the entire point of K-fold cross-validation. In this case, we are off by \$2,400 on # average, which is still significant considering that the prices range from \$10,000 to \$50,000. # # Let's try training the network for a bit longer: 500 epochs. 
To keep a record of how well the model did at each epoch, we will modify our training loop # to save the per-epoch validation score log: # + from keras import backend as K # Some memory clean-up K.clear_session() # - num_epochs = 500 all_mae_histories = [] for i in range(k): print('processing fold #', i) # Prepare the validation data: data from partition # k val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples] val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples] # Prepare the training data: data from all other partitions partial_train_data = np.concatenate( [train_data[:i * num_val_samples], train_data[(i + 1) * num_val_samples:]], axis=0) partial_train_targets = np.concatenate( [train_targets[:i * num_val_samples], train_targets[(i + 1) * num_val_samples:]], axis=0) # Build the Keras model (already compiled) model = build_model() # Train the model (in silent mode, verbose=0) history = model.fit(partial_train_data, partial_train_targets, validation_data=(val_data, val_targets), epochs=num_epochs, batch_size=1, verbose=0) mae_history = history.history['val_mean_absolute_error'] all_mae_histories.append(mae_history) # We can then compute the average of the per-epoch MAE scores for all folds: average_mae_history = [ np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)] # Let's plot this: # + import matplotlib.pyplot as plt plt.plot(range(1, len(average_mae_history) + 1), average_mae_history) plt.xlabel('Epochs') plt.ylabel('Validation MAE') plt.show() # - # # It may be a bit hard to see the plot due to scaling issues and relatively high variance. Let's: # # * Omit the first 10 data points, which are on a different scale from the rest of the curve. # * Replace each point with an exponential moving average of the previous points, to obtain a smooth curve. 
# + def smooth_curve(points, factor=0.9): smoothed_points = [] for point in points: if smoothed_points: previous = smoothed_points[-1] smoothed_points.append(previous * factor + point * (1 - factor)) else: smoothed_points.append(point) return smoothed_points smooth_mae_history = smooth_curve(average_mae_history[10:]) plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history) plt.xlabel('Epochs') plt.ylabel('Validation MAE') plt.show() # - # # According to this plot, it seems that validation MAE stops improving significantly after 80 epochs. Past that point, we start overfitting. # # Once we are done tuning other parameters of our model (besides the number of epochs, we could also adjust the size of the hidden layers), we # can train a final "production" model on all of the training data, with the best parameters, then look at its performance on the test data: # Get a fresh, compiled model. model = build_model() # Train it on the entirety of the data. model.fit(train_data, train_targets, epochs=80, batch_size=16, verbose=0) test_mse_score, test_mae_score = model.evaluate(test_data, test_targets) test_mae_score # We are still off by about \$2,550. # ## Wrapping up # # # Here's what you should take away from this example: # # * Regression is done using different loss functions from classification; Mean Squared Error (MSE) is a commonly used loss function for # regression. # * Similarly, evaluation metrics to be used for regression differ from those used for classification; naturally the concept of "accuracy" # does not apply for regression. A common regression metric is Mean Absolute Error (MAE). # * When features in the input data have values in different ranges, each feature should be scaled independently as a preprocessing step. # * When there is little data available, using K-Fold validation is a great way to reliably evaluate a model. 
# * When little training data is available, it is preferable to use a small network with very few hidden layers (typically only one or two), # in order to avoid severe overfitting. # # This example concludes our series of three introductory practical examples. You are now able to handle common types of problems with vector data input: # # * Binary (2-class) classification. # * Multi-class, single-label classification. # * Scalar regression. # # In the next chapter, you will acquire a more formal understanding of some of the concepts you have encountered in these first examples, # such as data preprocessing, model evaluation, and overfitting.
.ipynb_checkpoints/3.7-predicting-house-prices-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
# %matplotlib inline
#from pylib.plot import plot_decision_boundary
# -

# Helper function to plot a decision boundary.
# If you don't fully understand this function don't worry, it just generates the contour plot below.
def plot_decision_boundary(pred_func):
    """Draw the decision regions of `pred_func` over the global dataset (X, y).

    pred_func: callable mapping an (m, 2) array of points to m class labels.

    Evaluates the predictor on a dense grid covering X (with padding) and
    overlays the training points on the filled contour.
    """
    # Set min and max values and give it some padding
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)

# Generate a dataset and plot it
np.random.seed(0)
# BUG FIX: the import above is `from sklearn import datasets, linear_model`,
# which never binds the bare name `sklearn`; `sklearn.datasets.make_moons`
# therefore raised NameError. Use the imported submodule directly.
X, y = datasets.make_moons(200, noise=0.20)
plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)

# ### logistic regression
#

# +
# Train the logistic regression classifier
# BUG FIX: same NameError as above — use the imported `linear_model` module.
clf = linear_model.LogisticRegressionCV()
clf.fit(X, y)

# Plot the decision boundary
plot_decision_boundary(lambda x: clf.predict(x))
plt.title("Logistic Regression")
# -

# #
# # NN
#

# +
num_examples = len(X)  # training set size
nn_input_dim = 2  # input layer dimensionality
nn_output_dim = 2  # output layer dimensionality

# Gradient descent parameters (I picked these by hand)
epsilon = 0.01  # learning rate for gradient descent
reg_lambda = 0.01  # regularization strength
# -

# Helper function to evaluate the total loss on the dataset
def calculate_loss(model):
    """Return the average regularized cross-entropy loss of `model` on the
    global dataset (X, y).

    model: dict with keys 'W1', 'b1', 'W2', 'b2'.
    """
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    # Forward propagation to calculate our predictions
    z1 = X.dot(W1) + b1
    a1 = np.tanh(z1)
    z2 = a1.dot(W2) + b2
    exp_scores = np.exp(z2)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    # Calculating the loss
    corect_logprobs = -np.log(probs[range(num_examples), y])
    data_loss = np.sum(corect_logprobs)
    # Add regularization term to loss (optional)
    data_loss += reg_lambda / 2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
    return 1. / num_examples * data_loss

# Helper function to predict an output (0 or 1)
def predict(model, x):
    """Return the predicted class (0 or 1) for each row of `x`."""
    W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
    # Forward propagation
    z1 = x.dot(W1) + b1
    a1 = np.tanh(z1)
    z2 = a1.dot(W2) + b2
    exp_scores = np.exp(z2)
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
    return np.argmax(probs, axis=1)

# This function learns parameters for the neural network and returns the model.
# - nn_hdim: Number of nodes in the hidden layer
# - num_passes: Number of passes through the training data for gradient descent
# - print_loss: If True, print the loss every 1000 iterations
def build_model(nn_hdim, num_passes=20000, print_loss=False):
    """Train a one-hidden-layer tanh network by full-batch gradient descent
    on the global dataset (X, y).

    nn_hdim: number of hidden units
    num_passes: number of gradient-descent iterations
    print_loss: if True, print the loss every 1000 iterations
    returns: dict with keys 'W1', 'b1', 'W2', 'b2'
    """
    # Initialize the parameters to random values. We need to learn these.
    np.random.seed(0)
    W1 = np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)
    b1 = np.zeros((1, nn_hdim))
    W2 = np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)
    b2 = np.zeros((1, nn_output_dim))

    # This is what we return at the end
    model = {}

    # Gradient descent. For each batch...
    for i in range(0, num_passes):

        # Forward propagation
        z1 = X.dot(W1) + b1
        a1 = np.tanh(z1)
        z2 = a1.dot(W2) + b2
        exp_scores = np.exp(z2)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)

        # Backpropagation
        delta3 = probs
        delta3[range(num_examples), y] -= 1
        dW2 = (a1.T).dot(delta3)
        db2 = np.sum(delta3, axis=0, keepdims=True)
        delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))
        dW1 = np.dot(X.T, delta2)
        db1 = np.sum(delta2, axis=0)

        # Add regularization terms (b1 and b2 don't have regularization terms)
        dW2 += reg_lambda * W2
        dW1 += reg_lambda * W1

        # Gradient descent parameter update
        W1 += -epsilon * dW1
        b1 += -epsilon * db1
        W2 += -epsilon * dW2
        b2 += -epsilon * db2

        # Assign new parameters to the model
        model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}

        # Optionally print the loss.
        # This is expensive because it uses the whole dataset, so we don't want to do it too often.
        if print_loss and i % 1000 == 0:
            print("Loss after iteration %i: %f" % (i, calculate_loss(model)))

    return model

# +
# Build a model with a 3-dimensional hidden layer
model = build_model(3, print_loss=True)

# Plot the decision boundary
plot_decision_boundary(lambda x: predict(model, x))
plt.title("Decision Boundary for hidden layer size 3")
# -

plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4, 5, 20, 50]
for i, nn_hdim in enumerate(hidden_layer_dimensions):
    plt.subplot(5, 2, i + 1)
    plt.title('Hidden Layer size %d' % nn_hdim)
    model = build_model(nn_hdim)
    plot_decision_boundary(lambda x: predict(model, x))
plt.show()
Lectures/Lecture-06/code/NN00.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import cv2 import numpy as np import matplotlib.pyplot as plt img_ori = cv2.imread('junha.png') img_plt = plt.imread('junha.png') plt.figure(figsize=(12, 10)) plt.imshow(img_plt) # + dst = img_ori[500:550, 50:200] dst_plt = img_plt[500:550, 50:200] plt.figure(figsize=(12, 10)) plt.imshow(dst_plt) # + gray_ori = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY) gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY) plt.figure(figsize=(12, 10)) plt.imshow(gray, cmap = 'gray') # - # + structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)) imgTopHat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, structuringElement) imgBlackHat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, structuringElement) imgGrayscalePlusTopHat = cv2.add(gray, imgTopHat) gray = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat) plt.figure(figsize=(12, 10)) plt.imshow(gray, cmap='gray') # + #다른 방법 img = img_ori[500:550, 50:200] img = cv2.resize(img, None, fx=5, fy=5) gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) adaptive_threshold = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 85, 7) plt.figure(figsize=(12, 10)) plt.imshow(adaptive_threshold, cmap='gray') # - edges = cv2.Canny(adaptive_threshold,100,200) plt.figure(figsize=(12, 10)) plt.imshow(edges, cmap='gray')
opencvtest/opencvtest.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Human Pose Estimation sec - 1.1
#
# This is a Notebook coded on Google Colab
#
# * This notebook is used to:
#     - Load Custom Created functions
#     - Load pre-defined model architecture
#     - Load data from pre-defined pipelines
#     - Train the model with 50 epochs on colab with GPU
#     - A callback is defined to save the best model

# +
# Moving the data files from drive to current runtime, in order to avoid any I/O bottleneck
# # !cp drive/My\ Drive/Colab\ Notebooks/File.zip file.zip
# # !unzip file.zip
# # !cp ../model.h5 ../content/drive/My Drive/Colab Notebooks
# -

# Change the working directory so the custom modules below can be imported
import os
os.chdir('Custom_Functions/')

# Import all necessary libraries
import pandas as pd, numpy as np, tensorflow
from tensorflow.keras.losses import MSE as mse
from tensorflow.keras.callbacks import ModelCheckpoint as mc

# Importing the custom functions that return the data
from Data_prep import prep
from Model_Generator import run
from Metrics import coeff_determination, optimizer

# +
# Loading the action_joints dataset only to use df.shape as output shape for our model
df = pd.read_csv('../Data/Action_Dataset/action_joints.csv')
df.columns = list(range(df.shape[1]))

Train, Validation, Test = prep()  # Returns data pipelines from the custom function
model = run()  # Returns the untrained model architecture

# +
adam = optimizer()  # loads the customized Adam optimizer from the function
metrics = [coeff_determination]  # using the custom R2 metric

# BUG FIX: the original passed the `optimizer` factory *function* itself to
# compile() instead of the `adam` instance created just above, so the custom
# optimizer settings were never used.
model.compile(optimizer=adam, loss=mse, metrics=metrics)

# Creating a callback to help save the 'best model only'
mcp = mc('../Custom_Models/Keras_Models_H5/final_model.h5',
         monitor='val_coeff_determination', patience=10, save_best_only=True,
         mode='max', baseline=None, restore_best_weights=True)
# The best model is automatically saved at the given location

'''Fit the model with data to enable training.. on Colab with GPU, this takes 35 minutes to run 50 epochs and return train r2 of 96.32 and val r2 of 92.13'''
# BUG FIX: `batch_size` was never defined (NameError at runtime); the
# generator's own batch size is what was intended for steps_per_epoch.
# NOTE(review): `validation_steps = Validation.samples // df.shape[0]` divides
# by the number of dataset rows rather than a batch size — kept as-is, but
# confirm against the data pipeline.
model.fit(Train, validation_data=Validation, epochs=50, callbacks=[mcp],
          steps_per_epoch=Train.samples // Train.batch_size,
          validation_steps=Validation.samples // df.shape[0])
# -

# # The Final Keypoints prediction model can be loaded to run predictions directly or linked to a WebApplication or API
#
# * I further used this model to predict keypoints and serve as input to my [Hello/Namaste] classification model in sec 1.4 where I try to predict Hello/Namaste from live webcam feed.

from Model_Generator import run2
from Data_prep import prep2
import tensorflow
from tensorflow.keras import regularizers

model = run2()
X_Train, Y_Train, X_Val, Y_Val, transformer = prep2()

# %%time
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# NOTE(review): this path uses 'Keras_Model_H5' while the checkpoint above
# uses 'Keras_Models_H5' — possibly an inconsistency; left unchanged.
mcp = mc('../Custom_Models/Keras_Model_H5/final_classification_model.h5',
         monitor='val_loss', patience=20, save_best_only=True, mode='min',
         restore_best_weights=True)

'''Fit the model with data to enable training.. on local cpu runtime [core i5, 8gb ram, 8gb graphics], this takes 4 seconds to run 50 epochs and return train acc of 99.7 and val acc of 99.7, which is considered 100%'''
model.fit(X_Train, Y_Train, validation_data=(X_Val, Y_Val), epochs=50, callbacks=[mcp])
Model_Training.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Valorización con Colateral # **Ametrano, <NAME>. and <NAME>**, Everything You Always Wanted to Know About Multiple Interest Rate Curve Bootstrapping but Were Afraid to Ask (April 2, 2013). Available at SSRN: https://ssrn.com/abstract=2219548 or http://dx.doi.org/10.2139/ssrn.2219548 # Se puede suponer que las cantidades captadas o colocadas por una contraparte que participa en el mercado OTC están asociadas a una cuenta de financiamiento (*funding account*) con el valor $B_{\alpha}\left(t\right)$ en $t$. El índice $\alpha$ denota la fuente específica de financiamiento. # # Se asume la siguiente dinámica: # $$ # \begin{equation} # dB_{\alpha}\left(t\right)=r_{\alpha}\left(t\right)B_{\alpha}\left(t\right)dt # \end{equation} # $$ # # # $$ # \begin{equation} # B_{\alpha}\left(0\right)=1 # \end{equation} # $$ # # # $$ # \begin{equation} # B_{\alpha}\left(t\right)=\exp\int_{0}^{t}r_{\alpha}\left(u\right)du # \end{equation} # $$ # Hemos ya identificado dos fuentes de financiamiento para el mercado OTC: # # - La fuente de financiamiento general (o de tesorería), denotada con $B_f$ asociada a la operativa usual en mercados monetarios y de bonos. Podemos pensar que, en este caso, $r_f=Libor+spread,\space SOFR+spread$ # # # - La cuenta de colateral, $B_c$, asociada a un CSA para la cual típicamente $r_c=ON$ (overnight). 
# ## Definición # # Un CSA perfecto es un CSA con las siguientes características: # # - margen inicial igual a 0 # - totalmente simétrico (las condiciones aplican a ambas contrapartes por igual) # - colateral sólo en *cash* # - threshold igual a 0 # - MTA igual a 0 # - periodicidad de cálculo continua # - tasa de colateral $r_c\left(t\right)$ # - traspaso instantáneo de colateral # - sin reinversión de colateral # # Como consecuencia se tiene que: # # $$ # \begin{equation} # B_c\left(t\right)=\Pi\left(t\right) # \end{equation} # $$ # # donde $\Pi\left(t\right)$ es el valor de la cartera de derivados asociada al CSA. # ## Teorema (valorización con colateral) # # Sea $\Pi$ un instrumento derivado que vence en $T$ escrito sobre el activo $X$ que obedece la siguiente ecuación diferencial estocástica: # # # $$ # \begin{equation} # dX\left(t\right)=\mu^P\left(t,X\right)X\left(t\right)dt+\sigma\left(t,X\right)X\left(t\right)dW^P\left(t\right), # \end{equation} # $$ # # # $$ # \begin{equation} # X\left(0\right)=X_0 # \end{equation} # $$ # # donde $t\in\mathbb{R}_+$, $X\in\mathbb{R}$, $\mu^P\in\mathbb{R}_+\times\mathbb{R}\rightarrow\mathbb{R}$, $\sigma\in\mathbb{R}_+\times\mathbb{R}\rightarrow\mathbb{R}_+$, $W^P\in\mathbb{R}$ es un movimiento browniano de dimension 1 sobre $\left(\Omega,\mathscr{F},P\right)$ y $P$ es la medida histórica (u objetiva). 
Asumiendo colateral perfecto, el precio del derivado $\Pi\left(t\right)$ en $t\lt T$ satisface: # # $$ # \begin{equation} # \overset{\wedge}{\mathcal{D}}_{rf}\Pi\left(t,X\right)=r_c\left(t\right)\Pi\left(t,X\right) # \end{equation} # $$ # # # $$ # \begin{equation} # \overset{\wedge}{\mathcal{D}}_{rf}=\frac{\partial}{\partial t}+r_f\left(t\right)X\left(t\right)\frac{\partial}{\partial X}+\frac{1}{2}\sigma^2\left(t\right)X^2\left(t\right)\frac{\partial^2}{\partial X^2} # \end{equation} # $$ # # # y está dado también por: # # $$ # \begin{equation} # \Pi\left(t,X\right)=\mathbb{E}_t^{Q_f}\left[D_c\left(t,T\right)\Pi\left(T,X\right)\right] # \end{equation} # $$ # # $$ # \begin{equation} # D_c\left(t,T\right)=\exp\left[-\int_t^Tr_c\left(u\right)du\right] # \end{equation} # $$ # # # donde $Q_f$ es la medida de probabilidad asociada a la cuenta de financiamiento $B_f$ de modo que: # # # $$ # \begin{equation} # dX\left(t\right)=r_f\left(t\right)X\left(t\right)dt+\sigma\left(t,X\right)X\left(t\right)dW^{Q_f}\left(t\right), # \end{equation} # $$ # ### Corolario (en la medida forward) # # Vale la expresión: # # $$ # \begin{equation} # \Pi\left(t,X\right)=P_c\left(t,T\right)\mathbb{E}_{t}^{Q_f^T}\left[\Pi\left(T,X\right)\right] # \end{equation} # $$ # # $$ # \begin{equation} # P_c\left(t,T\right)=\mathbb{E}^{Q_f}\left[D_c\left(t,T\right)\right] # \end{equation} # $$ # # donde $Q_f^T$ es la probabilidad asociada a $P_c\left(t,T\right)$ (la medida $T$-forward).
05_valorizacion_con_colateral.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Independent Component Analysis Lab
#
# In this notebook, we'll use Independent Component Analysis to retrieve original signals from three observations each of which contains a different mix of the original signals. This is the same problem explained in the ICA video.
#
# ## Dataset
# Let's begin by looking at the dataset we have. We have three WAVE files, each of which is a mix, as we've mentioned. If you haven't worked with audio files in python before, that's okay, they basically boil down to being lists of floats.
#
#
# Let's begin by loading our first audio file, **[ICA mix 1.wav](ICA mix 1.wav)** [click to listen to the file]:

# +
import numpy as np
import wave, warnings

warnings.simplefilter("ignore")

# Read the wave file
mix_1_wave = wave.open('ICA mix 1.wav','r')
# -

# Let's peek at the parameters of the wave file to learn more about it

mix_1_wave.getparams()

# So this file has only one channel (so it's mono sound). It has a frame rate of 44100, which means each second of sound is represented by 44100 integers (integers because the file is in the common PCM 16-bit format). The file has a total of 264515 integers/frames, which means its length in seconds is:

264515/44100

# Let's extract the frames of the wave file, which will be a part of the dataset we'll run ICA against:

# Extract Raw Audio from Wav File
signal_1_raw = mix_1_wave.readframes(-1)
# BUG FIX: np.fromstring on binary data is deprecated/removed in modern NumPy;
# np.frombuffer is the supported zero-copy equivalent ('Int16' alias replaced
# by the canonical np.int16 dtype).
signal_1 = np.frombuffer(signal_1_raw, dtype=np.int16)

# signal_1 is now a list of ints representing the sound contained in the first file.

'length: ', len(signal_1) , 'first 100 elements: ',signal_1[:100]

# If we plot this array as a line graph, we'll get the familiar wave form representation:

# +
import matplotlib.pyplot as plt

fs = mix_1_wave.getframerate()
timing = np.linspace(0, len(signal_1)/fs, num=len(signal_1))

plt.figure(figsize=(12,2))
plt.title('Recording 1')
plt.plot(timing,signal_1, c="#3ABFE7")
plt.ylim(-35000, 35000)
display(plt.show())
# -

# In the same way, we can now load the other two wave files, **[ICA mix 2.wav](ICA mix 2.wav)** and **[ICA mix 3.wav](ICA mix 3.wav)**

# +
mix_2_wave = wave.open('ICA mix 2.wav','r')

# Extract Raw Audio from Wav File
signal_raw_2 = mix_2_wave.readframes(-1)
signal_2 = np.frombuffer(signal_raw_2, dtype=np.int16)  # see fromstring note above

mix_3_wave = wave.open('ICA mix 3.wav','r')

# Extract Raw Audio from Wav File
signal_raw_3 = mix_3_wave.readframes(-1)
signal_3 = np.frombuffer(signal_raw_3, dtype=np.int16)  # see fromstring note above

plt.figure(figsize=(12,2))
plt.title('Recording 2')
plt.plot(timing,signal_2, c="#3ABFE7")
plt.ylim(-35000, 35000)
plt.show()

plt.figure(figsize=(12,2))
plt.title('Recording 3')
plt.plot(timing,signal_3, c="#3ABFE7")
plt.ylim(-35000, 35000)
plt.show()
# -

# Now that we've read all three files, we're ready to [zip](https://docs.python.org/3/library/functions.html#zip) them to create our dataset.
#
# * Create dataset ```X``` by zipping signal_1, signal_2, and signal_3 into a single list

# +
X = list(zip(signal_1, signal_2, signal_3))

# Let's peek at what X looks like
X[:10]
# -

# We are now ready to run ICA to try to retrieve the original signals.
#
# * Import sklearn's [FastICA](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.FastICA.html) module
# * Initialize FastICA look for three components
# * Run the FastICA algorithm using fit_transform on dataset X

# +
# TODO: Import FastICA
from sklearn.decomposition import FastICA

# TODO: Initialize FastICA with n_components=3
fica = FastICA(n_components=3)

# TODO: Run the FastICA algorithm using fit_transform on dataset X
ica_result = fica.fit_transform(X)
# -

# ```ica_result``` now contains the result of FastICA, which we hope are the original signals. It's in the shape:

ica_result.shape

# Let's split into separate signals and look at them

result_signal_1 = ica_result[:,0]
result_signal_2 = ica_result[:,1]
result_signal_3 = ica_result[:,2]

# Let's plot to see how the wave forms look

# +
# Plot Independent Component #1
plt.figure(figsize=(12,2))
plt.title('Independent Component #1')
plt.plot(result_signal_1, c="#df8efd")
plt.ylim(-0.010, 0.010)
plt.show()

# Plot Independent Component #2
plt.figure(figsize=(12,2))
plt.title('Independent Component #2')
plt.plot(result_signal_2, c="#87de72")
plt.ylim(-0.010, 0.010)
plt.show()

# Plot Independent Component #3
plt.figure(figsize=(12,2))
plt.title('Independent Component #3')
plt.plot(result_signal_3, c="#f65e97")
plt.ylim(-0.010, 0.010)
plt.show()
# -

# Do some of these look like musical wave forms?
#
# The best way to confirm the result is to listen to resulting files. So let's save as wave files and verify. But before we do that, we'll have to:
# * convert them to integer (so we can save as PCM 16-bit Wave files), otherwise only some media players would be able to play them and others won't
# * Map the values to the appropriate range for int16 audio. That range is between -32768 and +32767. A basic mapping can be done by multiplying by 32767.
# * The sounds will be a little faint, we can increase the volume by multiplying by a value like 100

# +
from scipy.io import wavfile

# Convert to int, map the appropriate range, and increase the volume a little bit
result_signal_1_int = np.int16(result_signal_1*32767*100)
result_signal_2_int = np.int16(result_signal_2*32767*100)
result_signal_3_int = np.int16(result_signal_3*32767*100)

# Write wave files
wavfile.write("result_signal_1.wav", fs, result_signal_1_int)
wavfile.write("result_signal_2.wav", fs, result_signal_2_int)
wavfile.write("result_signal_3.wav", fs, result_signal_3_int)
# -

# The resulting files we have now are: [note: make sure to lower the volume on your speakers first, just in case some problem caused the file to sound like static]
# * [result_signal_1.wav](result_signal_1.wav)
# * [result_signal_2.wav](result_signal_2.wav)
# * [result_signal_3.wav](result_signal_3.wav)
#
#
#
# Music:
# * Piano - The Carnival of the Animals - XIII. The Swan (Solo piano version). Performer: <NAME>
# * Cello - Cello Suite no. 3 in C, BWV 1009 - I. Prelude. Performer: European Archive
[Lab] Independent Component Analysis/Independent Component Analysis Lab.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # HIV MODEL
# <NAME>

# +
# Configure Jupyter so figures appear in the notebook
# %matplotlib inline

# Configure Jupyter to display the assigned value after an assignment
# %config InteractiveShell.ast_node_interactivity='last_expr_or_assign'

# import functions from the modsim.py module
from modsim import *
# +
state = State(R=1000, L=0, E=0, V=100)
# starting numbers of cells-stocks-these values will change as simulation runs
# R is activated, uninfected cells
# L is latently infected cells
# E is actively infected cells
# V is free virons
# numbers taken from the Phillips paper

system = System(π=100, σ=2, ρ=0.1, β=.00027, α=.036, δ=.33, μ=.00136, Γ=1.36, τ=.2, dt=0.1)
# dt is explicit time step for Euler's method step
# flows-greek letters are constant values throughout the simulation
# -

def run_simulation(system, update_func):
    """Runs a simulation of the system.

    system: System object
    update_func: function that updates state

    returns: TimeFrame
    """
    unpack(system)
    frame = TimeFrame(columns=state.index)
    frame.row[0] = state
    for t in linrange(0, 120):
        # not linrange(t0, t_end) like in prey-pred run_simulation b/c start
        # and end times aren't defined in the system created above
        frame.row[t+1] = update_func(frame.row[t], t, system)
    return frame

def update_func(state, t, system):
    """Update the HIV Model

    state: State(R, L, E, V)
    t: time
    system: System object

    returns: State(R, L, E, V)
    """
    unpack(system)  # system has greek letters, which aren't changing
    R, L, E, V = state
    dR = ((Γ * τ) - (μ * R) - (β * R * V)) * dt  # Euler's method with explicit time step
    dL = ((ρ * β * R * V) - (μ * L) - (α * L)) * dt
    dE = ((1-ρ) * (β * R * V) + (α * L) - (δ * E)) * dt
    dV = ((π * E) - (σ * V)) * dt
    # updates stocks
    R += dR
    L += dL
    E += dE
    V += dV
    return State(R=R, L=L, E=E, V=V)

results = run_simulation(system, update_func)
results.head()

# +
plot(results.R, label='activated, uninfected cells')
# BUG FIX: the labels for E and L were swapped relative to the definitions
# above — E is the *actively* infected stock and L is the *latently*
# infected stock.
plot(results.E, label='actively infected cells')
plot(results.L, label='latently infected cells')

decorate(title='Number of R, L, and E Cells vs. Time',
         xlabel='Time (Days)',
         ylabel='Number of Cells (Cells)')

#plot(results.V, '-y', label='free virons') taken out b/c V is so large we can't see R, E, and L
# -

# V is on a graph of its own because the number of cells is so large we cannot see how R, L, and E change in the simulation

plot(results.V, '-r', label='free virons')
decorate(title='Number of Free Virions vs. Time',
         xlabel='Time (Days)',
         ylabel='Number of Cells (Cells)')
code/HIV_Model_Julia.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.7.7 64-bit (''venv'': venv)' # metadata: # interpreter: # hash: ed2973e18a45e46195aea83f71ebc09fe8d3266afb265a31fa421a956b7d8fb0 # name: 'Python 3.7.7 64-bit (''venv'': venv)' # --- # # Sentiment analysis with Textblob-FR # + import sys from textblob import Blobber from textblob_fr import PatternTagger, PatternAnalyzer # - # ## Fonction # + tb = Blobber(pos_tagger=PatternTagger(), analyzer=PatternAnalyzer()) def get_sentiment(input_text): blob = tb(input_text) pola, subj = blob.sentiment perc = f"{100*abs(pola):.0f}" if pola > 0: sent = f"{perc}% positive" elif pola < 0: sent = f"{perc}% negative" else: sent = "neutral" if subj > 0: fact = f"{100*subj:.0f}% subjective" else: fact = "perfectly objective" print(f"This text is {sent} and {fact}.") # - # ## Analyser le sentiment d'une phrase # + tags=[] get_sentiment("Ce conseil municipal est vraiment super intéressant.") # + tags=[] get_sentiment("Cette phrase est négative et je ne suis pas content !") # -
module3/s4_sentiment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# # Traveling Salesman Problem
# The Travelling Salesman Problem (TSP) is stated as follows:
# "Given a list of cities and the distances between each pair of cities, what is the shortest possible route that visits each city and returns to the origin city?". [[1]](https://en.wikipedia.org/wiki/Travelling_salesman_problem).
# This is an **optimization** (minimization) problem, where the objective function is the total length of the route.
#
# ## Definitions
# The cities are defined with their x and y coordinates.
#
# $$
# X = \left[\begin{array}{cc} x_{1} & y_{1}\\ x_{2} & y_{2}\\ \vdots & \vdots\\ x_{n} & y_{n} \end{array}\right]
# $$
#
# where $n$ is the number of cities.
# The matrix of distances $A$ is a matrix where each element represent the distance between the i-th and j-th cities.
#
# $$
# A = \left[\begin{array}{cccc}
# 0 & a_{12} & \dots & a_{1n}\\
# a_{21} & 0 & \dots & a_{2n}\\
# \vdots & \vdots & \ddots & \vdots\\
# a_{n1} & a_{n2} & \dots & 0
# \end{array}\right]
# $$
#
# where $a_{ij}=a_{ji}=||x_{i}-x_{j}||$. Two properties of this matrix are:
# - it is a square matrix;
# - it is symmetric;
# - the element on the diagonal are zeros.
#
# ## Simulation
# Random cities are initialized. For reproducibility, the seed of the random number generator is set to a certain value.

np.random.seed(13)
n = 25
x = np.random.rand(n, 2)
plt.scatter(x[:,0], x[:,1])
plt.title(str(n) + ' random cities')

# Then the matrix of distances $A$ is defined. The information of the starting and ending cities is given in the data dictionary, with the keyword *depot*.

# +
# matrix of distances (symmetric, zero diagonal: only the lower triangle is
# computed, then mirrored)
A = np.zeros((n, n))
for i in range(n):
    for j in range(i):
        A[i, j] = np.linalg.norm(x[i]-x[j])
        A[j, i] = A[i, j]

data = {
    'distance_matrix': A,
    'depot': 0,
}
# -

# Then, a function to compute the length of the route is defined.

def computeDistance(route, data):
    """Compute the total length of the closed tour described by `route`.

    route: 1-D array of city indices, *excluding* the depot.
    data: dict with 'distance_matrix' (n x n symmetric) and 'depot'
        (index of the start/end city).

    The depot is prepended and appended to the route, so the returned length
    covers the full closed tour depot -> cities -> depot.
    """
    depot = data['depot']
    distanceMatrix = data['distance_matrix']
    totalDistance = 0
    # BUG FIX: the depot-augmented route was previously computed but never
    # used (dead assignments to `x`), so the two legs to/from the depot were
    # missing from the objective. Iterate over the augmented route instead.
    full_route = np.insert(np.int64(route), [0, np.shape(route)[0]], depot)
    for i in range(np.shape(full_route)[0]-1):
        totalDistance += distanceMatrix[full_route[i], full_route[i+1]]
    return totalDistance

# ## Optimization
# The TSP is a NP-hard combinatory problem; a robust solver is therefore needed. In this case, the **simulated annealing algorithm** [[2]](https://en.wikipedia.org/wiki/Simulated_annealing) has been used. This works as follows:
# 1. Start with an initial random solution, which has a value $f(x_0)$ of the objective function. By now, this is the best solution $x_0 = x_{best}$.
# 1. Select a local neighbor $x_{new}$ of this solution and compute $f(x_{new})$. The local neighbor is created with an inversion of a sub-route of $x_0$.
# 1. If $\Delta E = f(x_{new}) - f(x_{best})\le 0$, then the new solution is accepted: $x_{best} = x_{new}$.
# 1. If $\Delta E > 0$, the solution could still be accepted. This happens with a probability $P(\Delta E) = e^{-\frac{\Delta E}{kT}}$. The temperature $T$ is a fundamental variable of the algorithm. As the number of iterations grows, the temperature decrease (with a geometric progression: $T(i)=T(i-1)\cdot c_r$, where $c_r$ is the cooling ratio). So, the greater the iteration number, the less probability there is to accept a solution which does not optimize the objective function.
# 1. Repeat steps 2-4 until convergence.
def SimulatedAnnealing(fun, x0, args=(), max_iter=25000, tol=1e-9, T0=2000, cr=0.995):
    """
    Minimize ``fun`` over permutations of ``x0`` with simulated annealing.

    At each iteration a candidate is generated by reversing a random
    sub-route of the current solution (a 2-opt-style move) and accepted
    according to the Metropolis criterion; the temperature follows a
    geometric cooling schedule ``T <- T * cr``.

    Parameters
    ----------
    fun : callable
        Objective, called as ``fun(route, args)``; must be deterministic.
    x0 : ndarray
        Initial solution (a permutation of city indices).
    args : object, optional
        Extra data forwarded to ``fun`` as its second positional argument
        (kept as a single object for backward compatibility with callers).
    max_iter : int, optional
        Number of annealing iterations.
    tol : float, optional
        Kept for interface compatibility; the original dead branch
        ``if delta_E <= tol: pass`` was a no-op and has been removed.
    T0 : float, optional
        Initial temperature.
    cr : float, optional
        Cooling ratio in (0, 1).

    Returns
    -------
    ndarray
        The final (current) solution.

    Notes
    -----
    The objective value of the current solution is now cached instead of
    being recomputed twice per iteration; since ``fun`` is deterministic
    and the random-number call sequence is unchanged, seeded runs produce
    the same result as the original implementation, three times cheaper.
    """
    best_x = x0
    best_E = fun(best_x, args)  # cache current energy; fun is deterministic
    T = T0
    for _ in range(max_iter):
        # Candidate: reverse a random sub-route of the current solution.
        new_x = best_x.copy()
        id1 = np.random.randint(best_x.shape[0])
        id2 = np.random.randint(best_x.shape[0])
        new_x[id1:id2] = new_x[id1:id2][::-1]
        new_E = fun(new_x, args)
        delta_E = new_E - best_E
        # Metropolis acceptance: always take improvements, take uphill
        # moves with probability exp(-delta_E / T).
        if delta_E <= 0:
            best_x, best_E = new_x, new_E
        else:
            probability = np.exp(-delta_E / T)
            if probability > np.random.rand():
                best_x, best_E = new_x, new_E
        # Geometric cooling schedule.
        T *= cr
    print(best_E)
    return best_x


a = SimulatedAnnealing(computeDistance, np.arange(1, n), args=data)
a = np.insert(a, [0, n-1], data['depot'])  # close the tour at the depot
plt.plot(x[:, 0], x[:, 1], 'o')
routes = x[a]
plt.plot(routes[:, 0], routes[:, 1])
plt.title('Best route')

# ## MATLAB implementation
# The same algorithm has been implemented in MATLAB, which makes it easier to monitor the convergence of the solution.
# Here it is reported the monitoring panel developed in MATLAB.
#
# ![SegmentLocal](TSP.gif "segment")
TravellingSalesman/TSP.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="jfU3sTnwn9ju" # # Track positions of the investment companies. # Companies that are present in the following analysis: [Ark Invest](https://ark-invest.com/), [BlackRock](https://www.ishares.com/us), [Invesco](https://www.invesco.com/us/financial-products/etfs/performance), [Global X](https://www.globalxetfs.com/). # # After the analysis of the companies' portfolios we are going to leverage ideas of <NAME> from his book 'What works on Wall Streat' to choose the best stocks from the ETFs with the growth potential according to his growth strategy. # + id="QDoOi78fArjh" author = '<NAME>' author_url = 'https://www.linkedin.com/in/nikolaimelnikov/' # + [markdown] id="qMGXv1eadn2s" # Imporing all the necessary libraries for the project # + id="01VwWfrpHHiu" # !pip install -U git+https://github.com/mariostoev/finviz # + id="_vLtjsHT6Hsh" import pandas as pd import requests from io import StringIO import finviz from finviz.screener import Screener import matplotlib.pyplot as plt import seaborn as sns sns.set_theme(style="whitegrid") import nest_asyncio #for finviz screener nest_asyncio.apply() headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:66.0) Gecko/20100101 Firefox/66.0"} # + [markdown] id="A8Ft88pl64VX" # # Ark Invest # # **[ARKK](https://ark-funds.com/arkk):** Main ARK's ETF. # **[ARKQ](https://ark-funds.com/arkq):** ARK Autonomous Technology & Robotics. # # **[ARKW](https://ark-funds.com/arkw):** ARK Next Generation Internet. # **[ARKG](https://ark-funds.com/arkg):** ARK Genomic Revolution. # # **[ARKF](https://ark-funds.com/fintech-etf):** ARK FinTech Innovation. # **[PRNT](https://ark-funds.com/3d-printing-etf):** The 3D Printing. 
# + id="9xRShwORXPqg" ark_holdings = { 'url_arkk': 'https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_INNOVATION_ETF_ARKK_HOLDINGS.csv', 'url_arkq': 'https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_AUTONOMOUS_TECHNOLOGY_&_ROBOTICS_ETF_ARKQ_HOLDINGS.csv', 'url_arkw': 'https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_NEXT_GENERATION_INTERNET_ETF_ARKW_HOLDINGS.csv', 'url_arkg': 'https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_GENOMIC_REVOLUTION_MULTISECTOR_ETF_ARKG_HOLDINGS.csv', 'url_arkf': 'https://ark-funds.com/wp-content/fundsiteliterature/csv/ARK_FINTECH_INNOVATION_ETF_ARKF_HOLDINGS.csv', 'url_prnt': 'https://ark-funds.com/wp-content/fundsiteliterature/csv/THE_3D_PRINTING_ETF_PRNT_HOLDINGS.csv' } ark = pd.DataFrame(columns=['fund', 'shares', 'market value($)', 'weight(%)']) for url in ark_holdings.values(): req = requests.get(url, headers=headers) url_data = StringIO(req.text) df = pd.read_csv(url_data) df['ticker'] = df['ticker'].str.split(' ').str[0] df.set_index('ticker', inplace=True) df.drop(['date','cusip','company'], axis=1, inplace=True) df.dropna(inplace=True) ark = pd.concat([ark, df]) # + id="EkVuMmREqfaa" ark['total_value($)'] = ark.groupby(ark.index)['market value($)'].sum() ark['total_shares'] = ark.groupby(ark.index)['shares'].sum() ark = ark[~ark.index.duplicated(keep='first')] ark.drop(['fund','shares','market value($)','weight(%)'], axis=1, inplace=True) ark.dropna(inplace=True) ark['weight(%)'] = ark.apply(lambda x: (x['total_value($)']/ark['total_value($)'].sum())*100 , axis=1) ark.sort_values(by=['weight(%)'], ascending=False, inplace=True) # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="B1ceSctuahy0" outputId="49643348-552f-419f-a935-71196b6f8ad9" ark # + id="4mbKkbE8naZg" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="739df7e3-f2b6-48f5-fe32-4743911d9f4f" ax = ark['weight(%)'].head(15).plot(kind='bar') ax.axhline(ark['weight(%)'].mean(), color='red', 
linewidth=2, label='Mean') plt.text(13,0.6,'Mean') ax.set(ylabel='Weight(%)', title='Top tickers in Ark Investment') plt.show() # + [markdown] id="CsTiP2DVRmds" # # iShares # # **[IWF](https://www.ishares.com/us/products/239706/):** Russell 1000 Growth. # **[IJH](https://www.ishares.com/us/products/239763/):** Core S&P Mid-Cap. # # **[IVW](https://www.ishares.com/us/products/239725/):** S&P 500 Growth. # **[QUAL](https://www.ishares.com/us/products/256101/):** MSCI USA Quality Factor. # # **[IRBO](https://www.ishares.com/us/products/297905/):** Robotics and Artificial Intelligence Multisector. # **[IBB](https://www.ishares.com/us/products/239699/):** Nasdaq Biotechnology. # # **[SOXX](https://www.ishares.com/us/products/239705/):** PHLX Semiconductor. # **[IGV](https://www.ishares.com/us/products/239771/):** Expanded Tech-Software Sector. # # **[IXN](https://www.ishares.com/us/products/239750/):** Global Tech. # **[ICLN](https://www.ishares.com/us/products/239738/):** Global Clean Energy. # # **[IEMG](https://www.ishares.com/us/products/244050/):** Core MSCI Emerging Markets. 
# + id="Y5pBTYgQRlvQ" iShares_holdings = { 'iwf': 'https://www.ishares.com/us/products/239706/ishares-russell-1000-growth-etf/1467271812596.ajax?fileType=csv&fileName=IWF_holdings&dataType=fund', 'ijh': 'https://www.ishares.com/us/products/239763/ishares-core-sp-midcap-etf/1467271812596.ajax?fileType=csv&fileName=IJH_holdings&dataType=fund', 'ivw': 'https://www.ishares.com/us/products/239725/ishares-sp-500-growth-etf/1467271812596.ajax?fileType=csv&fileName=IVW_holdings&dataType=fund', 'qual': 'https://www.ishares.com/us/products/256101/ishares-msci-usa-quality-factor-etf/1467271812596.ajax?fileType=csv&fileName=QUAL_holdings&dataType=fund', 'irbo': 'https://www.ishares.com/us/products/297905/fund/1467271812596.ajax?fileType=csv&fileName=IRBO_holdings&dataType=fund', 'ibb': 'https://www.ishares.com/us/products/239699/ishares-nasdaq-biotechnology-etf/1467271812596.ajax?fileType=csv&fileName=IBB_holdings&dataType=fund', 'soxx': 'https://www.ishares.com/us/products/239705/ishares-phlx-semiconductor-etf/1467271812596.ajax?fileType=csv&fileName=SOXX_holdings&dataType=fund', 'igv': 'https://www.ishares.com/us/products/239771/ishares-north-american-techsoftware-etf/1467271812596.ajax?fileType=csv&fileName=IGV_holdings&dataType=fund', 'ixn': 'https://www.ishares.com/us/products/239750/ishares-global-tech-etf/1467271812596.ajax?fileType=csv&fileName=IXN_holdings&dataType=fund', 'icln': 'https://www.ishares.com/us/products/239738/ishares-global-clean-energy-etf/1467271812596.ajax?fileType=csv&fileName=ICLN_holdings&dataType=fund', 'iemg': 'https://www.ishares.com/us/products/244050/ishares-core-msci-emerging-markets-etf/1467271812596.ajax?fileType=csv&fileName=IEMG_holdings&dataType=fund' } iShares = pd.DataFrame(columns=['Market Value', 'Weight (%)', 'Shares']) for url in iShares_holdings.values(): req = requests.get(url, headers=headers) url_data = StringIO(req.text) df = pd.read_csv(url_data, skiprows=9) df['Ticker'] = df['Ticker'].str.split(' ').str[0] 
df.set_index('Ticker', inplace=True) df.drop(['Name','Sector','Asset Class','CUSIP','ISIN','SEDOL','Location','Exchange','Currency','Market Currency','Accrual Date','Price','FX Rate','Notional Value'], axis=1, inplace=True) df.dropna(inplace=True) df = df[~df.index.duplicated(keep='first')] df = df.replace(',','', regex=True) df = df.astype('float64') df = df[(df['Market Value'] > 0) & (df['Weight (%)'] > 0)] iShares = pd.concat([iShares, df]) # + id="6_4IW9UtnNP-" iShares['total_value($)'] = iShares.groupby(iShares.index)['Market Value'].sum() iShares['total_shares'] = iShares.groupby(iShares.index)['Shares'].sum() iShares = iShares[~iShares.index.duplicated(keep='first')] iShares.drop(['Market Value','Weight (%)','Shares'], axis=1, inplace=True) iShares.dropna(inplace=True) iShares['weight(%)'] = iShares.apply(lambda x: (x['total_value($)']/iShares['total_value($)'].sum())*100 , axis=1) iShares.sort_values(by=['weight(%)'], ascending=False, inplace=True) # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="DJmlhHtpafX9" outputId="14fda665-34df-43c3-c590-5c9959ddebf5" iShares # + id="MNJ5ALynnft9" colab={"base_uri": "https://localhost:8080/", "height": 316} outputId="5007589e-f73e-47dd-8fcf-6183dceadfc2" ax = iShares['weight(%)'].head(15).plot(kind='bar') ax.axhline(iShares['weight(%)'].mean(), color='red', linewidth=2, label='Mean') plt.text(13,0.2,'Mean') ax.set(ylabel='Weight(%)', title='Top tickers in BlackRock\'s iShares') plt.show() # + [markdown] id="-mHD-wW211SA" # # Invesco # # **[QQQ](https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=QQQ):** Includes 100 of the largest international and domestic companies listed on the Nasdaq. # **[PTF](https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=PTF):** DWA Technology Momentum. # # **[PBW](https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=PBW):** WilderHill Clean Energy. 
# **[TAN](https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=TAN):** Solar. # # **[PBE](https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=PBE):** Dynamic Biotechnology & Genome. # **[PSI](https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=PSI):** Dynamic Semiconductors. # # **[PZD](https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=PZD):** Cleantech™. # **[PPA](https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=PPA):** Aerospace & Defense. # # **[PYZ](https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=PYZ):** Basic Materials Momentum. # **[PEZ](https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=PEZ):** Consumer Cyclicals Momentum. # # **[PRN](https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=PRN):** Industrials Momentum. # **[PTF](https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=PTF):** Technology Momentum. # # **[SPGP](https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=SPGP):** S&P 500 GARP. # **[SPVM](https://www.invesco.com/us/financial-products/etfs/holdings?audienceType=Investor&ticker=SPVM):** S&P 500 Value with Momentum. 
# + id="m-HYh1PI12Oj" invesco_holdings = { 'qqq': 'https://www.invesco.com/us/financial-products/etfs/holdings/main/holdings/0?audienceType=Advisor&action=download&ticker=QQQ', 'ptf': 'https://www.invesco.com/us/financial-products/etfs/holdings/main/holdings/0?audienceType=Investor&action=download&ticker=PTF', 'pbw': 'https://www.invesco.com/us/financial-products/etfs/holdings/main/holdings/0?audienceType=Investor&action=download&ticker=PBW', 'tan': 'https://www.invesco.com/us/financial-products/etfs/holdings/main/holdings/0?audienceType=Investor&action=download&ticker=TAN', 'pbe': 'https://www.invesco.com/us/financial-products/etfs/holdings/main/holdings/0?audienceType=Investor&action=download&ticker=PBE', 'psi': 'https://www.invesco.com/us/financial-products/etfs/holdings/main/holdings/0?audienceType=Advisor&action=download&ticker=PSI', 'pzd': 'https://www.invesco.com/us/financial-products/etfs/holdings/main/holdings/0?audienceType=Advisor&action=download&ticker=PZD', 'ppa': 'https://www.invesco.com/us/financial-products/etfs/holdings/main/holdings/0?audienceType=Advisor&action=download&ticker=PPA', 'pyz': 'https://www.invesco.com/us/financial-products/etfs/holdings/main/holdings/0?audienceType=Advisor&action=download&ticker=PYZ', 'pez': 'https://www.invesco.com/us/financial-products/etfs/holdings/main/holdings/0?audienceType=Advisor&action=download&ticker=PEZ', 'prn': 'https://www.invesco.com/us/financial-products/etfs/holdings/main/holdings/0?audienceType=Advisor&action=download&ticker=PRN', 'ptf': 'https://www.invesco.com/us/financial-products/etfs/holdings/main/holdings/0?audienceType=Advisor&action=download&ticker=PTF', 'spgp': 'https://www.invesco.com/us/financial-products/etfs/holdings/main/holdings/0?audienceType=Advisor&action=download&ticker=SPGP', 'spvm': 'https://www.invesco.com/us/financial-products/etfs/holdings/main/holdings/0?audienceType=Advisor&action=download&ticker=SPVM' } invesco = pd.DataFrame(columns=['MarketValue', 'Weight', 'Shares/Par 
Value']) for url in invesco_holdings.values(): req = requests.get(url, headers=headers) url_data = StringIO(req.text) df = pd.read_csv(url_data) df['Ticker'] = df['Holding Ticker'].str.split(' ').str[0] df.drop(['Fund Ticker','Security Identifier','Name','Class of Shares','Sector','Date','Holding Ticker'], axis=1, inplace=True) df.set_index('Ticker', inplace=True) df = df[~df.index.duplicated(keep='first')] df = df.replace(',','', regex=True) df = df.astype('float64') df = df[(df['MarketValue'] > 0) & (df['Weight'] > 0)] invesco = pd.concat([invesco, df]) # + id="xyYYNDrKwmEM" invesco['total_value($)'] = invesco.groupby(invesco.index)['MarketValue'].sum() invesco['total_shares'] = invesco.groupby(invesco.index)['Shares/Par Value'].sum() invesco = invesco[~invesco.index.duplicated(keep='first')] invesco.drop(['MarketValue','Weight','Shares/Par Value'], axis=1, inplace=True) invesco.dropna(inplace=True) invesco['weight(%)'] = invesco.apply(lambda x: (x['total_value($)']/invesco['total_value($)'].sum())*100 , axis=1) invesco.sort_values(by=['weight(%)'], ascending=False, inplace=True) # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="qnVjhVdzTY7A" outputId="4e88f472-70f1-4a68-fced-130b662d0ba4" invesco # + id="3iYbKuyFnkKU" colab={"base_uri": "https://localhost:8080/", "height": 314} outputId="63cb7439-da31-4c42-b4d3-16bf59c07999" ax = invesco['weight(%)'].head(15).plot(kind='bar') ax.axhline(invesco['weight(%)'].mean(), color='red', linewidth=2, label='Mean') plt.text(13,0.6,'Mean') ax.set(ylabel='Weight(%)', title='Top tickers in Invesco') plt.show() # + [markdown] id="kazO7G3bkGNG" # # Global X # # **[BOTZ](https://www.globalxetfs.com/funds/botz/):** Robotics & Artificial Intelligence. # **[LIT](https://www.globalxetfs.com/funds/lit/):** Lithium & Battery Tech. # # **[CLOU](https://www.globalxetfs.com/funds/clou/):** Cloud Computing. # **[FINX](https://www.globalxetfs.com/funds/finx/):** FinTech. 
# # **[PAVE](https://www.globalxetfs.com/funds/pave/):** U.S. Infrastructure Development. # **[BUG](https://www.globalxetfs.com/funds/bug/):** Cybersecurity. # # **[HERO](https://www.globalxetfs.com/funds/hero/):** Video Games & Esports. # **[DRIV](https://www.globalxetfs.com/funds/driv/):** Autonomous & Electric Vehicles. # # **[EDOC](https://www.globalxetfs.com/funds/edoc/):** Telemedicine & Digital Health. # **[SNSR](https://www.globalxetfs.com/funds/snsr/):** Internet of Things. # # **[EBIZ](https://www.globalxetfs.com/funds/ebiz/):** E-commerce. # **[GNOM](https://www.globalxetfs.com/funds/gnom/):** Genomics & Biotechnology. # # **[MILN](https://www.globalxetfs.com/funds/miln/):** Millennials Thematic. # **[RNRG](https://www.globalxetfs.com/funds/rnrg/):** Renewable Energy Producers. # # **[BFIT](https://www.globalxetfs.com/funds/bfit/):** Health & Wellness Thematic. # **[VPN](https://www.globalxetfs.com/funds/vpn/):** Data Center REITs & Digital Infrastructure # # + id="73XEedi0kFQD" globalX_holdings = { 'botz': 'https://www.globalxetfs.com/funds/botz/?download_full_holdings=true', 'lit': 'https://www.globalxetfs.com/funds/lit/?download_full_holdings=true', 'clou': 'https://www.globalxetfs.com/funds/clou/?download_full_holdings=true', 'finx': 'https://www.globalxetfs.com/funds/finx/?download_full_holdings=true', 'pave': 'https://www.globalxetfs.com/funds/pave/?download_full_holdings=true', 'bug': 'https://www.globalxetfs.com/funds/bug/?download_full_holdings=true', 'hero': 'https://www.globalxetfs.com/funds/hero/?download_full_holdings=true', 'driv': 'https://www.globalxetfs.com/funds/driv/?download_full_holdings=true', 'edoc': 'https://www.globalxetfs.com/funds/edoc/?download_full_holdings=true', 'snsr': 'https://www.globalxetfs.com/funds/snsr/?download_full_holdings=true', 'ebiz': 'https://www.globalxetfs.com/funds/ebiz/?download_full_holdings=true', 'gnom': 'https://www.globalxetfs.com/funds/gnom/?download_full_holdings=true', 'miln': 
'https://www.globalxetfs.com/funds/miln/?download_full_holdings=true', 'rnrg': 'https://www.globalxetfs.com/funds/rnrg/?download_full_holdings=true', 'bfit': 'https://www.globalxetfs.com/funds/bfit/?download_full_holdings=true', 'vpn': 'https://www.globalxetfs.com/funds/vpn/?download_full_holdings=true' } globalX = pd.DataFrame(columns=['Market Value ($)', '% of Net Assets', 'Shares Held']) for url in globalX_holdings.values(): req = requests.get(url, headers=headers) url_data = StringIO(req.text) df = pd.read_csv(url_data, skiprows=2) df.drop(['Name','SEDOL','Market Price ($)'], axis=1, inplace=True) df['Ticker'] = df['Ticker'].str.split(' ').str[0] df.set_index('Ticker', inplace=True) df = df[~df.index.duplicated(keep='first')] df = df.replace(',','', regex=True) df = df.astype('float64') df = df[(df['Market Value ($)'] > 0) & (df['% of Net Assets'] > 0)] df.dropna(inplace=True) globalX = pd.concat([globalX, df]) # + id="sluAhfF1VKe6" globalX['total_value($)'] = globalX.groupby(globalX.index)['Market Value ($)'].sum() globalX['total_shares'] = globalX.groupby(globalX.index)['Shares Held'].sum() globalX = globalX[~globalX.index.duplicated(keep='first')] globalX.drop(['Market Value ($)','% of Net Assets','Shares Held'], axis=1, inplace=True) globalX.dropna(inplace=True) globalX['weight(%)'] = globalX.apply(lambda x: (x['total_value($)']/globalX['total_value($)'].sum())*100 , axis=1) globalX.sort_values(by=['weight(%)'], ascending=False, inplace=True) # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="8xEyUXmOVYrP" outputId="d851e8e2-5918-4e54-8c3e-49d49f23ab9b" globalX # + id="w-ri7qDFnoDN" colab={"base_uri": "https://localhost:8080/", "height": 316} outputId="35ee6a67-086e-4cd0-f87f-2b70482785dd" ax = globalX['weight(%)'].head(15).plot(kind='bar') ax.axhline(globalX['weight(%)'].mean(), color='red', linewidth=2, label='Mean') plt.text(13,0.2,'Mean') ax.set(ylabel='Weight(%)', title='Top tickers in Global X') plt.show() # + [markdown] 
id="diRC9_oyPpGi" # # Combined ETF # # MOST OCURRING TICKERS AND TICKERS THAT ARE OVERSOLD IN THE LAST TIME BUT NOT BY FUNDS # # # + id="OHg-uscIns2T" etfs_list = [ark, iShares, invesco, globalX] etfs = pd.concat(etfs_list) # + id="X1NwtibSYo09" etfs['Total Value($)'] = etfs.groupby(etfs.index)['total_value($)'].sum() etfs['Total Shares'] = etfs.groupby(etfs.index)['total_shares'].sum() etfs = etfs[~etfs.index.duplicated(keep='first')] etfs.drop(['total_value($)','weight(%)','total_shares'], axis=1, inplace=True) etfs.dropna(inplace=True) etfs['Weight(%)'] = etfs.apply(lambda x: (x['Total Value($)']/etfs['Total Value($)'].sum())*100 , axis=1) etfs.sort_values(by=['Weight(%)'], ascending=False, inplace=True) # + colab={"base_uri": "https://localhost:8080/", "height": 514} id="BlML--M6ZJmc" outputId="f3a90f1a-1901-4e69-9298-daa5559630f7" etfs.head(15) #combined etf # + [markdown] id="a-n8ZjsqxiQx" # ## Here we use FinViz scanner to scan for the stocks with low Price-to-Book ratio. # # <NAME> in his book 'What works on Wall Streat' suggests to use companies with a value of P/B under 1.4, FinViz does not give an oppotuinity to use exactly 1.4, that's why we are going to use a value of 2. Also we are leveraging price-to-cashflow here. 
# + id="4P_BJHxFt15I" filters_growth = ['fa_pb_u2','fa_pfcf_u20','cap_smallover','fa_pe_u40'] stock_list_growth = Screener(filters=filters_growth, table='Valuation', order='Volume') stock_list_growth.to_csv("stocks_growth.csv") stocks_growth = pd.read_csv('stocks_growth.csv', index_col=['Ticker']) # + colab={"base_uri": "https://localhost:8080/", "height": 390} id="1je8y2QzQfP-" outputId="005d4aec-4b34-4e0e-b903-88c299805609" stocks_growth.tail(10) # + id="uA9S-BR-ST2Z" tickers = stocks_growth.index.to_list() buys = etfs[etfs.index.isin(tickers)][(etfs['Weight(%)'] > 0.01)] buys['Weight(%)'] = buys.apply(lambda x: (x['Total Value($)']/buys['Total Value($)'].sum())*100 , axis=1) buys['Price']=stocks_growth[stocks_growth.index.isin(tickers)]['Price'] # + colab={"base_uri": "https://localhost:8080/", "height": 359} id="Es4-9p9kUnq1" outputId="db2ee0a5-a25c-4e93-c11a-d4e87907acfb" buys.head(10) # + id="nVQ7vWU-YfdC" colab={"base_uri": "https://localhost:8080/", "height": 360} outputId="389ae5bb-fbee-40ba-c8fb-663b4640b80d" plt.figure(figsize=(10,5)) ax = buys['Weight(%)'].sort_values(ascending=False).plot(kind='bar') ax.axhline(buys['Weight(%)'].mean(), color='red', linewidth=2, label='Mean') plt.text(20,2.5,'Mean') ax.set(ylabel='Weight(%)', title='Recommended growth portfolio') plt.show() # + [markdown] id="gntVArJaTI22" # NON US STOCKS # + colab={"base_uri": "https://localhost:8080/", "height": 204} id="NFbUe-CqVq3v" outputId="ecdd9907-ce43-4ff5-f57f-9941710fe591" etfs[etfs.index.str.isdigit()].head(5) #Stocks on Foreign Stocks Markets that are hard to buy but worth mentioning. # + [markdown] id="dDtpXbEIn1Xj" # # After we analyzed ETFs and compared stocks in them with a simple FinViz' screener we got a portfolio, where weight of the each stock is equal to the weight of the stock in the complete ETFs portfolio. It is not a financial advise, leverage the acquired insight in your own further analysis.
ETFs_Portfolio_Analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Levy Stable models of Stochastic Volatility # # This tutorial demonstrates inference using the Levy [Stable](http://docs.pyro.ai/en/stable/distributions.html#stable) distribution through a motivating example of a non-Gaussian stochastic volatilty model. # # Inference with stable distribution is tricky because the density `Stable.log_prob()` is not defined. In this tutorial we demonstrate two approaches to inference: (i) using the [poutine.reparam](http://docs.pyro.ai/en/latest/poutine.html#pyro.poutine.handlers.reparam) effect to transform models in to a tractable form, and (ii) using the likelihood-free loss [EnergyDistance](http://docs.pyro.ai/en/latest/inference_algos.html#pyro.infer.energy_distance.EnergyDistance) with SVI. # # # #### Summary # # - [Stable.log_prob()](http://docs.pyro.ai/en/stable/distributions.html#stable) is undefined. # - Stable inference requires either reparameterization or a likelihood-free loss. # - Reparameterization: # - The [poutine.reparam()](http://docs.pyro.ai/en/latest/poutine.html#pyro.poutine.handlers.reparam) handler can transform models using various [strategies](http://docs.pyro.ai/en/latest/infer.reparam.html). # - The [StableReparam](http://docs.pyro.ai/en/latest/infer.reparam.html#pyro.infer.reparam.stable.StableReparam) strategy can be used for Stable distributions in SVI or HMC. # - The [LatentStableReparam](http://docs.pyro.ai/en/latest/infer.reparam.html#pyro.infer.reparam.stable.LatentStableReparam) strategy is a little cheaper, but cannot be used for likelihoods. # - The [DiscreteCosineReparam](http://docs.pyro.ai/en/latest/infer.reparam.html#pyro.infer.reparam.discrete_cosine.DiscreteCosine) strategy improves geometry in batched latent time series models. 
# - Likelihood-free loss with SVI: # - The [EnergyDistance](http://docs.pyro.ai/en/latest/inference_algos.html#pyro.infer.energy_distance.EnergyDistance) loss allows stable distributions in the guide and in model likelihoods. # # #### Table of contents # # - [Daily S&P data](#data) # - [Fitting a single distribution to log returns](#fitting) using `EnergyDistance` # - [Modeling stochastic volatility](#modeling) using `poutine.reparam` # ## Daily S&P 500 data <a class="anchor" id="data"></a> # # The following daily closing prices for the S&P 500 were loaded from [Yahoo finance](https://finance.yahoo.com/quote/%5EGSPC/history/). # + import math import os import torch import pyro import pyro.distributions as dist from matplotlib import pyplot from torch.distributions import constraints from pyro import poutine from pyro.contrib.examples.finance import load_snp500 from pyro.infer import EnergyDistance, Predictive, SVI, Trace_ELBO from pyro.infer.autoguide import AutoDiagonalNormal from pyro.infer.reparam import DiscreteCosineReparam, StableReparam from pyro.optim import ClippedAdam from pyro.ops.tensor_utils import convolve # %matplotlib inline assert pyro.__version__.startswith('1.5.0') pyro.enable_validation(True) smoke_test = ('CI' in os.environ) # - df = load_snp500() dates = df.Date.to_numpy() x = torch.tensor(df["Close"]).float() x.shape pyplot.figure(figsize=(9, 3)) pyplot.plot(x) pyplot.yscale('log') pyplot.ylabel("index") pyplot.xlabel("trading day") pyplot.title("S&P 500 from {} to {}".format(dates[0], dates[-1])); # Of interest are the log returns, i.e. the log ratio of price on two subsequent days. pyplot.figure(figsize=(9, 3)) r = (x[1:] / x[:-1]).log() pyplot.plot(r, "k", lw=0.1) pyplot.title("daily log returns") pyplot.xlabel("trading day"); pyplot.figure(figsize=(9, 3)) pyplot.hist(r.numpy(), bins=200) pyplot.yscale('log') pyplot.ylabel("count") pyplot.xlabel("daily log returns") pyplot.title("Empirical distribution. 
mean={:0.3g}, std={:0.3g}".format(r.mean(), r.std())); # ## Fitting a single distribution to log returns <a class="anchor" id="fitting"></a> # # Log returns appear to be heavy-tailed. First let's fit a single distribution to the returns. To fit the distribution, we'll use a likelihood free statistical inference algorithm [EnergyDistance](http://docs.pyro.ai/en/latest/inference_algos.html#pyro.infer.energy_distance.EnergyDistance), which matches fractional moments of observed data and can handle data with heavy tails. def model(): stability = pyro.param("stability", torch.tensor(1.9), constraint=constraints.interval(0, 2)) skew = 0. scale = pyro.param("scale", torch.tensor(0.1), constraint=constraints.positive) loc = pyro.param("loc", torch.tensor(0.)) with pyro.plate("data", len(r)): return pyro.sample("r", dist.Stable(stability, skew, scale, loc), obs=r) # + # %%time pyro.clear_param_store() pyro.set_rng_seed(1234567890) num_steps = 1 if smoke_test else 201 optim = ClippedAdam({"lr": 0.1, "lrd": 0.1 ** (1 / num_steps)}) svi = SVI(model, lambda: None, optim, EnergyDistance()) losses = [] for step in range(num_steps): loss = svi.step() losses.append(loss) if step % 20 == 0: print("step {} loss = {}".format(step, loss)) print("-" * 20) pyplot.figure(figsize=(9, 3)) pyplot.plot(losses) pyplot.yscale("log") pyplot.ylabel("loss") pyplot.xlabel("SVI step") for name, value in sorted(pyro.get_param_store().items()): if value.numel() == 1: print("{} = {:0.4g}".format(name, value.squeeze().item())) # - samples = poutine.uncondition(model)().detach() pyplot.figure(figsize=(9, 3)) pyplot.hist(samples.numpy(), bins=200) pyplot.yscale("log") pyplot.xlabel("daily log returns") pyplot.ylabel("count") pyplot.title("Posterior predictive distribution"); # This is a poor fit, but that was to be expected since we are mixing all time steps together: we would expect this to be a scale-mixture of distributions (Normal, or Stable), but are modeling it as a single distribution (Stable in 
this case). # ## Modeling stochastic volatility <a class="anchor" id="modeling"></a> # # We'll next fit a stochastic volatility model. # Let's begin with a constant volatility model where log price $p$ follows Brownian motion # # $$ # \log p_t = \log p_{t-1} + w_t \sqrt h # $$ # # where $w_t$ is a sequence of standard white noise. We can rewrite this model in terms of the log returns $r_t=\log(p_t\,/\,p_{t-1})$: # # $$ # r_t = w_t \sqrt h # $$ # # Now to account for [volatility clustering](https://en.wikipedia.org/wiki/Volatility_clustering) we can generalize to a stochastic volatility model where volatility $h$ depends on time $t$. Among the simplest such models is one where $h_t$ follows geometric Brownian motion # # $$ # \log h_t = \log h_{t-1} + \sigma v_t # $$ # # where again $v_t$ is a sequence of standard white noise. The entire model thus consists of a geometric Brownian motion $h_t$ that determines the diffusion rate of another geometric Brownian motion $p_t$: # # $$ # \log h_t = \log h_{t-1} + \sigma v_t \\ # \log p_t = \log p_{t-1} + w_t \sqrt h_t # $$ # # Usually $v_1$ and $w_t$ are both Gaussian. We will generalize to a Stable distribution for $w_t$, learning three parameters (stability, skew, and location), but still scaling by $\sqrt h_t$. # # Our Pyro model will sample the increments $v_t$ and record the computation of $\log h_t$ via [pyro.deterministic](http://docs.pyro.ai/en/stable/primitives.html#pyro.deterministic). Note that there are many ways of implementing this model in Pyro, and geometry can vary depending on implementation. The following version seems to have good geometry, when combined with reparameterizers. def model(data): # Note we avoid plates because we'll later reparameterize along the time axis using # DiscreteCosineReparam, breaking independence. This requires .unsqueeze()ing scalars. 
h_0 = pyro.sample("h_0", dist.Normal(0, 1)).unsqueeze(-1) sigma = pyro.sample("sigma", dist.LogNormal(0, 1)).unsqueeze(-1) v = pyro.sample("v", dist.Normal(0, 1).expand(data.shape).to_event(1)) log_h = pyro.deterministic("log_h", h_0 + sigma * v.cumsum(dim=-1)) sqrt_h = log_h.mul(0.5).exp().clamp(min=1e-8, max=1e8) # Observed log returns, assumed to be a Stable distribution scaled by sqrt(h). r_loc = pyro.sample("r_loc", dist.Normal(0, 1e-2)).unsqueeze(-1) r_skew = pyro.sample("r_skew", dist.Uniform(-1, 1)).unsqueeze(-1) r_stability = pyro.sample("r_stability", dist.Uniform(0, 2)).unsqueeze(-1) pyro.sample("r", dist.Stable(r_stability, r_skew, sqrt_h, r_loc * sqrt_h).to_event(1), obs=data) # We use two reparameterizers: [StableReparam](http://docs.pyro.ai/en/latest/infer.reparam.html#pyro.infer.reparam.stable.StableReparam) to handle the `Stable` likelihood (since `Stable.log_prob()` is undefined), and [DiscreteCosineReparam](http://docs.pyro.ai/en/latest/infer.reparam.html#pyro.infer.reparam.discrete_cosine.DiscreteCosineReparam) to improve geometry of the latent Gaussian process for `v`. We'll then use `reparam_model` for both inference and prediction. 
# Reparameterize the model: StableReparam handles the Stable likelihood (whose
# log_prob is undefined), and DiscreteCosineReparam improves the geometry of
# the latent Gaussian process "v".
reparam_model = poutine.reparam(model, {"v": DiscreteCosineReparam(), "r": StableReparam()})

# +
# %%time
pyro.clear_param_store()
pyro.set_rng_seed(1234567890)
num_steps = 1 if smoke_test else 1001  # single step in smoke-test mode

# ClippedAdam with exponential lr decay: "lrd" is the per-step decay factor,
# chosen so the learning rate shrinks by 10x over the full run.
optim = ClippedAdam({"lr": 0.05, "betas": (0.9, 0.99), "lrd": 0.1 ** (1 / num_steps)})
guide = AutoDiagonalNormal(reparam_model)  # mean-field Gaussian guide
svi = SVI(reparam_model, guide, optim, Trace_ELBO())

losses = []
for step in range(num_steps):
    loss = svi.step(r) / len(r)  # per-datum ELBO loss
    losses.append(loss)
    if step % 50 == 0:
        median = guide.median()
        print("step {} loss = {:0.6g}".format(step, loss))

# Print a central posterior interval for each scalar latent.
# NOTE(review): statement grouping below is reconstructed from a flattened
# notebook export -- confirm against the original cell layout.
print("-" * 20)
for name, (lb, ub) in sorted(guide.quantiles([0.325, 0.675]).items()):
    if lb.numel() == 1:
        lb = lb.detach().squeeze().item()
        ub = ub.detach().squeeze().item()
        print("{} = {:0.4g} ± {:0.4g}".format(name, (lb + ub) / 2, (ub - lb) / 2))

# Training-loss curve (y-axis clipped at 20 to hide early transients).
pyplot.figure(figsize=(9, 3))
pyplot.plot(losses)
pyplot.ylabel("loss")
pyplot.xlabel("SVI step")
pyplot.xlim(0, len(losses))
pyplot.ylim(min(losses), 20)
# -

# It appears the log returns exhibit very little skew, but exhibit a stability
# parameter slightly but significantly less than 2. This contrasts the usual
# Normal model corresponding to a Stable distribution with skew=0 and
# stability=2. We can now visualize the estimated volatility:

# +
fig, axes = pyplot.subplots(2, figsize=(9, 5), sharex=True)
pyplot.subplots_adjust(hspace=0)
axes[1].plot(r, "k", lw=0.2)
axes[1].set_ylabel("log returns")
axes[1].set_xlim(0, len(r))

# We will pull out median log returns using the autoguide's .median() and poutines.
with torch.no_grad():
    # Draw 20 posterior-predictive samples of the latent volatility path.
    pred = Predictive(reparam_model, guide=guide, num_samples=20, parallel=True)(r)

log_h = pred["log_h"]
axes[0].plot(log_h.median(0).values, lw=1)
# Shade between the 2nd and 18th of 20 samples (~an 80% band).
axes[0].fill_between(torch.arange(len(log_h[0])), log_h.kthvalue(2, dim=0).values, log_h.kthvalue(18, dim=0).values, color='red', alpha=0.5)
axes[0].set_ylabel("log volatility")
stability = pred["r_stability"].median(0).values.item()
axes[0].set_title("Estimated index of stability = {:0.4g}".format(stability))
axes[1].set_xlabel("trading day");
# -

# Observe that volatility roughly follows areas of large absolute log returns.
# Note that the uncertainty is underestimated, since we have used an
# approximate AutoDiagonalNormal guide. For more precise uncertainty estimates,
# one could use HMC or NUTS inference.
examples/stable.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] colab_type="text" id="view-in-github" # <a href="https://colab.research.google.com/github/AI4Finance-LLC/ElegantRL/blob/master/eRL_demo_StockTrading.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="c1gUG3OCJ5GS" # # **Stock Trading Application in ElegantRL** # # # # # # + [markdown] id="FGXyBBvL0dR2" # # **Part 1: Problem Formulation** # Formally, we model stock trading as a Markov Decision Process (MDP), and formulate the trading objective as maximization of expected return: # # # # * **State s = [b, p, h]**: a vector that includes the remaining balance b, stock prices p, and stock shares h. p and h are vectors with D dimension, where D denotes the number of stocks. # * **Action a**: a vector of actions over D stocks. The allowed actions on each stock include selling, buying, or holding, which result in decreasing, increasing, or no change of the stock shares in h, respectively. # * **Reward r(s, a, s’)**: The asset value change of taking action a at state s and arriving at new state s’. # * **Policy π(s)**: The trading strategy at state s, which is a probability distribution of actions at state s. # * **Q-function Q(s, a)**: the expected return (reward) of taking action a at state s following policy π. # * **State-transition**: After taking the actions a, the number of shares h is modified, as shown in Fig 3, and the new portfolio is the summation of the balance and the total value of the stocks. 
# + [markdown] id="jESlu_WtD_JS"
# # **Part 2: Stock Trading Environment Design**
#
# **State Space and Action Space**
#
#
# * **State Space**: We use a 181-dimensional vector consisting of seven parts of information to represent the state space of the multiple-stocks trading environment: [b, p, h, M, R, C, X], where b is the balance, p is the stock prices, h is the number of shares, M is the Moving Average Convergence Divergence (MACD), R is the Relative Strength Index (RSI), C is the Commodity Channel Index (CCI), and X is the Average Directional Index (ADX).
# * **Action Space**: As a recap, we have three types of actions: selling, buying, and holding for a single stock. We use a negative value for selling, a positive value for buying, and zero for holding. In this case, the action space is defined as {-k, …, -1, 0, 1, …, k}, where k is the maximum share to buy or sell in each transaction.
#
#
# **Easy-to-customize Features**
#
#
# * **initial_capital**: the initial capital that the user wants to invest.
# * **tickers**: the stocks that the user wants to trade with.
# * **initial_stocks**: the initial amount of each stock and the default could be zero.
# * **buy_cost_pct, sell_cost_pct**: the transaction fee of each buying or selling transaction.
# * **max_stock**: the user is able to define the maximum number of stocks that are allowed to trade per transaction. Users can also set the maximum percentage of capital to invest in each stock.
# * **tech_indicator_list**: the list of financial indicators that are taken into account, which is used to define a state.
# * **start_date, start_eval_date, end_eval_date**: the training and backtesting time intervals. Three dates (or timestamps) are used; once the training period is specified, the rest is backtesting.
# # + [markdown] id="DbamGVHC3AeW" # # **Part 3: Install ElegantRL and related packages** # + colab={"base_uri": "https://localhost:8080/"} id="U35bhkUqOqbS" outputId="e67021c0-4daf-408c-f6c2-e34b442ad45a" # !pip install git+https://github.com/AI4Finance-LLC/ElegantRL.git # !pip install yfinance stockstats # + [markdown] id="UVdmpnK_3Zcn" # # **Part 4: Import Packages** # # # * **ElegantRL** # * **yfinance**: yfinance aims to solve this problem by offering a reliable, threaded, and Pythonic way to download historical market data from Yahoo! finance. # * **StockDataFrame**: stockstats inherits and extends pandas.DataFrame to support Stock Statistics and Stock Indicators. # # # + id="1VM1xKujoz-6" from elegantrl.run import * from elegantrl.agent import AgentPPO, AgentDDPG from elegantrl.envs.FinRL.StockTrading import StockTradingEnv, check_stock_trading_env import yfinance as yf from stockstats import StockDataFrame as Sdf # + [markdown] id="3n8zcgcn14uq" # # **Part 5: Specify Agent and Environment** # # * **args.agent**: firstly chooses one DRL algorithm to use from agent.py. In this application, we prefer to choose DDPG and PPO agent. # * **args.env**: creates the environment, and the user can either customize own environment or preprocess environments from OpenAI Gym and PyBullet Gym from env.py. In this application, we create the self-designed stock trading environment. # # # > Before finishing initialization of **args**, please see Arguments() in run.py for more details about adjustable hyper-parameters. 
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="E03f6cTeajK4" outputId="2e47cb16-baa0-487d-ca71-ac6c2d249e6b"
# Environment configuration -------------------------------------------------
# NASDAQ-74 ticker universe (finrl.config.NAS_74_TICKER).
tickers = [
    'AAPL', 'ADBE', 'ADI', 'ADP', 'ADSK', 'ALGN', 'ALXN', 'AMAT', 'AMD', 'AMGN',
    'AMZN', 'ASML', 'ATVI', 'BIIB', 'BKNG', 'BMRN', 'CDNS', 'CERN', 'CHKP', 'CMCSA',
    'COST', 'CSCO', 'CSX', 'CTAS', 'CTSH', 'CTXS', 'DLTR', 'EA', 'EBAY', 'FAST',
    'FISV', 'GILD', 'HAS', 'HSIC', 'IDXX', 'ILMN', 'INCY', 'INTC', 'INTU', 'ISRG',
    'JBHT', 'KLAC', 'LRCX', 'MAR', 'MCHP', 'MDLZ', 'MNST', 'MSFT', 'MU', 'MXIM',
    'NLOK', 'NTAP', 'NTES', 'NVDA', 'ORLY', 'PAYX', 'PCAR', 'PEP', 'QCOM', 'REGN',
    'ROST', 'SBUX', 'SIRI', 'SNPS', 'SWKS', 'TTWO', 'TXN', 'VRSN', 'VRTX', 'WBA',
    'WDC', 'WLTW', 'XEL', 'XLNX']  # finrl.config.NAS_74_TICKER

# Technical indicators used to build the state vector
# (finrl.config.TECHNICAL_INDICATORS_LIST).
tech_indicator_list = [
    'macd', 'boll_ub', 'boll_lb', 'rsi_30', 'cci_30',
    'dx_30', 'close_30_sma', 'close_60_sma']  # finrl.config.TECHNICAL_INDICATORS_LIST

gamma = 0.99                       # discount factor
max_stock = 1e2                    # max shares per transaction
initial_capital = 1e6
initial_stocks = np.zeros(len(tickers), dtype=np.float32)  # start with no holdings
buy_cost_pct = 1e-3                # transaction fees
sell_cost_pct = 1e-3
start_date = '2008-03-19'          # training starts here; eval window is the backtest
start_eval_date = '2016-01-01'
end_eval_date = '2021-01-01'

# Agent ---------------------------------------------------------------------
# Training environment (if_eval=False) and a separate evaluation environment.
env = StockTradingEnv('./', gamma, max_stock, initial_capital, buy_cost_pct,
                      sell_cost_pct, start_date, start_eval_date, end_eval_date,
                      tickers, tech_indicator_list, initial_stocks, if_eval=False)
agent = AgentPPO()  # AgentSAC(), AgentTD3(), AgentDDPG()
args = Arguments(agent=agent, env=env)
args.agent.if_use_gae = True          # generalized advantage estimation for PPO
args.agent.lambda_entropy = 0.04      # entropy bonus coefficient

args.env_eval = StockTradingEnv('./', gamma, max_stock, initial_capital, buy_cost_pct,
                                sell_cost_pct, start_date, start_eval_date, end_eval_date,
                                tickers, tech_indicator_list, initial_stocks, if_eval=True)
args.env.target_reward = 3            # training stops once this return is reached
args.env_eval.target_reward = 3

# Hyperparameters -----------------------------------------------------------
args.gamma = gamma
args.break_step = int(2e5)            # hard cap on environment steps
args.net_dim = 2 ** 9
args.max_step = args.env.max_step
args.max_memo = args.max_step * 4
args.batch_size = 2 ** 10
args.repeat_times = 2 ** 3
args.eval_gap = 2 ** 4
args.eval_times1 = 2 ** 3
args.eval_times2 = 2 ** 5
args.if_allow_break = False
args.rollout_num = 2  # the number of rollout workers (larger is not always faster)

# + [markdown] id="z1j5kLHF2dhJ"
# # **Part 6: Train and Evaluate the Agent**
#
# > The training and evaluating processes are all finished inside function **train_and_evaluate_mp()**, and the only parameter for it is **args**. It includes the fundamental objects in DRL:
#
# *   agent,
# *   environment.
#
# > And it also includes the parameters for training-control:
#
# *   batch_size,
# *   target_step,
# *   reward_scale,
# *   gamma, etc.
#
# > The parameters for evaluation-control:
#
# *   break_step,
# *   random_seed, etc.

# + colab={"base_uri": "https://localhost:8080/"} id="KGOPSD6da23k" outputId="2fdbfc4b-80a2-4659-ace5-b3bde0f36d8e"
train_and_evaluate_mp(args)  # the training process will terminate once it reaches the target reward.

# + [markdown] id="JPXOxLSqh5cP"
# Understanding the above results::
# * **Step**: the total training steps.
# * **MaxR**: the maximum reward.
# * **avgR**: the average of the rewards.
# * **stdR**: the standard deviation of the rewards.
# * **objA**: the objective function value of Actor Network (Policy Network).
# * **objC**: the objective function value (Q-value) of Critic Network (Value Network).

# + [markdown] id="v6jvgYPnHMpf"
# # **Part 7: Backtest and Draw the Graph**

# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="3jCdezgzQUhh" outputId="a1ea5861-1ce9-4d44-c94d-3c236eb48428"
# Reload a fresh agent/env pair pointing at the saved training directory,
# then plot the cumulative return over the backtest window.
agent = AgentPPO()
env = StockTradingEnv(cwd='./', if_eval=True)
args = Arguments(agent=agent, env=env)
args.if_remove = False
args.cwd = './AgentPPO/StockTradingEnv-v1_0'   # directory holding trained weights
args.init_before_training()

env.draw_cumulative_return(args, torch)
eRL_demo_StockTrading.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Persistence of change # # Here we adopt a very simple strategy to assess whether changes persist across an author's career. # # We consider the sequence of books an author has written across their career, considering only books published in different years (duplicates in the same year are dropped). Then we create many "triplets" of sequential books, balancing these so different authors have an equal chance to be represented, as long as they have written at least three books. # # For instance a triplet centered at time *t* would be: $book_{t-1}, book_{t}, book_{t+1}$. # # We then ask whether the average distance between topic proportions for the two single steps--from $t-1$ to $t$ and from $t$ to $t+1$--is larger or smaller than the total distance from $t-1$ to $t+1$. # # If there's no persistent change over time for individuals, then all values for books are just $authormean \pm \epsilon$, and the mean distance across two steps should be the same as the mean distance for one step. # # If, however, change is persistent, the two-step distance should be greater than the one-step distance. # # We can then ask whether this ratio varies across topics, and whether it varies with age at $t-1$ or with the total distance across the triplet. # # When we're considering topics individually, we measure distance simply as $|t_1 - t_2|$. When we don't need to specify topic, we can measure distance more generally using euclidean distance. (We don't use cosine distance here because its failure to satisfy triangle inequality creates an issue for the method we're attempting to implement.) 
import pandas as pd
import numpy as np
from collections import Counter
from matplotlib import pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr, zscore
import random
from scipy.spatial.distance import euclidean
import statsmodels.api as sm

# #### Load topic data
#
# Then limit it to books whose authors have three or more books.

bookdata = pd.read_csv('../topicdata/bookleveltopicdata.tsv', sep = '\t', low_memory = False)
print(bookdata.shape)

# Keep only books by authors with 3+ books, since a triplet needs three works.
bookdata = bookdata.loc[bookdata.authof3ormore == True, : ]
bookdata.shape

# Unique author names, shuffled so later sampling order is random.
authors = list(set(bookdata.hathi_author))
random.shuffle(authors)
len(authors)

bookdata.head()

# ## How many triplets are there?

# +
# An author with n (>2) books contributes n - 2 possible triad centers.
tripletcount = 0
for auth in authors:
    authworks = bookdata.loc[bookdata.hathi_author == auth, :].copy()
    numworks = len(authworks)
    if numworks > 2:
        tripletcount = tripletcount + numworks - 2
print(tripletcount)
# -

# ## Measuring triplets
#
# We create a dataframe where each row represents a triplet measurement. Each row will contain
#
# 1. The dimension measured, which could be a natural number for a single topic dimension, or -1 for euclidean distance in the whole topic space. We'll select on this column.
#
# 2. The average of both single-step measurements.
#
# 3. The double-step measurement.
#
# 4. The author's age at t-1.
#
# 5. The age gap from t-1 to t+1.
def maketriplets(authorselection):
    """Build one randomly chosen book triad per author and measure drift.

    For each author in authorselection (names may repeat), pick one random
    triad of consecutive books (deduplicated by publication year) and record,
    for each dimension:

      dim        -1 for euclidean distance across all 200 topic columns,
                 or the topic number 0-199 for a single topic
      single     summed one-step distance (t-1 -> t) + (t -> t+1)
      double     two-step distance (t-1 -> t+1)
      initialage author's age at t-1
      agegap     age difference from t-1 to t+1

    Returns a DataFrame with those five columns. Relies on the module-level
    `bookdata` frame; topic proportions are assumed to occupy columns 0-199
    and author age column 207 -- TODO confirm against the source TSV.

    Note: the original version also computed thisage / age1to2 / age2to3
    inside the topic loop; those values were never stored or used, so the
    dead computations have been removed (no behavior change).
    """
    dimensions = []
    singlesteps = []
    doublesteps = []
    initialages = []
    agegaps = []
    ctr = 0
    errors = 0

    for auth in authorselection:
        authworks = bookdata.loc[bookdata.hathi_author == auth, :].copy()  # all the author's books
        authworks = authworks.sample(frac = 1)  # shuffle so the kept date-duplicate is random
        authworks = authworks.drop_duplicates(subset = 'firstpub', keep = 'first')  # one book per year
        authworks = authworks.sort_values(by = 'firstpub')  # chronological order

        if len(authworks) < 3:
            # can happen if an author has only 3 works and 2 are date-duplicates
            errors += 1
            continue

        centerpoints = [x for x in range(1, len(authworks) - 1)]  # a triad cannot be centered
        # on the first or last book
        rownum = random.choice(centerpoints)  # randomly choose an allowable triad center

        prevage = int(authworks.iloc[rownum - 1, 207])  # author's age at start of triad
        nextage = int(authworks.iloc[rownum + 1, 207])
        agegap = nextage - prevage

        # Whole-topic-space measurement, recorded under dim == -1.
        prevvector = authworks.iloc[rownum - 1, 0:200]
        thisvector = authworks.iloc[rownum, 0:200]
        nextvector = authworks.iloc[rownum + 1, 0:200]

        from1to2 = euclidean(prevvector, thisvector)
        from2to3 = euclidean(thisvector, nextvector)
        from1to3 = euclidean(prevvector, nextvector)

        dimensions.append(-1)
        singlesteps.append(from1to2 + from2to3)  # if the distances were cumulative & in the same direction
        doublesteps.append(from1to3)
        initialages.append(prevage)
        agegaps.append(agegap)

        for tnum in range(0, 200):  # and then for each topic tnum
            prevwork = authworks.iat[rownum - 1, tnum]  # previous value of tnum
            thiswork = authworks.iat[rownum, tnum]      # value at triad center
            nextwork = authworks.iat[rownum + 1, tnum]  # next value of tnum

            from1to3 = abs(nextwork - prevwork)
            from1to2 = abs(thiswork - prevwork)
            from2to3 = abs(nextwork - thiswork)
            ifcumulative = from1to2 + from2to3  # if the changes were in the same direction

            dimensions.append(tnum)
            singlesteps.append(ifcumulative)
            doublesteps.append(from1to3)
            initialages.append(prevage)
            agegaps.append(agegap)

        ctr += 1
        if ctr % 500 == 1:
            print(ctr)  # progress indicator

    triplets = pd.DataFrame({'dim': dimensions, 'single': singlesteps,
                             'double': doublesteps, 'initialage': initialages,
                             'agegap': agegaps})
    print('Errors: ', errors)
    return triplets


# +
equalauthors = random.choices(authors, k = 20000)  # randomly select author names
# with replacement, from a list
# where each author is present once, without regard to # of books
# We're selecting most authors much more than once, but then, a lot of them
# have more than one possible triplet

triplets = maketriplets(equalauthors)
# -

triplets.shape

# ## Aggregate triplets by topic
#
# We go through the triplet frame and aggregate by topic.
#
# For each topic we measure
#
# 1. How much the distance from book 1 to book 3 exceeds the distance we would expect if all books are distributed randomly around the same mean value. (If sequence didn't matter, we would expect 1->3 to be the mean of 1->2 and 2->3.) We call this "cumulative drift" in the measured topic, and express it as excess over expected random variation.
# +
# Per-topic drift: how much the two-step distance exceeds half the summed
# one-step distances (0 would mean no persistent change).
dimensions = []
drifts = []

for dimension in range(0, 200):
    df = triplets.loc[triplets.dim == dimension , : ]
    thisdrift = (np.mean(df.double) / (np.mean(df.single) / 2)) - 1
    dimensions.append(dimension)
    drifts.append(thisdrift)

topicmeans = pd.DataFrame({'topic': dimensions, 'drift': drifts})
# -

topicmeans.hist(column = 'drift', bins = 25)

# ## Careers

# +
# standardize all the topic columns
career = bookdata.copy()
for i in range(200):
    career.iloc[ : , i] = zscore(career.iloc[ : , i])

# +
# For each author, measure total change in each topic from first to last book,
# along with the career span in years (age column 207 -- TODO confirm).
dimensions = []
careerspans = []
careerchanges = []
careerabschanges = []
ctr = 0

for auth in authors:
    authworks = career.loc[career.hathi_author == auth, :].copy()  # get all the author's books
    authworks = authworks.sort_values(by = 'firstpub')  # sort them by date
    cspan = authworks.iat[len(authworks) - 1, 207] - authworks.iat[0, 207]
    for tnum in range(200):
        thischange = authworks.iat[len(authworks) - 1, tnum] - authworks.iat[0, tnum]
        dimensions.append(tnum)
        careerspans.append(cspan)
        careerchanges.append(thischange)
        careerabschanges.append(abs(thischange))
    ctr += 1
    if ctr % 500 == 1:
        print(ctr)

# +
# Aggregate career change per topic, weighting each author by career span.
careervariation = pd.DataFrame({'topic': dimensions, 'spans': careerspans,
                                'changes': careerchanges, 'abschanges': careerabschanges})
weightedchanges = []
weightedabschanges = []
tnums = []
for tnum, df in careervariation.groupby('topic'):
    weightedchanges.append(np.average(df.changes, weights = df.spans))
    weightedabschanges.append(np.average(df.abschanges, weights = df.spans))
    tnums.append(tnum)
careerdf = pd.DataFrame({'topic': tnums, 'change': weightedchanges,
                         'abschanges': weightedabschanges})
# -

pearsonr(np.abs(careerdf.change), topicmeans.drift)

pearsonr(careerdf.abschanges, topicmeans.drift)

topicmeans['meancareerchange'] = careerdf.change.values
topicmeans['abscareerchange'] = careerdf.abschanges.values

topicmeans.to_csv('changepersistence.tsv', sep = '\t', index = False)

# ## Evaluate overall relationship to age
#
# Here we're looking at the Euclidean distance in topic space, for all topics at once.

euc = triplets.loc[triplets.dim == -1, : ]  # Dimension -1 is the euclidean distance in topic space
euc = euc.assign(midage = euc.initialage + (euc.agegap / 2))

# Regress out the age gap so drift comparisons across ages aren't confounded
# by longer gaps between books; re-add the mean so scales stay comparable.
predictors = euc.loc[ : , ['agegap']]
predictors = sm.add_constant(predictors, prepend = False)

mod = sm.OLS(euc['double'], predictors)
res = mod.fit()
residuals = res.resid
doubleresiduals = residuals + np.mean(euc['double'])

mod = sm.OLS(euc['single'], predictors)
res = mod.fit()
residuals = res.resid
singleresiduals = residuals + np.mean(euc['single'])

euc = euc.assign(doubleresid = doubleresiduals)
euc = euc.assign(singleresid = singleresiduals)

byage = euc.groupby('midage').agg({'dim': 'count', 'single': 'mean', 'double': 'mean',
                                   'initialage': 'mean', 'agegap': 'mean',
                                   'singleresid': 'mean', 'doubleresid': 'mean'})
byage['drift'] = (byage.double / (byage.single / 2)) - 1
byage['correcteddrift'] = (byage.doubleresid / (byage.singleresid / 2)) - 1
byage.reset_index(inplace = True, drop = False)

byage.iloc[0:95, : ].plot(x = 'midage', y = 'doubleresid')

byage.iloc[0:95, : ].plot(x = 'midage', y = 'correcteddrift')

pearsonr(byage.initialage, byage.correcteddrift)

pearsonr(byage.midage, byage.doubleresid)

pearsonr(byage.midage, byage.agegap)

pearsonr(byage.midage, byage.correcteddrift)

# Regress per-triplet drift on midpoint age and age gap.
euc = euc.assign(drift = (euc.double / (euc.single / 2)) - 1)
predictors = euc.loc[ : , ['midage', 'agegap']]
predictors = sm.add_constant(predictors, prepend = False)
predictors.head()

mod = sm.OLS(euc['drift'], predictors)
res = mod.fit()
print(res.summary())

res.params

res.pvalues

len(authors)

# ### Visualize cumulative drift as a function of age

# +
# Bootstrap a 95% band for mean drift in 5-year age bins.
meanratio = []
upperratio = []
lowerratio = []
midyear = []

for yr in range(23, 68, 5):
    df = euc.loc[(euc.midage >= yr) & (euc.midage < yr + 5), :]
    meanratio.append(np.mean(df.doubleresid) / (np.mean(df.singleresid) / 2) - 1)
    midyear.append(yr + 2)  # given the way>= and < work above this and not 2.5 is right
    bootstraps = []
    for iter in range(10000):
        dfsample = df.sample(frac = 1, replace = True)
        bootstraps.append(np.mean(dfsample.doubleresid) / (np.mean(dfsample.singleresid) / 2) - 1)
    bootstraps.sort()
    lowerratio.append(bootstraps[250])   # ~2.5th percentile of 10000 resamples
    upperratio.append(bootstraps[9749])  # ~97.5th percentile

displayframe = pd.DataFrame({'observed': meanratio, 'midpoint': midyear,
                             'upper': upperratio, 'lower': lowerratio})
# -

# Draw plot with error band and extra formatting to match seaborn style
plt.rcParams.update({'font.sans-serif':'Avenir'})
plt.rcParams.update({'font.size': 15})

fig, ax = plt.subplots(figsize=(9,6))
ax.plot(displayframe.midpoint, displayframe.observed, label='cumulative change')
ax.plot(displayframe.midpoint, displayframe.lower, color='tab:blue', alpha=0.1)
ax.plot(displayframe.midpoint, displayframe.upper, color='tab:blue', alpha=0.1)
ax.fill_between(displayframe.midpoint, displayframe.lower, displayframe.upper, alpha=0.2)
ax.set_xlabel('age at midpoint of triplet')
ax.set_ylabel('amount 1 => 3 exceeds single-step change')
ax.set_ylim(0, 0.12)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.gcf().set_facecolor('white')
plt.savefig('/Users/tunder/Dropbox/python/cohort/figures/cumulativedrift.png', bbox_inches = 'tight', dpi = 300)
plt.show()

# #### absolute value of change

# +
# Same bootstrap, but for the raw (gap-adjusted) 1->3 distance itself.
meanratio = []
upperratio = []
lowerratio = []
midyear = []

for yr in range(23, 68, 5):
    df = euc.loc[(euc.midage >= yr) & (euc.midage < yr + 5), :]
    meanratio.append(np.mean(df.doubleresid))
    midyear.append(yr + 2)  # given the way>= and < work above this and not 2.5 is right
    bootstraps = []
    for iter in range(10000):
        dfsample = df.sample(frac = 1, replace = True)
        bootstraps.append(np.mean(dfsample.doubleresid))
    bootstraps.sort()
    lowerratio.append(bootstraps[250])
    upperratio.append(bootstraps[9749])

displayframe2 = pd.DataFrame({'observed': meanratio, 'midpoint': midyear,
                              'upper': upperratio, 'lower': lowerratio})
# -

# Draw plot with error band and extra formatting to match seaborn style
plt.rcParams.update({'font.sans-serif':'Avenir'})
plt.rcParams.update({'font.size': 15})

fig, ax = plt.subplots(figsize=(9,6))
ax.plot(displayframe2.midpoint, displayframe2.observed, label='cumulative change')
ax.plot(displayframe2.midpoint, displayframe2.lower, color='tab:blue', alpha=0.1)
ax.plot(displayframe2.midpoint, displayframe2.upper, color='tab:blue', alpha=0.1)
ax.fill_between(displayframe2.midpoint, displayframe2.lower, displayframe2.upper, alpha=0.2)
ax.set_xlabel('age at midpoint of triplet')
ax.set_ylabel('abs distance from book 1 to book 3')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.gcf().set_facecolor('white')
plt.savefig('/Users/tunder/Dropbox/python/cohort/figures/absolutedistance1to3.png', bbox_inches = 'tight', dpi = 300)
plt.show()

# ## Model of totally random change, for reference.

# +
# Pure random walk in 200 dimensions: the two-step / one-step ratio here is the
# theoretical baseline against which observed drift is compared.
averageonestep = []
realchange = []
startpoint = np.zeros(200)

for i in range(40000):
    a = startpoint + np.random.normal(size = 200)
    b = a + np.random.normal(size = 200)
    c = b + np.random.normal(size = 200)
    realchange.append(euclidean(a, c))
    averageonestep.append((euclidean(a, b) + euclidean(b, c)) / 2)

print(np.mean(realchange) / np.mean(averageonestep))
# -

# I'm pretty sure that's not a random figure, but is in fact the square root of 2.
#
# Which means the parameter we're assessing would be .414 in a random walk.

# ## What if we sample authors proportional to productivity?

# +
weightedauthors = random.choices(bookdata.hathi_author.values, k = 10000)  # randomly select author names
# with replacement, from a list where authors appear once for each book

weightedtriads = maketriplets(weightedauthors)
# -

euc = weightedtriads.loc[weightedtriads.dim == -1, : ]  # Dimension -1 is the euclidean distance in topic space
euc = euc.assign(midage = euc.initialage + (euc.agegap / 2))

byage = euc.groupby('midage').agg({'dim': 'count', 'single': 'mean', 'double': 'mean',
                                   'initialage': 'mean', 'agegap': 'mean'})
byage['ratio'] = byage.double / byage.single
byage.reset_index(inplace = True, drop = False)

byage.plot(x = 'midage', y = 'ratio')

pearsonr(byage.midage, byage.ratio)

euc = euc.assign(ratio = euc.double / euc.single)
predictors = euc.loc[ : , ['midage', 'agegap']]
predictors = sm.add_constant(predictors, prepend = False)

mod = sm.OLS(euc['ratio'], predictors)
res = mod.fit()
print(res.summary())

# ## Autocorrelation
#
# This portion is not used because it turns out that there's no great way to measure autocorrelation across lots of short time series at different scales.
def makeautocorr(authorselection):
    """Collect (previous value, current value) pairs per topic for lag-1
    autocorrelation.

    For each author, one random triad of consecutive books (deduplicated by
    year) yields two lagged pairs per topic: (t-1, t) and (t, t+1), plus the
    mean age and age gap of each step. Returns a DataFrame with columns
    dim / minus1 / minus0 / meanage / agegap.

    Relies on the module-level `bookdata`; topic proportions are assumed to
    occupy columns 0-199 and author age column 207 -- TODO confirm.
    """
    dimensions = []
    tminus1 = []
    tminus0 = []
    meanage = []
    agegaps = []
    ctr = 0
    errors = 0

    for auth in authorselection:
        authworks = bookdata.loc[bookdata.hathi_author == auth, :].copy()  # all the author's books
        authworks = authworks.sample(frac = 1)  # shuffle so the kept date-duplicate is random
        authworks = authworks.drop_duplicates(subset = 'firstpub', keep = 'first')  # one book per year
        authworks = authworks.sort_values(by = 'firstpub')  # chronological order

        if len(authworks) < 3:
            # can happen if an author has only 3 works and 2 are date-duplicates
            errors += 1
            continue

        centerpoints = [x for x in range(1, len(authworks) - 1)]  # a triad cannot be centered
        # on the first or last book
        rownum = random.choice(centerpoints)  # randomly choose an allowable triad center

        prevage = int(authworks.iloc[rownum - 1, 207])  # author's age at start of triad
        thisage = int(authworks.iloc[rownum, 207])
        nextage = int(authworks.iloc[rownum + 1, 207])
        agegap = nextage - prevage

        for tnum in range(0, 200):  # and then for each topic tnum
            # authworks.iloc[ : , tnum] = zscore(authworks.iloc[ : , tnum])
            prevwork = authworks.iat[rownum - 1, tnum]  # previous value of tnum
            thiswork = authworks.iat[rownum, tnum]      # value at triad center
            nextwork = authworks.iat[rownum + 1, tnum]  # next value of tnum

            # Pair (t-1, t)
            dimensions.append(tnum)
            tminus1.append(prevwork)
            tminus0.append(thiswork)
            meanage.append((prevage + thisage) / 2)
            agegaps.append((thisage - prevage))

            # Pair (t, t+1)
            dimensions.append(tnum)
            tminus1.append(thiswork)
            tminus0.append(nextwork)
            meanage.append((thisage + nextage) / 2)
            agegaps.append((nextage - thisage))

        ctr += 1
        if ctr % 500 == 1:
            print(ctr)  # progress indicator

    autocorr = pd.DataFrame({'dim': dimensions, 'minus1': tminus1, 'minus0': tminus0,
                             'meanage': meanage, 'agegap': agegaps})
    print('Errors: ', errors)
    return autocorr


# +
equalauthors = random.choices(authors, k = 1000)  # randomly select author names
# with replacement, from a list
# where each author is present once, without regard to # of books
# We're selecting most authors much more than once, but then, a lot of them
# have more than one possible triplet

autocorr = makeautocorr(equalauthors)

# +
# Lag-1 Pearson autocorrelation per topic.
corrs = []
for i in range(200):
    df = autocorr.loc[autocorr.dim == i, : ]
    r = pearsonr(df.minus1, df.minus0)[0]
    corrs.append(r)
# -

# BUG FIX: topicmeans has no 'ratio' column -- its per-topic drift statistic is
# stored as 'drift' (see "Aggregate triplets by topic" above), so the original
# `topicmeans.ratio` raised AttributeError. Compare autocorrelation to drift.
pearsonr(corrs, topicmeans.drift)

sns.scatterplot(x = corrs, y = topicmeans.drift)

sns.scatterplot(x = df.minus0, y = df.minus1)
tripletdistance/PersistenceOfChange.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# #!pip install chainer
# #!pip install chainercv

# +
#
# https://github.com/chainer/chainer/blob/master/examples/dcgan/net.py
#
import numpy as np

import chainer
import chainer.functions as F
import chainer.links as L


def add_noise(device, h, sigma=0.2):
    # Add Gaussian instance noise to discriminator activations (a common GAN
    # stabilization trick); only active in training mode.
    if chainer.config.train:
        #xp = device.xp
        xp = device  # NOTE(review): callers pass the array module (cupy) itself,
                     # not a Device object -- hence this override. Confirm intent.
        randn = xp.random.randn(*h.shape)
        return h + sigma * randn
    else:
        return h


class Generator(chainer.Chain):
    # DCGAN generator: n_hidden-dim uniform noise -> RGB image in [0, 1].

    def __init__(self, n_hidden, bottom_width=4, ch=512, wscale=0.02):
        super(Generator, self).__init__()
        self.n_hidden = n_hidden
        self.ch = ch
        self.bottom_width = bottom_width

        with self.init_scope():
            w = chainer.initializers.Normal(wscale)
            # Project noise to a (ch, bottom_width, bottom_width) map, then
            # upsample 4 -> 8 -> 16 -> 32 with stride-2 deconvolutions.
            self.l0 = L.Linear(self.n_hidden, bottom_width * bottom_width * ch,
                               initialW=w)
            self.dc1 = L.Deconvolution2D(ch, ch // 2, 4, 2, 1, initialW=w)
            self.dc2 = L.Deconvolution2D(ch // 2, ch // 4, 4, 2, 1, initialW=w)
            self.dc3 = L.Deconvolution2D(ch // 4, ch // 8, 4, 2, 1, initialW=w)
            self.dc4 = L.Deconvolution2D(ch // 8, 3, 3, 1, 1, initialW=w)
            self.bn0 = L.BatchNormalization(bottom_width * bottom_width * ch)
            self.bn1 = L.BatchNormalization(ch // 2)
            self.bn2 = L.BatchNormalization(ch // 4)
            self.bn3 = L.BatchNormalization(ch // 8)

    def make_hidden(self, batchsize):
        # Latent noise drawn uniformly from [-1, 1].
        dtype = chainer.get_dtype()
        return np.random.uniform(-1, 1, (batchsize, self.n_hidden, 1, 1)).astype(dtype)

    def forward(self, z):
        h = F.reshape(F.relu(self.bn0(self.l0(z))),
                      (len(z), self.ch, self.bottom_width, self.bottom_width))
        h = F.relu(self.bn1(self.dc1(h)))
        h = F.relu(self.bn2(self.dc2(h)))
        h = F.relu(self.bn3(self.dc3(h)))
        x = F.sigmoid(self.dc4(h))  # sigmoid keeps pixel values in [0, 1]
        return x


class Discriminator(chainer.Chain):
    # DCGAN discriminator: image -> real/fake logit, with input noise at
    # every layer via add_noise.

    def __init__(self, bottom_width=4, ch=512, wscale=0.02, device='xyz'):
        # `device` is forwarded to add_noise; callers pass the cupy module.
        # The 'xyz' default is a placeholder string -- TODO confirm it is
        # never used without an explicit device argument.
        w = chainer.initializers.Normal(wscale)
        super(Discriminator, self).__init__()
        self.device = device
        with self.init_scope():
            self.c0_0 = L.Convolution2D(3, ch // 8, 3, 1, 1, initialW=w)
            self.c0_1 = L.Convolution2D(ch // 8, ch // 4, 4, 2, 1, initialW=w)
            self.c1_0 = L.Convolution2D(ch // 4, ch // 4, 3, 1, 1, initialW=w)
            self.c1_1 = L.Convolution2D(ch // 4, ch // 2, 4, 2, 1, initialW=w)
            self.c2_0 = L.Convolution2D(ch // 2, ch // 2, 3, 1, 1, initialW=w)
            self.c2_1 = L.Convolution2D(ch // 2, ch // 1, 4, 2, 1, initialW=w)
            self.c3_0 = L.Convolution2D(ch // 1, ch // 1, 3, 1, 1, initialW=w)
            self.l4 = L.Linear(bottom_width * bottom_width * ch, 1, initialW=w)
            self.bn0_1 = L.BatchNormalization(ch // 4, use_gamma=False)
            self.bn1_0 = L.BatchNormalization(ch // 4, use_gamma=False)
            self.bn1_1 = L.BatchNormalization(ch // 2, use_gamma=False)
            self.bn2_0 = L.BatchNormalization(ch // 2, use_gamma=False)
            self.bn2_1 = L.BatchNormalization(ch // 1, use_gamma=False)
            self.bn3_0 = L.BatchNormalization(ch // 1, use_gamma=False)

    def forward(self, x):
        device = self.device
        h = add_noise(device, x)
        h = F.leaky_relu(add_noise(device, self.c0_0(h)))
        h = F.leaky_relu(add_noise(device, self.bn0_1(self.c0_1(h))))
        h = F.leaky_relu(add_noise(device, self.bn1_0(self.c1_0(h))))
        h = F.leaky_relu(add_noise(device, self.bn1_1(self.c1_1(h))))
        h = F.leaky_relu(add_noise(device, self.bn2_0(self.c2_0(h))))
        h = F.leaky_relu(add_noise(device, self.bn2_1(self.c2_1(h))))
        h = F.leaky_relu(add_noise(device, self.bn3_0(self.c3_0(h))))
        return self.l4(h)  # raw logit; loss applies softplus


# +
#
# https://github.com/chainer/chainer/blob/master/examples/dcgan/updater.py
#
from chainer import Variable


class DCGANUpdater(chainer.training.updaters.StandardUpdater):
    # StandardUpdater specialized for adversarial training of (gen, dis).

    def __init__(self, *args, **kwargs):
        self.gen, self.dis = kwargs.pop('models')
        super(DCGANUpdater, self).__init__(*args, **kwargs)

    def loss_dis(self, dis, y_fake, y_real):
        # Discriminator loss: softplus(-y_real) + softplus(y_fake),
        # i.e. the standard non-saturating GAN objective.
        batchsize = len(y_fake)
        L1 = F.sum(F.softplus(-y_real)) / batchsize
        L2 = F.sum(F.softplus(y_fake)) / batchsize
        loss = L1 + L2
        chainer.report({'loss': loss}, dis)
        return loss

    def loss_gen(self, gen, y_fake):
        # Generator wants fakes scored as real.
        batchsize = len(y_fake)
        loss = F.sum(F.softplus(-y_fake)) / batchsize
        chainer.report({'loss': loss}, gen)
        return loss

    def update_core(self):
        gen_optimizer = self.get_optimizer('gen')
        dis_optimizer = self.get_optimizer('dis')

        batch = self.get_iterator('main').next()
        device = self.device
        x_real = Variable(self.converter(batch, device)) / 255.  # scale pixels to [0, 1]

        gen, dis = self.gen, self.dis
        batchsize = len(batch)

        y_real = dis(x_real)

        #z = Variable(device.xp.asarray(gen.make_hidden(batchsize)))
        # NOTE(review): hard-codes cupy, so this updater is GPU-only.
        z = Variable(chainer.cuda.cupy.asarray(gen.make_hidden(batchsize)))
        x_fake = gen(z)
        y_fake = dis(x_fake)

        dis_optimizer.update(self.loss_dis, dis, y_fake, y_real)
        gen_optimizer.update(self.loss_gen, gen, y_fake)


# +
#
# https://github.com/chainer/chainer/blob/master/examples/dcgan/visualize.py
#
import os

from PIL import Image

import chainer.backends.cuda


def out_generated_image(gen, dis, rows, cols, seed, dst):
    # Trainer extension: periodically save a rows x cols grid of generated
    # images under {dst}/preview/.
    @chainer.training.make_extension()
    def make_image(trainer):
        np.random.seed(seed)  # fixed seed so grids are comparable across iterations
        n_images = rows * cols
        xp = gen.xp
        z = Variable(xp.asarray(gen.make_hidden(n_images)))
        with chainer.using_config('train', False):
            x = gen(z)
        x = chainer.backends.cuda.to_cpu(x.array)
        np.random.seed()  # re-randomize the global RNG afterwards

        x = np.asarray(np.clip(x * 255, 0.0, 255.0), dtype=np.uint8)
        _, _, H, W = x.shape
        # Tile (rows*cols, 3, H, W) into a single (rows*H, cols*W, 3) image.
        x = x.reshape((rows, cols, 3, H, W))
        x = x.transpose(0, 3, 1, 4, 2)
        x = x.reshape((rows * H, cols * W, 3))

        preview_dir = '{}/preview'.format(dst)
        preview_path = preview_dir + '/image{:0>8}.png'.format(trainer.updater.iteration)
        if not os.path.exists(preview_dir):
            os.makedirs(preview_dir)
        Image.fromarray(x).save(preview_path)
    return make_image
# -


# Setup an optimizer
def make_optimizer(model, alpha=0.0002, beta1=0.5):
    # Adam with the standard DCGAN beta1=0.5, plus light weight decay.
    optimizer = chainer.optimizers.Adam(alpha=alpha, beta1=beta1)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(0.0001), 'hook_dec')
    return optimizer


# +
import os

from chainer import training
from chainer.training import extensions

# Hyperparameters (flat args_* variables stand in for argparse flags).
args_device = 0
args_batchsize = 50
args_n_hidden = 100
args_epoch = 1000
args_dataset = ''
args_out = 'result'
args_snapshot_interval = 1000
args_display_interval = 100
args_seed = 0

# Select and activate the CUDA device for this process.
#device = chainer.cuda.get_device(args_device)
#device.use()
chainer.cuda.Device(args_device).use()
device = chainer.cuda.Device(args_device)

print(chainer.__version__)
print('Device: {}'.format(device))
print('# Minibatch-size: {}'.format(args_batchsize))
print('# n_hidden: {}'.format(args_n_hidden))
print('# epoch: {}'.format(args_epoch))
print('')

# Set up a neural network to train
gen = Generator(n_hidden=args_n_hidden)
dis = Discriminator(device=chainer.cuda.cupy)
gen.to_gpu(device)  # Copy the model to the device
dis.to_gpu(device)

opt_gen = make_optimizer(gen)
opt_dis = make_optimizer(dis)

if args_dataset == '':
    # Load the CIFAR10 dataset if args_dataset is not specified
    train, _ = chainer.datasets.get_cifar10(withlabel=False, scale=255.)
else:
    all_files = os.listdir(args_dataset)
    # NOTE(review): substring match, so e.g. 'png_notes.txt' would also be
    # picked up — kept as-is to preserve behaviour; consider endswith().
    image_files = [f for f in all_files if ('png' in f or 'jpg' in f)]
    # BUG FIX: was `args.dataset` — there is no `args` namespace in this
    # script (it uses plain `args_*` variables), so this branch raised
    # NameError whenever a custom dataset directory was configured.
    print('{} contains {} image files'.format(args_dataset, len(image_files)))
    train = chainer.datasets.ImageDataset(paths=image_files, root=args_dataset)

# +
# Setup an iterator
train_iter = chainer.iterators.SerialIterator(train, args_batchsize)

# Setup an updater
updater = DCGANUpdater(
    models=(gen, dis),
    iterator=train_iter,
    optimizer={'gen': opt_gen, 'dis': opt_dis},
    device=args_device)

# +
# Setup a trainer
trainer = training.Trainer(updater, (args_epoch, 'epoch'), out=args_out)

snapshot_interval = (args_snapshot_interval, 'iteration')
display_interval = (args_display_interval, 'iteration')
trainer.extend(extensions.snapshot(filename='snapshot_iter_{.updater.iteration}.npz'),
               trigger=snapshot_interval)
trainer.extend(extensions.snapshot_object(gen, 'gen_iter_{.updater.iteration}.npz'),
               trigger=snapshot_interval)
trainer.extend(extensions.snapshot_object(dis, 'dis_iter_{.updater.iteration}.npz'),
               trigger=snapshot_interval)
trainer.extend(extensions.LogReport(trigger=display_interval))
trainer.extend(
    extensions.PrintReport(['epoch', 'iteration', 'gen/loss', 'dis/loss']),
    trigger=display_interval)
trainer.extend(extensions.ProgressBar(update_interval=500))
trainer.extend(
    out_generated_image(gen, dis, 10, 10, args_seed, args_out),
    trigger=snapshot_interval)

# Run the full training loop (args_epoch epochs).
trainer.run()
# -

from chainercv.utils.image import read_image
from chainercv.visualizations import vis_image
# %matplotlib inline

# Display the generated previews saved by out_generated_image at a few
# checkpoints: 1, 50, 100 and 280 epochs (roughly 3 / 6 / 18 hours of
# training for the later ones).
for preview_path in ('result/preview/image00001000.png',
                     'result/preview/image00050000.png',
                     'result/preview/image00100000.png',
                     'result/preview/image00280000.png'):
    vis_image(read_image(preview_path))

# +
import pandas as pd
import matplotlib.pyplot as plt

# Plot the generator/discriminator loss curves recorded by LogReport.
result = pd.read_json('result/log')
result[['gen/loss', 'dis/loss']].plot()
01_dcgan_cifar10/dcgan_using_chainer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# To open this notebook in Google Colab and start coding, click on the Colab icon below.
# <table style="border:2px solid orange" align="left">
#  <td style="border:2px solid orange ">
#   <a target="_blank" href="https://colab.research.google.com/github/neuefische/ds-welcome-package/blob/main/programming/5_Intro_to_OOP.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
#  </td>
# </table>

# ---

# # Introduction to Object-oriented programming
#
# In this notebook we will only cover a very superficial introduction; when you start to really program seriously, do go beyond this lesson.
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/1/1c/Alice_through_the_looking_glass.jpg"
#      alt="Alice through the looking glass"
#      style="float: left; margin-right: 10px; height: 300px" />

# **Object-oriented programming (OOP)** is a way of writing code using "objects". Computer programs that are not object-oriented are a list of instructions for the computer, which is called procedural programming. In OOP computer programs use objects that talk to one another to change the data in those objects and to work the way the user wants. OOP allows code reusability, in different parts of the program or by different developers.
#
# Python, like most programming languages, is a mix of different ways of writing computer programs. For example, Python allows both object-oriented programming and procedural programming.

# ## Features
#
# In object-oriented programming everything is an object. And there are different types of objects:
# - Variables, known as **attributes** in OOP, can hold different types of information.
For example, an attrubute like *age* for a person could be of integer data type and *names* could be a list of strings. # - Procedures, known as **methods** in OOP, are lists of instructions telling the computer to take input, do some calculations or chage data and return an output. For example a person can say their name. In procedural programming they are called functions. # - **Classes** are collections of different attributes and methods. # # **Objects** is a term used to refer to instances of classes. # ## Example # # Let's take the example with person. We can create a **Human** class with three attributes: *age*, *names*, and *friend*. And two methods, to *tell the age* and *say the names*. The \_\_init\_\_ method is special method classes have called Constructor. class Human(object): def __init__(self, names, age, friend = None): self.names = names self.age = age self.friend = friend def say_name(self): print("My name is "+' '.join(self.names)) #concatenate names def how_old_am_i(self): print(f"I am {self.age} years old") #create a new Human object with 2 names and age 10 tarrant = Human(['Tarrant', 'Hightopp'], 20) #create a new Human who is 10 has three names and is friends whith tarrant alice = Human(['Alice', 'Pleasance', 'Liddell'], 10, friend=tarrant) #Let's try the methods tarrant.say_name() tarrant.how_old_am_i() alice.say_name() alice.how_old_am_i() #let's find out the name of Alice's friend #the friend is in this case an object instance of class Human alice.friend.say_name() type(alice.friend) # ## Check your understanding # # Create a *Animal* class with the following attributes: color, name, type, favourite snack and owner. # Add the attributes to call the cat and find out the favourite snack. # # Create a new **Animal** object called *Chessur*, of color grey, who loves salmon and is owned by alice. Can you guess what type of animal Chessur is? # How old is alice? # + # Code here #class .. 
# - # ## Classes we use every day # # Did you know that **integers** and **strings** are classes too? They have then a bunch of attributes and methods associated with them. You can access the attributes and methods using the *dir* function in Python. name = 'alice' print(type(name)) dir(name) # As you can see there are some method that begin and end with double underscores. These are called **special methods** and they aren't usually called by those names. Instead, they're called by some internal Pyuthon functions, for example **\_\_len\_\_** method is called by the **len** function. # # To see what a specific method does you can use teh **help** function. help(name.upper) # ### Exercise # # What does the **title** method do? # # Call it on the name object. # + # Code here # - # ## Object comparison # # When comparing objects there are two operators: **==** and **is** # # The **==** operator compares the values of objects (the data they hold), while **is** compares their identites. We often care about values and not identities, so **==** appears more often. However, if you are comparing a viriable to a singleton, then using *is* makes sense. The most often case is checking if a variable is bound to *None*. # # ``` # tarrant.friend is None # this will return True # alice.friend is not None # this will return True # ``` # # **==** is the syntactic sugar to **\_\_eq\_\_** # # When to use **is** and why: if you want to check if something exists, if you want to check that you have indeed distinct objects, so that you do not accidently edit the wrong object. The is operator is faster than ==, so use it when applcable. 
# # # Fun fact: for Germans this might comparison is supported by the language itself, where you have **gleich** and **dasselbe** # looking at lists, where copies are shallow by default l1 = [3, [55,44], (7,8,9)] l2 = list(l1) # this creates a copy of list l1 print(l2) print(l1 == l2) print(l1 is l2) # ## Check your understanding # + # looking at integers creating some new integers a = 5 b = 5 c = 10 print(a == b) print(a == c) print(a is b) # - # This only happens because.. Python caches small integers.. try it out with 1000 .. is 1000 the same as 10**3 ? # # What about strings? If you have a variable `x = 'a'` is 'aa' equal or the same as x*2 ? # # What happens if you assign l1 to l3 with `l3 = l1`, are they distinct lists or the same? How would you check that? What is the value of l1 if you .. run `l3.append('a')` # code x = 'a' # ## Inheritance and Code Reuse # # Just like people have parents, grandparents and so on.. objects have an ancestry. The principle of inheritance lets programmers build relationships between concepts and group them together. It allows us to reduce code duplication by generalizing our code. # # For our *Human* example we could have the *ImaginaryHuman* class.. as we can all agree that some of the poeple in Alice in Wonderland were imaginary.. One can add more properties and new methods or just use **pass** if no changes are needed. class ImaginaryHuman(Human): pass red = ImaginaryHuman(["The Red Queen"], 100) red.say_name() # ### Check your understanding # # Create an imaginary pet class and make a new object.. you can use as inspiration [Alice in wonderland fandom](https://aliceinwonderland.fandom.com/wiki/Alice) # # Is the new object of the same type as object you made for Chessur? How can you check that? # + #code # - # # Summary # # Congratulations! Hope you had fun with Alice and classes and OOP. 
#
#
# Here is what you should be familiar with by now:
# * how to create a class, with attributes and methods
# * how to create an instance of a class
# * how to inherit from a class
# * when to use `is` vs `==`
programming/5_Intro_to_OOP.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Covid-19 India Visualization and Animation # ## Description # # Coronavirus disease 2019 (COVID-19) is an infectious disease caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2).[6] The disease was first identified in December 2019 in Wuhan, the capital of China's Hubei province, and has since spread globally, resulting in the ongoing 2019–20 coronavirus pandemic.[7][8] Common symptoms include fever, cough, and shortness of breath.[9] Other symptoms may include muscle pain, sputum production, diarrhea, sore throat, loss of smell, and abdominal pain.[4][10][11] While the majority of cases result in mild symptoms, some progress to viral pneumonia and multi-organ failure.[7][12] As of 3 April 2020, more than 1,040,000[5] cases of COVID-19 have been reported in more than two hundred countries and territories,[13] resulting in over 55,100 deaths.[5] More than 221,000 people have recovered.[5] # # The virus is mainly spread during close contact,[a] and by small droplets produced when people cough, sneeze, or talk.[14][16][15] Respiratory droplets may be produced during breathing but the virus is not generally airborne.[14][16] People may also contract COVID-19 by touching a contaminated surface and then their face.[14][15] It is most contagious when people are symptomatic, although spread may be possible before symptoms appear.[15] The virus can survive on surfaces up to 72 hours.[17] Time from exposure to onset of symptoms is generally between two and fourteen days, with an average of five days.[9][18] The standard method of diagnosis is by reverse transcription polymerase chain reaction (rRT-PCR) from a nasopharyngeal swab.[19] The infection can also be diagnosed from a combination of symptoms, risk factors and a chest CT scan 
showing features of pneumonia.[20][21] # # Recommended measures to prevent infection include frequent hand washing, social distancing (maintaining physical distance from others, especially from those with symptoms), covering coughs and sneezes with a tissue or inner elbow, and keeping unwashed hands away from the face.[22][23] The use of masks is recommended for those who suspect they have the virus and their caregivers.[24] Recommendations for mask use by the general public vary, with some authorities recommending against their use, some recommending their use, and others requiring their use.[25][26][27] Currently, there is no vaccine or specific antiviral treatment for COVID-19. Management involves treatment of symptoms, supportive care, isolation, and experimental measures.[28] # # The World Health Organization (WHO) declared the 2019–20 coronavirus outbreak a Public Health Emergency of International Concern (PHEIC)[29][30] on 30 January 2020, and a pandemic on 11 March 2020.[8] Local transmission of the disease has been recorded in many countries across all six WHO regions.[31] # # Source https://en.wikipedia.org/wiki/Coronavirus_disease_2019 # + hide_input=true jupyter={"source_hidden": true} # import libs import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from IPython.display import Markdown, display # %matplotlib inline from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot init_notebook_mode(connected=True) import cufflinks as cf cf.go_offline() import plotly.express as px import plotly.graph_objects as go # Load covid_19_india covid_19_india_df = pd.read_csv('./data/covid_19_india.csv', index_col='Sno') covid_19_india_df.Date = pd.to_datetime(covid_19_india_df.Date, format='%d/%m/%y') # + [markdown] hide_input=true # ## All cases # + hide_input=true jupyter={"outputs_hidden": true, "source_hidden": true} date_table = covid_19_india_df.groupby(['Date', 
'State/UnionTerritory']).sum().reset_index().set_index('Date') total = date_table.loc[date_table.last_valid_index()].sum() confirmed_count = total.Confirmed death_count = total.Deaths cured_count = total.Cured # + hide_input=true cured = "<span style='color:{}; font-size:1.4em;'>{}</span>".format('green', 'Cured - '+str(cured_count)) confirmed = "<span style='color:{}; font-size:1.4em;'>{}</span>".format('blue', 'Confirmed cases - '+str(confirmed_count)) deaths = "<span style='color:{}; font-size:1.4em;'>{}</span>".format('red', 'Deaths - '+str(death_count)) display(Markdown(cured)) display(Markdown(confirmed)) display(Markdown(deaths)) # + hide_input=true jupyter={"source_hidden": true} from plotly.subplots import make_subplots fig = make_subplots(rows=1, cols=3, shared_yaxes=True, subplot_titles=("Total deaths - " + str(death_count),"Total confirmed - " + str(confirmed_count), "Total cured - " + str(cured_count))) # Create and style traces fig.add_trace(go.Scatter(x=date_group.Date, y=date_group.Deaths, name='Deaths', line=dict(color='firebrick', width=1), mode='lines+markers'), row=1, col=1) fig.add_trace(go.Scatter(x=date_group.Date, y=date_group.Confirmed, name = 'Confirmed', line=dict(color='royalblue', width=1), mode='lines+markers',), row=1, col=2) fig.add_trace(go.Scatter(x=date_group.Date, y=date_group.Cured, name='Cured', line=dict(color='green', width=1), mode='lines+markers',), row=1, col=3) fig.update_layout( title="All Cases", xaxis_title="Date", yaxis_title="Count", font=dict( family="Courier New, monospace", size=18, color="black" ) ) fig.update_xaxes(ticks="inside") fig.update_yaxes(ticks="inside", col=1) fig.show() # + hide_input=true jupyter={"source_hidden": true} fig = go.Figure(data=[ go.Bar(name='Confirmed', x=date_group.Date, y=date_group.Confirmed), go.Bar(name='Deaths', x=date_group.Date, y=date_group.Deaths), go.Bar(name='Cured', x=date_group.Date, y=date_group.Cured) ]) # Change the bar mode fig.update_layout(autosize=False, width=1000, 
height=750, title="All Cases", xaxis_title="Date", yaxis_title="Count", barmode='relative') fig.show() # - # ## Confirmed cases # + hide_input=true jupyter={"source_hidden": true} date_group = covid_19_india_df.groupby(['Date']).sum() date_group.reset_index(inplace=True) date_group.sort_values('Date',inplace=True) import plotly.express as px fig = px.bar(date_group, x='Date', y='Confirmed', color='Confirmed') fig.update_layout( title="Confirmed cases", font=dict( family="Courier New, monospace", size=18, color="black" ) ) fig.show() # - # ## Deaths # + hide_input=true jupyter={"source_hidden": true} fig = px.bar(date_group, x='Date', y='Deaths', color='Deaths') fig.update_layout( title="Deaths", font=dict( family="Courier New, monospace", size=18, color="black" ) ) fig.show() # - # ## Cured # + hide_input=true jupyter={"source_hidden": true} fig = px.bar(date_group, x='Date', y='Cured', color='Cured') fig.update_layout( title="Cured trend", font=dict( family="Courier New, monospace", size=18, color="black" ) ) fig.show() # - # ## State wise trend # + hide_input=true jupyter={"source_hidden": true} fig = go.Figure(data=[ go.Bar(name='Confirmed', x=date_table.loc[date_table.last_valid_index()]['State/UnionTerritory'], y=date_table.loc[date_table.last_valid_index()].Confirmed), go.Bar(name='Deaths', x=date_table.loc[date_table.last_valid_index()]['State/UnionTerritory'], y=date_table.loc[date_table.last_valid_index()].Deaths), go.Bar(name='Cured', x=date_table.loc[date_table.last_valid_index()]['State/UnionTerritory'], y=date_table.loc[date_table.last_valid_index()].Cured) ]) # Change the bar mode fig.update_layout( autosize=False, width=1000, height=750, title="State wise cases", xaxis_title="State", yaxis_title="Count", barmode='stack') fig.show() # - # ### Confirmed cases pie chart # + hide_input=true jupyter={"source_hidden": true} fig = px.pie(date_table.loc[date_table.last_valid_index()], values='Confirmed', names='State/UnionTerritory', title='Confirmed cases') 
fig.update_layout( autosize=False, width=900, height=650) fig.update_traces(textposition='inside', textinfo='percent+label') fig.show() # - # ### Deaths pie chart # + hide_input=true jupyter={"source_hidden": true} fig = px.pie(date_table.loc[date_table.last_valid_index()], values='Deaths', names='State/UnionTerritory', title='Deaths') fig.update_layout( autosize=False, width=900, height=650) fig.update_traces(textposition='inside', textinfo='percent+label') fig.show() # - # ### Cured pie chart # + hide_input=true jupyter={"source_hidden": true} fig = px.pie(date_table.loc[date_table.last_valid_index()], values='Cured', names='State/UnionTerritory', title='Cured') fig.update_layout( autosize=False, width=900, height=650) fig.update_traces(textposition='inside', textinfo='percent+label') fig.show() # - # ### State wise confirmed cases animation # + hide_input=true jupyter={"source_hidden": true} import matplotlib hex_colors_dic = {} rgb_colors_dic = {} hex_colors_only = [] for name, hex in matplotlib.colors.cnames.items(): hex_colors_only.append(hex) hex_colors_dic[name] = hex rgb_colors_dic[name] = matplotlib.colors.to_rgb(hex) dummy = pd.DataFrame() dummy['State'] = covid_19_india_df['State/UnionTerritory'].unique() import plotly.io as pio def animate_bar_chart(): frames = [] grouped = covid_19_india_df[['Date', 'State/UnionTerritory', 'Confirmed', 'Deaths', 'Cured']].groupby(['Date']) #colors = px.colors.n_colors('rgb(127, 60, 141)', 'rgb(165, 170, 153)', 33, colortype='rgb') colors = np.random.choice(hex_colors_only, 33).tolist() for name, group in iter(grouped): merged = pd.merge(group, dummy, how='outer', left_on='State/UnionTerritory', right_on='State') merged.fillna(0, inplace=True) merged.sort_values('State', inplace=True) frames.append(go.Frame(data = [go.Bar(x = merged['State'].tolist(), y=merged['Confirmed'].tolist(), marker_color=colors)], layout=go.Layout(title='Confirmed cases - '+group.Date.iloc[0].strftime('%Y-%m-%d')))) fig = go.Figure( data = 
[go.Bar(x = merged['State'].tolist(), y = [0] * len(merged['State'].tolist()))], frames=frames, layout=go.Layout( width=1000, height=750, xaxis=dict(type='category'), yaxis=dict(range=[0, 600], autorange=False), title="Confirmed cases", xaxis_title="State", yaxis_title="Count", updatemenus=[dict( type="buttons", buttons=[dict(label="Play", method="animate", args=[None])])])) fig.show() animate_bar_chart() # -
.ipynb_checkpoints/Covid-19 India Analysis-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# Importing dependencies
import sklearn
from sklearn.utils import shuffle
from sklearn.neighbors import KNeighborsClassifier
# BUG FIX: `sklearn.model_selection` was used below without being imported;
# `import sklearn` alone does not make the submodule available.
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn import linear_model, preprocessing

# Load the car-evaluation dataset; every column is categorical text.
cars_dataset = pd.read_csv('cars.csv', sep=',')
print(cars_dataset.head())

# Encode each categorical column as integers so KNN can compute distances.
le = preprocessing.LabelEncoder()
buying = le.fit_transform(list(cars_dataset['buying']))
maint = le.fit_transform(list(cars_dataset['maint']))
door = le.fit_transform(list(cars_dataset['door']))
persons = le.fit_transform(list(cars_dataset['persons']))
lug_boot = le.fit_transform(list(cars_dataset['lug_boot']))
safety = le.fit_transform(list(cars_dataset['safety']))
cls = le.fit_transform(list(cars_dataset['class']))

# Features: the six encoded attributes per row. Target: the encoded class.
X = list(zip(buying, maint, door, persons, lug_boot, safety))
y = list(cls)

# Hold out 10% of the rows for evaluation.
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.1)

# Fit a 9-nearest-neighbours classifier.
Friday = KNeighborsClassifier(n_neighbors=9)
Friday.fit(x_train, y_train)

# BUG FIX: the original called `model.score(...)`, but no `model` variable
# exists (the fitted classifier is named `Friday`) -> NameError at runtime.
acc = Friday.score(x_test, y_test)
print("Friday at", (acc * 100), '% Accuracy.')

predictions = Friday.predict(x_test)

# Map encoded class labels back to their original names.
#           0        1      2       3
decoder = ['unacc', 'acc', 'good', 'vgood']
for i in range(len(predictions)):
    print("Friday Predicted : ", decoder[predictions[i]], "Actual Class : ", decoder[y_test[i]])
day four/Day 4 - KNN Classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Navigation # # --- # # You are welcome to use this coding environment to train your agent for the project. Follow the instructions below to get started! # # ### 1. Start the Environment # # Run the next code cell to install a few packages. This line will take a few minutes to run! # !pip -q install ./python # The environment is already saved in the Workspace and can be accessed at the file path provided below. Please run the next code cell without making any changes. # + from unityagents import UnityEnvironment import numpy as np # please do not modify the line below env = UnityEnvironment(file_name="/data/Banana_Linux_NoVis/Banana.x86_64") # - # Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python. # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] brain # ### 2. Examine the State and Action Spaces # # Run the code cell below to print some information about the environment. # + # reset the environment env_info = env.reset(train_mode=True)[brain_name] # number of agents in the environment print('Number of agents:', len(env_info.agents)) # number of actions action_size = brain.vector_action_space_size print('Number of actions:', action_size) # examine the state space state = env_info.vector_observations[0] print('States look like:', state) state_size = len(state) print('States have length:', state_size) # - # ### 3. Take Random Actions in the Environment # # In the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment. 
# # Note that **in this coding environment, you will not be able to watch the agent while it is training**, and you should set `train_mode=True` to restart the environment. # + env_info = env.reset(train_mode=True)[brain_name] # reset the environment state = env_info.vector_observations[0] # get the current state score = 0 # initialize the score while True: action = np.random.randint(action_size) # select an action env_info = env.step(action)[brain_name] # send the action to the environment next_state = env_info.vector_observations[0] # get the next state reward = env_info.rewards[0] # get the reward done = env_info.local_done[0] # see if episode has finished score += reward # update the score state = next_state # roll over the state to next time step if done: # exit loop if episode finished break print("Score: {}".format(score)) # - # When finished, you can close the environment. # + #env.close() # - # ### 4. It's Your Turn! # # Now it's your turn to train your own agent to solve the environment! A few **important notes**: # - When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following: # ```python # env_info = env.reset(train_mode=True)[brain_name] # ``` # - To structure your work, you're welcome to work directly in this Jupyter notebook, or you might like to start over with a new file! You can see the list of files in the workspace by clicking on **_Jupyter_** in the top left corner of the notebook. # - In this coding environment, you will not be able to watch the agent while it is training. However, **_after training the agent_**, you can download the saved model weights to watch the agent on your own machine! 
from collections import deque
import matplotlib.pyplot as plt
# %matplotlib inline

# +
import torch, torch.nn as nn
import torch.nn.functional as F
import math
from torch.autograd import Variable


class NoisyLinear(nn.Module):
    """Linear layer with learnable factorised Gaussian noise (NoisyNet).

    Replaces epsilon-greedy exploration: the effective weight is
    ``mu + sigma * eps`` where ``mu`` and ``sigma`` are trained and the
    noise sample ``eps`` is redrawn via ``reset_noise()``.
    """

    def __init__(self, in_features, out_features, init_std=0.4):
        super(NoisyLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.init_std = init_std
        # nn.Module training flag; False (module.eval()) disables the noise.
        self.training = True

        # Learnable mean and noise-scale for the weight matrix.
        self.weight_mu = nn.Parameter(torch.FloatTensor(out_features, in_features))
        self.weight_sigma = nn.Parameter(torch.FloatTensor(out_features, in_features))
        # Noise sample is a buffer: saved with the model but never trained.
        self.register_buffer('weight_epsilon', torch.FloatTensor(out_features, in_features))

        # Same three tensors for the bias vector.
        self.bias_mu = nn.Parameter(torch.FloatTensor(out_features))
        self.bias_sigma = nn.Parameter(torch.FloatTensor(out_features))
        self.register_buffer('bias_epsilon', torch.FloatTensor(out_features))

        self.reset_parameter()  # initialise mu and sigma
        self.reset_noise()      # draw the first epsilon sample

    def forward(self, X):
        """Apply the (noisy, while training) affine map to ``X``."""
        if self.training:
            weight = self.weight_mu + self.weight_sigma * self.weight_epsilon
            bias = self.bias_mu + self.bias_sigma * self.bias_epsilon
        else:
            # BUG FIX: evaluation must be deterministic and use the mean
            # weights only.  The previous code added the full ``sigma``
            # tensor to ``mu`` at eval time, biasing every forward pass.
            weight = self.weight_mu
            bias = self.bias_mu
        return F.linear(X, weight, bias)

    def reset_parameter(self):
        """Initialise mu uniformly in +/- 1/sqrt(fan_in), sigma to init_std/sqrt(fan_in)."""
        mu_range = 1 / math.sqrt(self.in_features)
        self.weight_mu.data.uniform_(-mu_range, mu_range)
        self.bias_mu.data.uniform_(-mu_range, mu_range)
        self.weight_sigma.data.fill_(self.init_std * mu_range)
        self.bias_sigma.data.fill_(self.init_std * mu_range)

    def reset_noise(self):
        """Resample the non-trainable factorised noise buffers."""
        epsilon_out = self.scale_noise(self.out_features)
        epsilon_in = self.scale_noise(self.in_features)
        # Outer product (out x 1)(1 x in) -> (out x in): factorised noise
        # only needs out+in random draws instead of out*in.
        self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))
        # Draw a fresh vector for the bias instead of reusing epsilon_out.
        self.bias_epsilon.copy_(self.scale_noise(self.out_features))

    def scale_noise(self, size):
        """Return f(x) = sign(x) * sqrt(|x|) applied to ``size`` samples of N(0, 1)."""
        X = torch.randn(size)
        X = X.sign().mul(X.abs().sqrt())
        return X


# +
##Network
class QNetwork(nn.Module):
    """Dueling Q-network with NoisyLinear heads.

    Two shared dense layers feed a state-value stream V(s) and an
    advantage stream A(s, a); Q-values are combined as
    Q = V + A - mean_a(A).
    """

    def __init__(self, state_size, action_size, seed, fc1_size=64, fc2_size=128):
        """Initialize parameters and build the layers."""
        super(QNetwork, self).__init__()
        self.state_size = state_size
        self.action_size = action_size
        self.seed = torch.manual_seed(seed)
        self.FC1_size = fc1_size
        self.FC2_size = fc2_size

        # Shared trunk.
        self.FC1 = nn.Linear(self.state_size, self.FC1_size)
        self.FC2 = nn.Linear(self.FC1_size, self.FC2_size)

        ##Noisy DQN and Dueling DQN: advantage stream
        self.noisy_advantage1 = NoisyLinear(self.FC2_size, self.FC2_size)
        self.noisy_advantage2 = NoisyLinear(self.FC2_size, action_size)

        ##Noisy DQN and Dueling DQN: value stream
        self.noisy_value1 = NoisyLinear(self.FC2_size, self.FC2_size)
        self.noisy_value2 = NoisyLinear(self.FC2_size, 1)

    def forward(self, state):
        """Build a network that maps state -> action values."""
        x = F.relu(self.FC1(state))
        x = F.relu(self.FC2(x))

        # Advantage stream.  BUG FIX: the second noisy layer must consume
        # the first one's activation, not the shared trunk output ``x``
        # (the old code passed ``x`` to both, leaving noisy_advantage1 and
        # noisy_value1 completely unused).
        advantage = F.relu(self.noisy_advantage1(x))
        advantage = self.noisy_advantage2(advantage)

        # Value stream (same fix).
        value = F.relu(self.noisy_value1(x))
        value = self.noisy_value2(value)

        ##Dueling DQN aggregation.  The advantage mean is taken per sample
        ##over the action dimension (a global mean mixes batch elements).
        x = value + advantage - advantage.mean(dim=1, keepdim=True)
        return x

    def reset_noise(self):
        """Resample exploration noise in all four noisy layers."""
        self.noisy_value1.reset_noise()
        self.noisy_value2.reset_noise()
        self.noisy_advantage1.reset_noise()
        self.noisy_advantage2.reset_noise()


# +
import numpy as np
import random
from collections import namedtuple, deque
import torch.optim as optim

##HYPERPARAMETERS
BUFFER_SIZE = int(1e5)  ##replay buffer size
BATCH_SIZE = 128        ##minibatch size
GAMMA = .99             ##discount factor
TAU = 1e-3              # for soft update of target parameters
LR = 5e-4               # learning rate
UPDATE_EVERY = 4        ## how often local network gets copied to target network

if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")


# +
## Define the Replay Buffer ( Deque)

class ReplayBuffer:
    """Fixed-size buffer to store experience tuples."""

    def __init__(self, action_size, buffer_size, batch_size, seed):
        self.seed = random.seed(seed)
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.action_size = action_size
        self.experience = namedtuple("Experience",
                                     field_names=["state", "actions", "rewards", "next_state", "done"])
        # deque(maxlen=...) silently evicts the oldest transition when full.
        self.memory = deque(maxlen=buffer_size)

    def add(self, state, action, rewards, next_state, done):
        """Append one transition to memory."""
        experience = self.experience(state, action, rewards, next_state, done)
        self.memory.append(experience)

    def sample(self):
        """Randomly sample a batch from experience as stacked device tensors."""
        experiences = random.sample(self.memory, k=self.batch_size)

        state = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
        # Actions index Q-values via gather(), so they must stay integer (long).
        actions = torch.from_numpy(np.vstack([e.actions for e in experiences if e is not None])).long().to(device)
        rewards = torch.from_numpy(np.vstack([e.rewards for e in experiences if e is not None])).float().to(device)
        next_state = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
        done = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None])).float().to(device)

        return state, actions, rewards, next_state, done

    def __len__(self):
        """Return the current size of internal memory."""
        return len(self.memory)


# +
## Define the Agent

class Agent():
    """Interacts with and learns from the environment.

    Combines Double DQN targets, a dueling noisy Q-network, soft target
    updates and uniform experience replay.
    """

    def __init__(self, state_size, action_size, seed, batch_size=BATCH_SIZE,
                 buffer_size=BUFFER_SIZE, lr=LR, gamma=GAMMA):
        self.state_size = state_size
        self.action_size = action_size
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.lr = lr
        self.gamma = gamma
        self.seed = random.seed(seed)

        # Q-Network: online (trained) and target (slowly tracking) copies.
        self.qnetwork_local = QNetwork(self.state_size, self.action_size, seed).to(device)
        self.qnetwork_target = QNetwork(self.state_size, self.action_size, seed).to(device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=self.lr)
        self.criterion = nn.MSELoss()

        # The target network is only ever written via soft_update, never trained.
        for param in self.qnetwork_target.parameters():
            param.requires_grad = False

        # Replay memory.
        self.memory = ReplayBuffer(action_size, buffer_size, batch_size, seed)
        # Time step counter (for updating every UPDATE_EVERY steps).
        self.t_step = 0

    def step(self, state, actions, rewards, next_state, done):
        """Store one transition and trigger learning every UPDATE_EVERY steps."""
        self.memory.add(state, actions, rewards, next_state, done)
        self.t_step = (self.t_step + 1) % UPDATE_EVERY
        ## Don't learn on every added transition; wait UPDATE_EVERY steps
        ## between gradient updates, and only once the buffer has a batch.
        if self.t_step == 0:
            if len(self.memory) > self.batch_size:
                experiences = self.memory.sample()
                self.learn(experiences, self.gamma)

    def learn(self, experiences, gamma):
        """Update value parameters using given batch of experience tuples."""
        state, actions, rewards, next_state, done = experiences

        # Q(s, a) of the online network for the actions actually taken.
        current_values = self.qnetwork_local(state).gather(1, actions)

        # Double DQN: the online network picks the argmax action, the
        # target network evaluates it.  (1 - done) zeroes the bootstrap
        # term on terminal transitions.
        best_actions = self.qnetwork_local(next_state).max(1)[1].unsqueeze(1)
        target_values = rewards + gamma * self.qnetwork_target(next_state).gather(1, best_actions) * (1 - done)

        # Compute loss and take one optimisation step.
        loss = self.criterion(current_values, target_values)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # ----- Update the target network -----
        # learn() only ever runs when t_step == 0, so sync every call.
        self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)

        ##Noisy DQN: resample exploration noise after each update.
        self.qnetwork_local.reset_noise()
        self.qnetwork_target.reset_noise()

    def soft_update(self, qnetwork_local, qnetwork_target, tau):
        """theta_target <- tau * theta_local + (1 - tau) * theta_target."""
        for local_parms, target_parms in zip(qnetwork_local.parameters(),
                                             qnetwork_target.parameters()):
            target_parms.data.copy_(tau * local_parms.data + (1.0 - tau) * target_parms.data)

    def act(self, state, epsilon=0.0):
        """Returns actions for given state as per current policy (epsilon-greedy)."""
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        self.qnetwork_local.eval()  # noise-free, deterministic forward pass
        with torch.no_grad():
            actions_values = self.qnetwork_local(state)
        ##Back to train mode
        self.qnetwork_local.train()

        # Epsilon-greedy action selection.
        if random.random() > epsilon:
            return np.argmax(actions_values.cpu().data.numpy())
        else:
            return np.random.choice(np.arange(self.action_size))
# -


def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=.01, eps_decay=.995,
        beta_start=0.4, beta_frames=1000, trained_score=16):
    """Train the global ``agent`` on the global Unity environment ``env``.

    ``beta_start``/``beta_frames`` are kept for interface compatibility
    (prioritised replay) but are currently unused.
    Returns the list of per-episode scores.
    """
    import time
    scores = []
    scores_window = deque(maxlen=100)  # last 100 scores for the rolling mean
    eps = eps_start
    for episode in range(1, n_episodes + 1):
        score = 0
        ##reset the environment
        env_info = env.reset(train_mode=True)[brain_name]
        state = env_info.vector_observations[0]
        for t in range(max_t):
            action = agent.act(state, eps)
            env_info = env.step(action)[brain_name]
            next_state = env_info.vector_observations[0]
            rewards = env_info.rewards[0]
            # Convert done from bool to 0/1 so it can scale the bootstrap term.
            done = 1 if env_info.local_done[0] else 0
            agent.step(state, action, rewards, next_state, done)
            state = next_state
            score += rewards
            if done:
                break
        scores.append(score)
        scores_window.append(score)
        eps = max(eps_end, eps * eps_decay)  ## reduce randomness epsilon as we learn
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(episode, np.mean(scores_window)), end="")
        if episode % 100 == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(episode, np.mean(scores_window)))
        if np.mean(scores_window) >= trained_score:
            print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(
                episode - 100, np.mean(scores_window)))
            torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
            break
    return scores


# +
env_observation_space = 37  # length of the Banana state vector
env_action_space = 4        # forward / backward / turn left / turn right

agent = Agent(env_observation_space, env_action_space, seed=0)
scores = dqn()

# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.xlabel('Episodes')
plt.ylabel('Scores')
plt.show()
# -
Navigation Bananas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Tipe Data dan Operator # # Setiap pemrograman memiliki tipe data bawaan, begitu juga Python. Dalam notebook ini, kita akan mulai berkenalan dengan tipe data yang ada dalam Python dan juga bagaimana kita bisa menggunakan Python untuk melakukan operasi-operasi yang biasa kita lakukan menggunakan kalkulator. # + [markdown] tags=[] # ## Python Sebagai Kalkulator # # Salah satu tujuan awal manusia membuat sebuah program komputer adalah untuk membantu melakukan komputasi, mulai dari yang sederhana sampai komputasi yang bagi manusia terlalu rumit untuk dikerjakan. Python tentu saja dengan sangat mudah bisa melakukan komputasi macam itu. # # Di matematika, kita mengenal beberapa operasi aritmatika seperti penjumlahan, pengurangan, perkalian, dan lainnya. Dari situ, kita mengenal operator `+` untuk penjumlahan, `x` untuk perkalian, dan seterusnya. Di Python, kita bisa melakukan operasi yang sama dengan notasi operator yang hampir sama. # # Beberapa operator di Python: # * `+` penjumlahan # * `-` pengurangan # * `*` perkalian # * `/` pembagian # * `**` operasi eksponen/pangkat (bahasa pemrograman yang lain mungkin menggunakan `^`) # * `//` pembagian yang hasillnya dibulatkan ke bawah. # - # ```{note} # `print` adalah fungsi bawaan dari Python yang digunakan untuk menampilkan nilai masukan (*input*) apapun ke luaran sebagai **teks** # ``` # # Sebagai contoh penggunaan operator di atas, kita bisa melakukan beberapa operasi sederhana seperti berikut. print(25 + 30) print(10 - 5 * 20) print(4 / 2) print(4 % 2) print(12 ** 2) print(111 // 5) # ## Variabel dan Operator Penugasan # # Pendefinisian variabel dalam Python sangat sederhana. Kita bisa mendefinisikan variabel dengan menggunakan operator `=`, sebagai contoh, `current_year = 2021`. 
Di sini, `current_year` adalah **nama variabel**, **=** adalah operator penugasan (*assignment operator*), yang diberi nilai **2021**. # # Jika kita ingin mendefinisikan lebih dari satu variabel, kita bisa menuliskan seperti di bawah ini. # # ```{code-block} python # x = 10 # y = 7 # z = 12 # ``` # # atau, cara pendefinisian yang lebih *pythonic*, # # ```{code-block} python # x, y, z = 10, 7, 12 # ``` # # Dalam pendefinisian variabel, ada beberapa aturan yang perlu diperhatikan: # 1. Hanya gunakan **huruf**, **angka**, dan **garis bawah (`_`)**. # 2. **Jangan** menggunakan **spasi**. # 3. **Diawali** dengan **huruf** atau **garis bawah** # 4. Tidak bisa menggunakan **kata kunci yang sudah dipakai** yang merupakan bawaan dari Python. Contoh kata kunci bawaan yang tidak bisa kita pakai seperti pada gambar di bawah ini dan selengkapnya bisa dilihat [di sini](https://pentangle.net/python/handbook/node52.html). # 5. Cara yang lebih *pythonic* untuk menamai variabel adalah dengan menggunakan semua **hurus kecil** dan setiap suku kata **dipisahkan** oleh **garis bawah (`_`)**. # # ![](../assets/images/python-keywords.png) # # ```{note} # Cara kita menamai sebuah variabel disebut dengan **snake case** karena kita cenderung menghubungkan atau memisahkan setiap suku kata dengan garis bawah, layaknya tubuh ular. # ``` # akan muncul error karena ada spasi my population = 12091849 # akan muncul error karena diawali dengan angka 1_var = 100 # + current_year = 2021 last_year = 2020 print(current_year, last_year) # + # ini adalah baris komentar dalam Python (baris yang dimulai dengan `#`) x, y, z = 10, 11, 12 print(x) print(y) print(z) # - # ## Operator Penugasan Lain # # Selain `=`, ada beberapa operator penugasan lain yang mungkin berguna dan menyingkat penulisan seperti `+=`, `-=`, atau juga `*=`. 
# # Sebagai contoh, jika kita ingin melakukan perubahan variabel `current_year`, kita bisa menulis # # ```python # current_year = current_year + 1 # ``` # # Karena kita hanya akan memperbarui nilai `current_year` dan akan masih menggunakan nama variabel yang sama, kita bisa menggunakan operator `+=`. Sehinga, akan jadi seperti # # ```{code-block} python # current_year += 1 # ``` # # Mari kita praktikkan ⬇ # + my_population = 12091840 my_population = my_population + 1000000 print(my_population) my_population *= 0.5 print(my_population) # - # Selain operator penugasan di atas, ada juga operator perbandingan yang digunakan untuk membandingkan suatu nilai dengan nilai lainnya. Berikut adalah tabel operator perbandingan. # # (comparison-operator)= # # |Symbol Use Case|Bool|Operation| # |---------------|----|---------| # 5 < 3 | False | Less Than # 5 > 3 | True | Greater Than # 3 <= 3 | True | Less Than or Equal To # 3 >= 5 | False | Greater Than or Equal To # 3 == 5 | False | Equal To # 3 != 5 | True | Not Equal To # # Hasil dari operasi perbandingan tersebut adalah sebuah `Boolean`, yaitu salah satu tipe data yang menyatakan suatu objek bernilai **benar** (`True`) atau **salah** (`False`). # ## Tipe Data dalam Python # # ### Numerik # # Tipe data pertama yang akan kita bahas adalah tipe data numerik. Dalam tipe data numerik, terdapat dua jenis tipe data: # * `int`, untuk nilai-nilai bilangan bulat # * `float`, untuk nilai-nilai bilangan desimal atau real # * `complex`, untuk nilai-nilai bilanga kompleks # # ```{note} # Untuk mengetahui tipe data suatu objek dalam Python, kita bisa menggunakan fungsi bawaan Python yaitu `type`, misal `type(10)`. # ``` # # #### Integers # # Dengan kita menulis `1`, `-10`, `1000000`, `-999`, dan sejenisnya, kita sudah menginisiasi data integer. Sebagai contoh, kita bisa mencari tahu secara langsung tipe data dari nilai tersebut atau juga dengan mendefinisikannya ke dalam sebuah variabel. 
# # ```python # print(type(1), type(-10)) # ``` # # atau dengan memasukkannya ke dalam variabel seperti berikut. # # ```python # value = -100 # print(type(value)) # ``` print(type(10)) print(type(1), type(-10)) # #### Float # # Tipe data `float` adalah semua nilai desimal seperti `0.14`, `-10.2092`, dan bahkan `100.0`. Contoh terakhir bertipe data `float` karena meskipun dalam matematika sama dengan `100`, tapi karena kita membubuhi notasi desimal (`.`), maka Python akan menganggap objek tersebut bertipe data `float`. print(3.15, type(3.14)) print(.5101, type(.05101)) print(10.0, type(10.0)) # Python juga menyediakan fungsi bawaan `float` dan `int` yang salah satu kegunaanny adalah untuk mengubah tipe data yang awalnya integer menjadi float dan sebaliknya. # # ````{tip} # fungsi `float` dan `int` juga bisa digunakan untuk mendefinisikan nilai kosong (`0`) jika kita memanggilnya tanpa ada argumen # # ```python # simple_int = int() # simple_float = float() # ``` # # ```` # + # konversi tipe data dari float ke int, dan sebaliknya x_int = 10 y_float = 3.14 x_float = float(x_int) y_int = int(y_float) print(x_int, y_int) print(x_float, y_float) print(float(y_float)) # + simple_int = int() simple_float = float() print(simple_int, simple_float) # - # ### Teks # # Suatu teks, dalam Python, diwakili dengan tipe data `str` (baca: *string*). Teks adalah salah satu tipe data barisan dalam Python. Ada beberapa cara untuk mendefiniskan teks, yaitu membungkus teks dengan: # * *double quotes* (`""`) # * *single quotes* (`''`) # * *triple double quotes* (`""" """`) # * atau, *triple single quotes* (`''' '''`) # # Kita bisa menggunakan *double qoutes* atau *single qoute* untuk menulis teks dalam satu baris yang sama dan menggunakan *triple double quotes* atau *triple single quotes* untuk lebih dari satu baris. 
# # ````{admonition} Eksplorasi # Buat 3 variabel yang memuat masing-masing teks di bawah ini sesuai dengan nama variabel yang sudah ditentukan: # * nama variabel → `bootcamp` # ``` # Selamat datang di Bitlabs! # ``` # * nama variabel → `novel` # ``` # "Arif, cepat ke sini!", ujar ibunya. # ``` # * nama variabel → `text` # ``` # Mungkin al'ad sudah tidak sanggup. # # Tapi, apa mungkin? # ``` # ```` # + bootcamp = ... novel = ... text = ... print(bootcamp, novel, text) # - # ### Boolean # # Jika kita lihat kembali tabel [operator perbandingan](comparison-operator) di atas, semua nilai yang dikembalikan bertipe `boolean` atau `bool`. Nilai boolean terdiri dari 2 jenis, `True` dan `False`. Dalam bahasa pemrograman lain, penulisannya mungkin akan sedikit berbeda, seperti javascript menggunakan huruf kecil `true`. # # Nilai `True` dan `False` juga sering dikonversi ke dalam bentuk numerik yang memiliki nilai `1` dan `0` secara berturut-turut. Mari kita coba di cell bawah ini. print(5 < 3) print(3 <= 5) print(5 != 5.) print("bit" == "Bit") print("abjad" > "Abjad") print("abjad" > "abjaf") print("1" >= "10") is_more = x > z print(is_more, type(is_more)) # Kita juga bisa menggabungkan beberapa perbandingan menggunakan logika penggabungan `and`, `or`, atau `not`. Selengkapnya dapat dilihat pada tabel di bawah ini. # # |Logical Use Case|Bool|Operation| # |---------------|----|---------| # 15 < 13 `and` 5 == 5 | False | `and` - Dievaluasi `True` jika semua pernyataan benar # 15 > 13 `or` 5 != 5. | True | `or` - Dievaluasi `True` jika salah satu pernyataan benar # `not` 15 < 13 | True | `not` - Kebalikan dari nilai boolean # # ```{admonition} Eksplorasi # Lengkapilah tabel kebenaran di bawah ini jika kita definisikan `p = q = 1`. # # | p | q | p `and` q | p `or` q | # | :-: | :-: | :---------: | :---------: | # | p | q | `True` | `True` | # | p | `not` q | ... | ... | # | `not` p | q | ... | ... | # | `not` p | `not` q | `False` | `False` | # ``` # + p = q = 1 p_and_notq = ... 
notp_and_q = ... notp_and_notq = ... p_or_notq = ... notp_or_q = ... notp_or_notq = ... # - # Jika kita memasukkan nilai boolean ke dalam fungsi bawaan `int` atau `float`, maka nilai `True` akan berubah menjadi `1` dan `1.0` untuk integer dan float secara berturut-turut, dan sebaliknya untuk `False`. # # Python juga menyediakan fungsi bawaan `bool` yang bisa mengonversi tipe data lain ke dalam bentuk boolean. # # ```python # print(bool(0)) # print(bool(1)) # print(bool("False")) # print(bool("")) # print(bool(0.)) # print(bool) # ```
book/python/data-types-and-operators.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Yandex DataSphere Kernel # language: python # name: python3 # --- # + [markdown] id="71AQJg3CDMn9" cellId="hsbyb4tyki9nx32utdtjpk" # # Deep learning for computer vision # # # This notebook will teach you to build and train convolutional networks for image recognition. Brace yourselves. # + [markdown] id="LaGKim3ykLXp" cellId="rxq6pql5x4tle9o386ep" # [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/yandexdataschool/Practical_DL/blob/spring20/seminar3/seminar3_pytorch.ipynb) # + [markdown] id="2MaELIpIDMoA" cellId="34l2kkk7t84llsmzyxus1" # # Tiny ImageNet dataset # This week, we shall focus on the image recognition problem on Tiny Image Net dataset # * 100k images of shape 3x64x64 # * 200 different classes: snakes, spaiders, cats, trucks, grasshopper, gull, etc. # # + id="rS_-00tYDMoB" cellId="g2i37mixtk9kkxkki1y8" # #!L import torchvision import torch from torchvision import transforms # + id="sCvh1ICbHNCE" cellId="k1eayz1ur2mqly9zrk5my" # #!S:bash wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1UksGhGn63aQLAfGrAkGzdx69U6waEHPR' -O tinyim3.png wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=19qsD0o7pfAI8UYxgDY18sdRjV0Aantn2' -O tiny_img.py wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=12IrLjz8pss4284xsBAJt6CW6yELPH4tL' -O tiniim.png # + id="5rQhiYyRDMoG" cellId="5nh892g5zpl9qv5fki8vpk" # #!L from tiny_img import download_tinyImg200 data_path = '.' 
download_tinyImg200(data_path) # + id="5vq5Cm0ADMoK" cellId="jrzsbgniodgtg1hif324k9" # #!L dataset = torchvision.datasets.ImageFolder('tiny-imagenet-200/train', transform=transforms.ToTensor()) test_dataset = torchvision.datasets.ImageFolder('tiny-imagenet-200/val', transform=transforms.ToTensor()) train_dataset, val_dataset = torch.utils.data.random_split(dataset, [80000, 20000]) test_dataset, val_dataset = torch.utils.data.random_split(val_dataset, [10000, 10000]) # + id="tY6OUeOODMoN" cellId="6md8io0fesfby4r9per3jb" # #!L batch_size = 50 train_batch_gen = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=1) # + id="HBgW-gzwDMoQ" cellId="hsq566ut87vokpkiq68" # #!L val_batch_gen = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True, num_workers=1) # + [markdown] id="swKtJaVyDMoU" cellId="rjwf5t0s4f8zbnfmyu7q" # ## Image examples ## # + [markdown] id="h5wImXEaDMoV" cellId="nbxuu26h8hhcgzzgeh5nh" # # # <tr> # <td> <img src="https://github.com/yandexdataschool/Practical_DL/blob/sem3spring2019/week03_convnets/tinyim3.png?raw=1" alt="Drawing" style="width:90%"/> </td> # <td> <img src="https://github.com/yandexdataschool/Practical_DL/blob/sem3spring2019/week03_convnets/tinyim2.png?raw=1" alt="Drawing" style="width:90%"/> </td> # </tr> # # + [markdown] id="Do-qRQp8DMoW" cellId="w71ngep3y8jm2s3xt0qg" # <tr> # <td> <img src="https://github.com/yandexdataschool/Practical_DL/blob/sem3spring2019/week03_convnets/tiniim.png?raw=1" alt="Drawing" style="width:90%"/> </td> # </tr> # + [markdown] id="arxSyhBLDMoX" cellId="fxzxgbl11g2dixss4t9nx" # # Building a network # # Simple neural networks with layers applied on top of one another can be implemented as `torch.nn.Sequential` - just add a list of pre-built modules and let it train. 
# + id="7QF2hMVxDMoY" cellId="g5yf9z66xdpvq688ze2d8"
# #!L
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

# + [markdown] id="DJ6QKG3hDMoa" cellId="6yn15hpuolcmryork2oqs"
# Let's start with a dense network for our baseline:

# + id="u_mbfRXMDMob" cellId="f985tf2dvssqwmyc6w99d"
# #!L
model = nn.Sequential()

# reshape from "images" to flat vectors.
# BUG FIX: the notebook used a custom ``Flatten()`` that is never defined
# anywhere in this file (NameError on execution); ``nn.Flatten`` is the
# built-in equivalent.
model.add_module('flatten', nn.Flatten())

# dense "head".  A ReLU follows each hidden Linear layer: without the
# non-linearities a stack of Linear layers collapses into one linear map
# and the extra layers add no representational power.
model.add_module('dense1', nn.Linear(3 * 64 * 64, 1064))
model.add_module('relu1', nn.ReLU())
model.add_module('dense2', nn.Linear(1064, 512))
model.add_module('relu2', nn.ReLU())
model.add_module('dropout0', nn.Dropout(0.05))
model.add_module('dense3', nn.Linear(512, 256))
model.add_module('relu3', nn.ReLU())
model.add_module('dropout1', nn.Dropout(0.05))
model.add_module('dense4', nn.Linear(256, 64))
model.add_module('dropout2', nn.Dropout(0.05))
model.add_module('dense1_relu', nn.ReLU())
model.add_module('dense2_logits', nn.Linear(64, 200))  # logits for 200 classes

# use the GPU when available, otherwise fall back to CPU
if torch.cuda.is_available():
    device = torch.device('cuda:0')
else:
    device = torch.device('cpu')
model.to(device)
device

# + [markdown] id="DvugZZbeDMoe" cellId="7dh3d8xmkeinv4kx0g079"
# As in our basic tutorial, we train our model with negative log-likelihood aka crossentropy.

# + id="cGEhRWMYDMof" cellId="3y7p7o6s7vecpf3kpktj8v"
# #!L
def compute_loss(X_batch, y_batch):
    """Mean cross-entropy of the global ``model`` on one minibatch.

    ``X_batch`` is any array-like of shape (B, 3, 64, 64), ``y_batch``
    any array-like of B integer class labels.
    """
    X_batch = torch.FloatTensor(X_batch).to(device=device)
    y_batch = torch.LongTensor(y_batch).to(device=device)
    logits = model.to(device)(X_batch)
    return F.cross_entropy(logits, y_batch).mean()


# + [markdown] id="kEhnKaujDMoi" cellId="p4a6luymigbplsoliurw"
# ### Training on minibatches
# * We got 100k images, that's way too many for a full-batch SGD. 
Let's train on minibatches instead # * Below is a function that splits the training sample into minibatches # + id="GMA79nDODMoi" cellId="lcktpsnetg10nod1roloyr" opt = torch.optim.SGD(model.parameters(), lr=0.01) train_loss = [] val_accuracy = [] # + id="sEy0LiHxDMol" cellId="w8rht9ygh7uns89ypozln" import numpy as np opt = torch.optim.SGD(model.parameters(), lr=0.01) train_loss = [] val_accuracy = [] num_epochs = 50 # total amount of full passes over training data import time for epoch in range(num_epochs): start_time = time.time() model.train(True) # enable dropout / batch_norm training behavior for (X_batch, y_batch) in train_batch_gen: # train on batch loss = compute_loss(X_batch, y_batch) loss.backward() opt.step() opt.zero_grad() train_loss.append(loss.cpu().data.numpy()) model.train(False) # disable dropout / use averages for batch_norm for X_batch, y_batch in val_batch_gen: logits = model(Variable(torch.FloatTensor(X_batch)).cuda()) y_pred = logits.max(1)[1].data val_accuracy.append(np.mean( (y_batch.cpu() == y_pred.cpu()).numpy() )) # Then we print the results for this epoch: print("Epoch {} of {} took {:.3f}s".format( epoch + 1, num_epochs, time.time() - start_time)) print(" training loss (in-iteration): \t{:.6f}".format( np.mean(train_loss[-len(train_dataset) // batch_size :]))) print(" validation accuracy: \t\t\t{:.2f} %".format( np.mean(val_accuracy[-len(val_dataset) // batch_size :]) * 100)) # + [markdown] id="77q6ffk0DMon" cellId="42doe8nw4umunw6t9k3m" # Don't wait for full 100 epochs. You can interrupt training after 5-20 epochs once validation accuracy stops going up. 
# ``` # ``` # # ### Final test # + id="KmK71tpNDMoo" cellId="3bwb0sgr9emg6lbxu9q0uw" model.train(False) # disable dropout / use averages for batch_norm test_batch_acc = [] for X_batch, y_batch in val_batch_gen: logits = model(Variable(torch.FloatTensor(X_batch)).cuda()) y_pred = logits.max(1)[1].data test_batch_acc.append(np.mean( (y_batch.cpu() == y_pred.cpu()).numpy() )) test_accuracy = np.mean(test_batch_acc) print("Final results:") print(" test accuracy:\t\t{:.2f} %".format( test_accuracy * 100)) if test_accuracy * 100 > 70: print("U'r freakin' amazin'!") elif test_accuracy * 100 > 50: print("Achievement unlocked: 110lvl Warlock!") elif test_accuracy * 100 > 40: print("Achievement unlocked: 80lvl Warlock!") elif test_accuracy * 100 > 30: print("Achievement unlocked: 70lvl Warlock!") elif test_accuracy * 100 > 20: print("Achievement unlocked: 60lvl Warlock!") else: print("We need more magic! Follow instructons below") # + [markdown] id="Cy06JedIDMos" cellId="0z0tvct7nkypcm4qv6c00m" # ## Task I: small convolution net # ### First step # # Let's create a mini-convolutional network with roughly such architecture: # * Input layer # * 3x3 convolution with 128 filters and _ReLU_ activation # * 2x2 pooling (or set previous convolution stride to 3) # * Flatten # * Dense layer with 1024 neurons and _ReLU_ activation # * 30% dropout # * Output dense layer. # # # __Convolutional layers__ in torch are just like all other layers, but with a specific set of parameters: # # __`...`__ # # __`model.add_module('conv1', nn.Conv2d(in_channels=3, out_channels=128, kernel_size=3)) # convolution`__ # # __`model.add_module('pool1', nn.MaxPool2d(2)) # max pooling 2x2`__ # # __`...`__ # # # Once you're done (and compute_loss no longer raises errors), train it with __Adam__ optimizer with default params (feel free to modify the code above). # # If everything is right, you should get at least __16%__ validation accuracy. 
# # __HACK_OF_THE_DAY__ :the number of channels must be in the order of the number of class_labels # + [markdown] id="jB5zePLiJBcw" cellId="e966aarcrr6yn8ju7we72" # ### Before we start: # **Stride, Padding and Kernel_size** # + id="RJLbJonGIUrQ" cellId="ecg65c1ujoqbi7m9pp9rik" from IPython.display import Image Image(url='https://deeplearning.net/software/theano/_images/numerical_padding_strides.gif') # + id="lEZfV3fIDMos" cellId="hjs8r7vldzsoyxy4s4qkcp" model = nn.Sequential() model.add_module #decribe convnet here model.add_module('flatten', Flatten()) model.add_module('dense1_logits', nn.Linear(10368, 200)) # logits for 200 classes # + id="SuIuiCmUDMov" cellId="e5xtixbhyyavdeohu2xlj" opt = torch.optim.SGD(model.parameters(), lr=0.01) train_loss = [] val_accuracy = [] # + id="32STsNW0DMox" cellId="xzol5eh69mbh0y78gj04a" from torchsummary import summary summary(model.cuda(), (3, 64, 64)) # + [markdown] id="4k_VEEp-DMoz" cellId="fnkxzg6svarh6ljph9u13s" # ## retrain it ## # + id="NAG-g8QrDMo0" cellId="wodsy8etj2it77rny5u44l" import time num_epochs = 100 # total amount of full passes over training data batch_size = 50 # number of samples processed in one SGD iteration for epoch in range(num_epochs): print (num_epochs) # In each epoch, we do a full pass over the training data: start_time = time.time() model.train(True) # enable dropout / batch_norm training behavior for (X_batch, y_batch) in train_batch_gen: # train on batch loss = compute_loss(X_batch, y_batch) loss.backward() opt.step() opt.zero_grad() train_loss.append(loss.data.cpu().numpy()) print (num_epochs) model.train(False) # disable dropout / use averages for batch_norm for X_batch, y_batch in val_batch_gen: logits = model(Variable(torch.FloatTensor(X_batch)).cuda()) y_pred = logits.max(1)[1].data val_accuracy.append(np.mean( (y_batch.cpu() == y_pred.cpu()).numpy() )) print (num_epochs) # Then we print the results for this epoch: print("Epoch {} of {} took {:.3f}s".format( epoch + 1, num_epochs, time.time() 
- start_time)) print(" training loss (in-iteration): \t{:.6f}".format( np.mean(train_loss[-len(train_dataset) // batch_size :]))) print(" validation accuracy: \t\t\t{:.2f} %".format( np.mean(val_accuracy[-len(val_dataset) // batch_size :]) * 100)) # + [markdown] id="92eADk6qDMo2" cellId="k1rhr6a3i5ictyja2ja09w" # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # ``` # # __Hint:__ If you don't want to compute shapes by hand, just plug in any shape (e.g. 1 unit) and run compute_loss. You will see something like this: # # __`RuntimeError: size mismatch, m1: [5 x 1960], m2: [1 x 64] at /some/long/path/to/torch/operation`__ # # See the __1960__ there? That's your actual input shape. # # ## Task 2: adding normalization # # * Add batch norm (with default params) between convolution and ReLU # * nn.BatchNorm*d (1d for dense, 2d for conv) # * usually better to put them after linear/conv but before nonlinearity # * Re-train the network with the same optimizer, it should get at least 20% validation accuracy at peak. 
# # To know more about **batch_norm** and **data covariate shift** # # https://towardsdatascience.com/batch-normalization-in-neural-networks-1ac91516821c # # https://www.youtube.com/watch?v=nUUqwaxLnWs # + id="LLbMCrwUDMo2" cellId="h0orx2qeai7f8u5pns35zc" model = nn.Sequential() #decribe conv net with batchnorm here # + id="LN2d5KN7DMo5" cellId="opc2ig8mr4ps9jdz20yc3f" opt = torch.optim.SGD(model.parameters(), lr=0.01) train_loss = [] val_accuracy = [] # + id="tnlGj42iDMo7" cellId="rl7hxf77qdkrjahcysgf4" import time num_epochs = 100 # total amount of full passes over training data batch_size = 50 # number of samples processed in one SGD iteration for epoch in range(num_epochs): print (num_epochs) # In each epoch, we do a full pass over the training data: start_time = time.time() model.train(True) # enable dropout / batch_norm training behavior for (X_batch, y_batch) in train_batch_gen: # train on batch loss = compute_loss(X_batch, y_batch) loss.backward() opt.step() opt.zero_grad() train_loss.append(loss.data.cpu().numpy()) print (num_epochs) model.train(False) # disable dropout / use averages for batch_norm for X_batch, y_batch in val_batch_gen: logits = model(Variable(torch.FloatTensor(X_batch)).cuda()) y_pred = logits.max(1)[1].data val_accuracy.append(np.mean( (y_batch.cpu() == y_pred.cpu()).numpy() )) print (num_epochs) # Then we print the results for this epoch: print("Epoch {} of {} took {:.3f}s".format( epoch + 1, num_epochs, time.time() - start_time)) print(" training loss (in-iteration): \t{:.6f}".format( np.mean(train_loss[-len(train_dataset) // batch_size :]))) print(" validation accuracy: \t\t\t{:.2f} %".format( np.mean(val_accuracy[-len(val_dataset) // batch_size :]) * 100)) # + [markdown] id="72fXRudFDMo9" cellId="vf7qaq1wj4ajdcqghtdri" # # # ``` # # ``` # # ``` # # ``` # # ``` # ## Task 3: Data Augmentation # # ** Augmenti - A spell used to produce water from a wand (Harry Potter Wiki) ** # # <img 
src="https://github.com/yandexdataschool/Practical_DL/blob/sem3spring2019/week03_convnets/HagridsHut_PM_B6C28_Hagrid_sHutFireHarryFang.jpg?raw=1" style="width:80%"> # # There's a powerful torch tool for image preprocessing useful to do data preprocessing and augmentation. # # Here's how it works: we define a pipeline that # * makes random crops of data (augmentation) # * randomly flips image horizontally (augmentation) # * then normalizes it (preprocessing) # + [markdown] id="_AG9EMuWDMo9" cellId="slw9ya2dgwd07z4wt277s9l" # When testing, we don't need random crops, just normalize with same statistics. # + id="b8uC2R2PDMo-" cellId="1k95do5uw063u9mq1l5kqy" import torchvision from torchvision import transforms transform_augment = <YOUR CODE> # decribe transformation here # + id="DTD34cgpDMpA" cellId="rezdny8kd6hsacggoyow" dataset = torchvision.datasets.ImageFolder('tiny-imagenet-200/train', transform=transform_augment) # + id="oBb0Gq6xDMpD" cellId="d05majgvkitrqayuj5o7" train_dataset, val_dataset = torch.utils.data.random_split(dataset, [90000, 10000]) # + id="4Wu4eHU_DMpF" cellId="68xeb753nfzs85u77cgd" train_batch_gen = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=1) # + id="cCOQEfSUDMpI" cellId="630wr0cfmemfcdfd2lxzfl" val_batch_gen = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True, num_workers=1) # + id="dISTMNfcDMpK" cellId="wt6332af48qebjbjtx82zi" import time num_epochs = 100 # total amount of full passes over training data batch_size = 50 # number of samples processed in one SGD iteration for epoch in range(num_epochs): print (num_epochs) # In each epoch, we do a full pass over the training data: start_time = time.time() model.train(True) # enable dropout / batch_norm training behavior for (X_batch, y_batch) in train_batch_gen: # train on batch loss = compute_loss(X_batch, y_batch) loss.backward() opt.step() opt.zero_grad() train_loss.append(loss.data.cpu().numpy()) print (num_epochs) 
model.train(False) # disable dropout / use averages for batch_norm for X_batch, y_batch in val_batch_gen: logits = model(Variable(torch.FloatTensor(X_batch)).cuda()) y_pred = logits.max(1)[1].data val_accuracy.append(np.mean( (y_batch.cpu() == y_pred.cpu()).numpy() )) print (num_epochs) # Then we print the results for this epoch: print("Epoch {} of {} took {:.3f}s".format( epoch + 1, num_epochs, time.time() - start_time)) print(" training loss (in-iteration): \t{:.6f}".format( np.mean(train_loss[-len(train_dataset) // batch_size :]))) print(" validation accuracy: \t\t\t{:.2f} %".format( np.mean(val_accuracy[-len(val_dataset) // batch_size :]) * 100)) # + [markdown] id="vQG4txuJDMpN" cellId="3pj0hzqdxiif9s9ngjmtej" # We need for test data __only normalization__, not cropping and rotation # + id="LntBFXN6DMpO" cellId="hhz8nzik5gzpv9kq4xq9b" transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(means, stds), #normalize by channel. all value along the channel have mean and deviation ]) test_dataset = <YOUR CODE> # + [markdown] id="k7pl4SLpDMpQ" cellId="nlsjsod2nx7nffpvf18efs" # ## The Quest For A Better Network # # See `practical_dl/homework02` for a full-scale assignment.
seminar03-conv_nets/seminar_pytorch.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:ssc] # language: python # name: conda-env-ssc-py # --- # # Train a Depth Seeding Network # + import os from time import time import numpy as np import matplotlib.pyplot as plt # My libraries import src.data_loader as data_loader import src.segmentation as segmentation import src.train as train import src.util.utilities as util_ import src.util.flowlib as flowlib os.environ['CUDA_VISIBLE_DEVICES'] = "0" # TODO: Change this if you have more than 1 GPU # - # # Example Dataset: TableTop Object Dataset (TOD) # + TOD_filepath = '...' # TODO: change this to the dataset you want to train on data_loading_params = { # Camera/Frustum parameters 'img_width' : 640, 'img_height' : 480, 'near' : 0.01, 'far' : 100, 'fov' : 45, # vertical field of view in degrees 'use_data_augmentation' : True, # Multiplicative noise 'gamma_shape' : 1000., 'gamma_scale' : 0.001, # Additive noise 'gaussian_scale_range' : [0., 0.003], # up to 2.5mm standard dev 'gp_rescale_factor_range' : [12, 20], # [low, high (exclusive)] # Random ellipse dropout 'ellipse_dropout_mean' : 10, 'ellipse_gamma_shape' : 5.0, 'ellipse_gamma_scale' : 1.0, # Random high gradient dropout 'gradient_dropout_left_mean' : 15, 'gradient_dropout_alpha' : 2., 'gradient_dropout_beta' : 5., # Random pixel dropout 'pixel_dropout_alpha' : 0.2, 'pixel_dropout_beta' : 10., } dl = data_loader.get_TOD_train_dataloader(TOD_filepath, data_loading_params, batch_size=4, num_workers=8, shuffle=True) # - # ## Train Depth Seeding Network # + dsn_config = { # Sizes 'feature_dim' : 64, # 32 would be normal # Mean Shift parameters (for 3D voting) 'max_GMS_iters' : 10, 'num_seeds' : 200, # Used for MeanShift, but not BlurringMeanShift 'epsilon' : 0.05, # Connected Components parameter 'sigma' : 0.02, # Gaussian bandwidth parameter 'subsample_factor' : 5, 
'min_pixels_thresh' : 500, # Differentiable backtracing params 'tau' : 15., 'M_threshold' : 0.3, # Robustness stuff 'angle_discretization' : 100, 'discretization_threshold' : 0., } tb_dir = ... # TODO: change this to desired tensorboard directory dsn_training_config = { # Training parameters 'lr' : 1e-4, # learning rate 'iter_collect' : 20, # Collect results every _ iterations 'max_iters' : 150000, # Loss function stuff 'lambda_fg' : 3., 'lambda_co' : 5., 'lambda_sep' : 1., 'lambda_cl' : 1., 'num_seeds_training' : 50, 'delta' : 0.1, # for clustering loss. 2*eps 'max_GMS_iters' : 10, # Tensorboard stuff 'tb_directory' : os.path.join(tb_dir, 'train_DSN/'), 'flush_secs' : 10, # Write tensorboard results every _ seconds } iter_num = 0 dsn_training_config.update({ # Starting optimization from previous checkpoint 'load' : False, 'opt_filename' : os.path.join(dsn_training_config['tb_directory'], f'DSNTrainer_DSNWrapper_{iter_num}_checkpoint.pth'), 'model_filename' : os.path.join(dsn_training_config['tb_directory'], f'DSNTrainer_DSNWrapper_{iter_num}_checkpoint.pth'), }) # - dsn = segmentation.DSNWrapper(dsn_config) dsn_trainer = train.DSNTrainer(dsn, dsn_training_config) # Train the network for 1 epoch num_epochs = 1 dsn_trainer.train(num_epochs, dl) dsn_trainer.save() # ## Plot some losses # + # %matplotlib inline fig = plt.figure(1, figsize=(15,3)) total_subplots = 5 starting_epoch = 0 info_items = {k:v for (k,v) in dsn_trainer.infos.items() if k > starting_epoch} plt.subplot(1,total_subplots,1) losses = [x['loss'] for (k,x) in info_items.items()] plt.plot(info_items.keys(), losses) plt.xlabel('Iteration') plt.title('Losses. {0}'.format(losses[-1])) plt.subplot(1,total_subplots,2) fg_losses = [x['FG loss'] for (k,x) in info_items.items()] plt.plot(info_items.keys(), fg_losses) plt.xlabel('Iteration') plt.title('Foreground Losses. 
{0}'.format(fg_losses[-1])) plt.subplot(1,total_subplots,3) co_losses = [x['Center Offset loss'] for (k,x) in info_items.items()] plt.plot(info_items.keys(), co_losses) plt.xlabel('Iteration') plt.title('Center Offset Losses. {0}'.format(co_losses[-1])) plt.subplot(1,total_subplots,4) sep_losses = [x['Separation loss'] for (k,x) in info_items.items()] plt.plot(info_items.keys(), sep_losses) plt.xlabel('Iteration') plt.title('Separation Losses. {0}'.format(sep_losses[-1])) plt.subplot(1,total_subplots,5) cl_losses = [x['Cluster loss'] for (k,x) in info_items.items()] plt.plot(info_items.keys(), cl_losses) plt.xlabel('Iteration') plt.title('Cluster Losses. {0}'.format(cl_losses[-1])) print("Number of iterations: {0}".format(dsn_trainer.iter_num)) # - # ## Visualize some stuff # # Run the network on a single batch, and plot the results # + dl = data_loader.get_TOD_test_dataloader(TOD_filepath, data_loading_params, batch_size=8, num_workers=8, shuffle=True) dl_iter = dl.__iter__() batch = next(dl_iter) rgb_imgs = util_.torch_to_numpy(batch['rgb'], is_standardized_image=True) # Shape: [N x H x W x 3] xyz_imgs = util_.torch_to_numpy(batch['xyz']) # Shape: [N x H x W x 3] foreground_labels = util_.torch_to_numpy(batch['foreground_labels']) # Shape: [N x H x W] center_offset_labels = util_.torch_to_numpy(batch['center_offset_labels']) # Shape: [N x 2 x H x W] N, H, W = foreground_labels.shape[:3] # + print("Number of images: {0}".format(N)) dsn.eval_mode() ### Compute segmentation masks ### st_time = time() fg_masks, center_offsets, object_centers, initial_masks = dsn.run_on_batch(batch) total_time = time() - st_time print('Total time taken for Segmentation: {0} seconds'.format(round(total_time, 3))) print('FPS: {0}'.format(round(N / total_time,3))) # Get segmentation masks in numpy fg_masks = fg_masks.cpu().numpy() center_offsets = center_offsets.cpu().numpy().transpose(0,2,3,1) initial_masks = initial_masks.cpu().numpy() for i in range(len(object_centers)): 
object_centers[i] = object_centers[i].cpu().numpy() # - fig_index = 1 for i in range(N): fig = plt.figure(fig_index); fig_index += 1 fig.set_size_inches(20,5) # Plot image plt.subplot(1,5,1) plt.imshow(rgb_imgs[i,...].astype(np.uint8)) plt.title('Image {0}'.format(i+1)) # Plot Depth plt.subplot(1,5,2) plt.imshow(xyz_imgs[i,...,2]) plt.title('Depth') # Plot prediction plt.subplot(1,5,3) plt.imshow(util_.get_color_mask(fg_masks[i,...])) plt.title("Predicted Masks") # Plot Center Direction Predictions plt.subplot(1,5,4) fg_mask = np.expand_dims(fg_masks[i,...] == 2, axis=-1) plt.imshow(flowlib.flow_to_image(direction_predictions[i,...] * fg_mask)) plt.title("Center Direction Predictions") # Plot Initial Masks plt.subplot(1,5,5) plt.imshow(util_.get_color_mask(initial_masks[i,...])) plt.title(f"Initial Masks. #objects: {np.unique(initial_masks[i,...]).shape[0]-1}")
train_DSN.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="6LIXVJeIAmVa" # # Hyper-parameter Tunning of Machine Learning (ML) Models # # # + [markdown] id="4XhQDOADAmVe" # ### Code for Regression Problems # + [markdown] id="zMYHvDbUAmVf" # #### `Dataset Used:` # Boston housing dataset # + [markdown] id="GI497R4ZAmVh" # #### `Machine Learning Algorithm Used:` # * Random Forest (RF) # * Support Vector Machine (SVM) # * K-Nearest Neighbor (KNN) # * Artificial Neural Network (ANN) # + [markdown] id="yLOZ_qJ1AmVj" # #### `Hyper-parameter Tuning Algorithms Used:` # * Grid Search # * Random Search # * Bayesian Optimization with Gaussian Processes (BO-GP) # * Bayesian Optimization with Tree-structured Parzen Estimator (BO-TPE) # + [markdown] id="A2MwCNNhAmVk" # --- # + id="sbrOyFciAmVl" # Importing required libraries import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt # %matplotlib inline import scipy.stats as stats from sklearn import datasets from sklearn.model_selection import cross_val_score from sklearn.ensemble import RandomForestRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.svm import SVR # + [markdown] id="RqEAcbr_AmVs" # #### Loading Boston Housing Dataset # Boston Housing dataset contains information about different houses in Boston. It contains 506 records with 13 columns. The main goal is to predict the value of prices of the house using the given features. 
# # For more details about the dataset click here: # [Details-1](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html) , # [Details-2](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="AxwfEuWEAmVt" outputId="21db46e9-5f8d-4630-e680-b3ddf6768950" # Loading dataset X, y = datasets.load_boston(return_X_y=True) datasets.load_boston() # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="nrQdYgKfAmV4" outputId="56c355f2-266e-4170-a5ff-161fa643967a" print(X.shape) #The data matrix # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="oWxYxskcAmV-" outputId="a6c517d8-ebc9-42a1-c481-9ab348eba0ce" print(y.shape) #The regression target # + [markdown] id="WSCCggf_AmWG" # ### Baseline Machine Learning Models: Regressor with default Hyper-parameters # + [markdown] id="__AfrricAmWH" # ### `Random Forest` # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="5dnEpJ_9AmWI" outputId="4a4b7d41-25fd-478f-bd3c-447269e878b5" # Random Forest (RF) with 3-fold cross validation RF_clf = RandomForestRegressor() RF_scores = cross_val_score(RF_clf, X, y, cv = 3, scoring = 'neg_mean_squared_error') print("Mean Square Error (RF) :" + str(-RF_scores.mean())) # + [markdown] id="KlCw8FxnAmWQ" # ### `Support Vector Machine` # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="czTHVWzLAmWT" outputId="3c4a279e-d0ed-45bd-eec4-fffec19bc25e" # Support Vector Machine (SVM) SVM_clf = SVR(gamma ='scale') SVM_scores = cross_val_score(SVM_clf, X, y, cv = 3, scoring = 'neg_mean_squared_error') print("Mean Square Error (SVM) :" + str(-SVM_scores.mean())) # + [markdown] id="MaB4c8p9AmWc" # ### `K-Nearest Neighbor` # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="Fm0bdf8DAmWd" outputId="d76d8e8e-e08c-4554-a6b2-feff342e2bbf" # K-Nearest Neighbor (KNN) KN_clf = KNeighborsRegressor() KN_scores = cross_val_score(KN_clf, X, y, 
cv = 3,scoring = 'neg_mean_squared_error') print("Mean Square Error (KNN) :" + str(-KN_scores.mean())) # + [markdown] id="6B1BhwhxAmWl" # ### `Artificial Neural Network` # + colab={"base_uri": "https://localhost:8080/", "height": 35} id="U9xTUiVpAmWn" outputId="1248b84d-b24a-4112-c68c-14c554d10433" # Artificial Neural Network (ANN) from keras.models import Sequential, Model from keras.layers import Dense, Input from keras.wrappers.scikit_learn import KerasRegressor from keras.callbacks import EarlyStopping def ann_model(optimizer = 'adam', neurons = 32,batch_size = 32, epochs = 50 ,activation = 'relu',patience = 5,loss = 'mse'): model = Sequential() model.add(Dense(neurons, input_shape = (X.shape[1],), activation = activation)) model.add(Dense(neurons, activation = activation)) model.add(Dense(1)) model.compile(optimizer = optimizer ,loss = loss) early_stopping = EarlyStopping(monitor = "loss", patience = patience) history = model.fit(X, y,batch_size = batch_size,epochs = epochs,callbacks = [early_stopping],verbose=0) return model ANN_clf = KerasRegressor(build_fn = ann_model, verbose = 0) ANN_scores = cross_val_score(ANN_clf, X, y, cv = 3,scoring = 'neg_mean_squared_error') print("Mean Square Error (ANN):"+ str(-ANN_scores.mean())) # + [markdown] id="dpD-x-QcAmWu" # ### Hyper-parameter Tuning Algorithms # + [markdown] id="riW63iO6AmWw" # ### ` 1] Grid Search` # + id="AbdwwVzkAmWz" from sklearn.model_selection import GridSearchCV # + [markdown] id="MVFXL1vhAmXA" # #### `Random Forest` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="UolMvFxqAmXC" outputId="335715e4-d2d6-4326-a2c6-da4f68771d9c" # Random Forest (RF) RF_params = { 'n_estimators': [10, 20, 30], 'max_depth': [15,20,25,30,50], } RF_clf = RandomForestRegressor(random_state = 0) RF_grid = GridSearchCV(RF_clf, RF_params, cv = 3, scoring = 'neg_mean_squared_error') RF_grid.fit(X, y) print(RF_grid.best_params_) print("Mean Square Error (RF) : "+ str(-RF_grid.best_score_)) # + [markdown] 
id="l7H5vmJYAmXI" # #### `Support Vector Machine` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="tIZAyy3QAmXJ" outputId="85047353-f1dd-4c29-ada8-aa9a938d769f" # Support Vector Machine (SVM) SVM_params = { 'C': [1,10, 100,1000], 'kernel' :['poly','rbf','sigmoid'], 'epsilon':[0.001, 0.01,0.1,1] } SVM_clf = SVR(gamma = 'scale') SVM_grid = GridSearchCV(SVM_clf, SVM_params, cv = 3, scoring = 'neg_mean_squared_error') SVM_grid.fit(X, y) print(SVM_grid.best_params_) print("Mean Square Error (SVM) :"+ str(-SVM_grid.best_score_)) # + [markdown] id="qJn0VVPzAmXP" # #### `K-Nearest Neighbor` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="CTGQZqywAmXQ" outputId="896a8e19-8284-4405-ac8d-aaaa8b332d30" # K-nearest Neighnor (KNN) KNN_params = { 'n_neighbors': [2,4,6,8] } KNN_clf = KNeighborsRegressor() KNN_grid = GridSearchCV(KNN_clf, KNN_params, cv=3, scoring='neg_mean_squared_error') KNN_grid.fit(X, y) print(KNN_grid.best_params_) print("Mean Square Error (KNN) :"+ str(-KNN_grid.best_score_)) # + [markdown] id="ROt4p1SWAmXY" # #### `Artificial Neural Network` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="FV6DimwOAmXZ" outputId="c66fd566-9e04-4b18-dd9f-f84be1abbc5f" # Artificial Neural Network (ANN) RF_params = { 'optimizer': ['adam','rmsprop'], 'activation': ['relu','tanh'], 'loss': ['mse','mae'], 'batch_size': [16,32], 'neurons':[16,32], 'epochs':[20,50], 'patience':[3,5] } RF_clf = KerasRegressor(build_fn = ann_model, verbose = 0) RF_grid = GridSearchCV(RF_clf, RF_params, cv=3,scoring = 'neg_mean_squared_error') RF_grid.fit(X, y) print(RF_grid.best_params_) print("MSE:"+ str(-RF_grid.best_score_)) # + [markdown] id="nyvvnbT4AmXe" # ### `2] Random Search` # + id="ISy1hFRsAmXf" from sklearn.model_selection import RandomizedSearchCV from scipy.stats import randint as sp_randint # + [markdown] id="1jIHAV8XAmXl" # #### `Random Forest` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="lsNcD38LAmXm" 
outputId="80f1513e-6ffc-425e-f133-2bffa3e216d6" # Random Forest (RF) RF_params = { 'n_estimators': sp_randint(10,100), 'max_depth': sp_randint(5,50), "criterion":['mse','mae'] } RF_clf = RandomForestRegressor(random_state = 0) RF_Random = RandomizedSearchCV(RF_clf, param_distributions = RF_params, n_iter = 20 ,cv = 3,scoring = 'neg_mean_squared_error') RF_Random.fit(X, y) print(RF_Random.best_params_) print("Mean Square Error (RF):"+ str(-RF_Random.best_score_)) # + [markdown] id="aeoZDfEiAmXq" # #### `Support Vector Machine` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="BKAwYL1LAmXr" outputId="665a4b31-e605-4f86-e46c-630c09709a4b" # Support Vector Machine (SVM) SVM_params = { 'C': stats.uniform(0,50), "kernel":['poly','rbf'], "epsilon":stats.uniform(0,1) } SVM_clf = SVR(gamma = 'scale') SVM_Random = RandomizedSearchCV(SVM_clf, param_distributions = SVM_params, n_iter = 20,cv = 3,scoring = 'neg_mean_squared_error') SVM_Random.fit(X, y) print(SVM_Random.best_params_) print("Mean Square Error (SVM) :"+ str(-SVM_Random.best_score_)) # + [markdown] id="CMb8P2maAmXw" # #### `K-Nearest Neighbor` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="sQeUuCtyAmXx" outputId="37e1e6b8-b97e-4cef-8a01-b5a30f65adad" # K-Nearest Neighbor (KNN) KNN_params = { 'n_neighbors': sp_randint(1,20), } KNN_clf = KNeighborsRegressor() KNN_Random = RandomizedSearchCV(KNN_clf, param_distributions = KNN_params, n_iter = 10,cv = 3,scoring = 'neg_mean_squared_error') KNN_Random.fit(X, y) print(KNN_Random.best_params_) print("Mean Square Error (KNN) :"+ str(-KNN_Random.best_score_)) # + [markdown] id="uF_fMtsPAmX0" # #### `Artificial Neural Network` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="Ck57nEMUAmX0" outputId="d142a86c-9fbe-4e99-b797-0d3740ca3472" # Artificial Neural Network (ANN) ANN_params = { 'optimizer': ['adam','rmsprop'], 'activation': ['relu','tanh'], 'loss': ['mse','mae'], 'batch_size': [16,32], 
'neurons':sp_randint(10,100), 'epochs':[20,50], 'patience':sp_randint(5,20) } ANN_clf = KerasRegressor(build_fn = ann_model, verbose = 0) ANN_Random = RandomizedSearchCV(ANN_clf, param_distributions = ANN_params, n_iter = 10,cv = 3,scoring = 'neg_mean_squared_error') ANN_Random.fit(X, y) print(ANN_Random.best_params_) print("Mean Square Error (ANN):"+ str(-ANN_Random.best_score_)) # + [markdown] id="QBA34Ri-Gshj" # ### `3] Bayesian Optimization with Gaussian Processes (BO-GP)` # + id="pC_Rp7jTAmX3" from skopt import Optimizer from skopt import BayesSearchCV from skopt.space import Real, Categorical, Integer # + [markdown] id="LySXAx7VJBmb" # #### `Random Forest` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="J_xhnDkLIn9v" outputId="94d088e6-ef7e-4839-e0a1-1c3d7c05005e" # Random Forest (RF) RF_params = { 'n_estimators': Integer(10,100), 'max_depth': Integer(5,50), "criterion":['mse','mae'] } RF_clf = RandomForestRegressor(random_state = 0) RF_Bayes = BayesSearchCV(RF_clf, RF_params,cv = 3,n_iter = 20, scoring = 'neg_mean_squared_error') RF_Bayes.fit(X, y) print(RF_Bayes.best_params_) print("Mean Square Error (RF):"+ str(-RF_Bayes.best_score_)) # + [markdown] id="gJ9-TVAEJy2W" # ### `Support Vector Machine` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="Oa61n1SAJ2N0" outputId="31247e85-e408-4836-9934-91761cc9dbf1" # Support Vector Machine (SVM) SVM_params = { "kernel":['poly','rbf'], 'C': Real(1,50), 'epsilon': Real(0,1) } SVM_clf = SVR(gamma='scale') SVM_Bayes = BayesSearchCV(SVM_clf, SVM_params,cv = 3,n_iter = 20, scoring = 'neg_mean_squared_error') SVM_Bayes.fit(X, y) print(SVM_Bayes.best_params_) print("Mean Square Error (SVM):"+ str(-SVM_Bayes.best_score_)) # + [markdown] id="6rewYf76KmQG" # #### `K-Nearest Neighbor` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="KQCocUNzKlXU" outputId="a1ff1aea-e576-4257-bb92-3d4154d568a6" # K-Nearest Neighbor (KNN) KNN_params = { 'n_neighbors': Integer(1,20), } 
KNN_clf = KNeighborsRegressor() KNN_Bayes = BayesSearchCV(KNN_clf, KNN_params,cv = 3,n_iter = 10, scoring = 'neg_mean_squared_error') KNN_Bayes.fit(X, y) print(KNN_Bayes.best_params_) print("Mean Square Error (KNN):"+ str(-KNN_Bayes.best_score_)) # + [markdown] id="-Q4IDrnPL2tM" # #### `Artificial Neural Network (ANN)` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="y8brG7bOL3CZ" outputId="b84063f1-9d97-45a8-cb0b-b231359ca1ac" # Artificial Neural Network (ANN) ANN_params = { 'optimizer': ['adam','rmsprop'], 'activation': ['relu','tanh'], 'loss': ['mse','mae'], 'batch_size': [16,32], 'neurons':Integer(10,100), 'epochs':[20,50], 'patience':Integer(5,20) } ANN_clf = KerasRegressor(build_fn = ann_model, verbose = 0) ANN_Bayes = BayesSearchCV(ANN_clf, ANN_params,cv = 3,n_iter = 10, scoring = 'neg_mean_squared_error') ANN_Bayes.fit(X, y) print(ANN_Bayes.best_params_) print("Mean Square Error (ANN):"+ str(-ANN_Bayes.best_score_)) # + [markdown] id="N1l86GDIMk4H" # ### `4] Bayesian Optimization with Tree-structured Parzen Estimator (BO-TPE)` # + id="B3Vcjoo1NIbX" from sklearn.model_selection import StratifiedKFold from hyperopt import hp, fmin, tpe, STATUS_OK, Trials # + [markdown] id="5KQ4_b74NE2c" # #### `Random Forest` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="jqTyukq-Nd0N" outputId="6e72b18e-edcb-44f2-b362-4ec34318d434" # Random Forest (RF) def RF_fun(params): params = { 'n_estimators': int(params['n_estimators']), 'max_depth': int(params['max_depth']), "criterion":str(params['criterion']) } RF_clf = RandomForestRegressor(**params) RF_score = -np.mean(cross_val_score(RF_clf, X, y, cv = 3, n_jobs = -1,scoring = "neg_mean_squared_error")) return {'loss':RF_score, 'status': STATUS_OK } RF_space = { 'n_estimators': hp.quniform('n_estimators', 10, 100, 1), 'max_depth': hp.quniform('max_depth', 5, 50, 1), "criterion":hp.choice('criterion',['mse','mae']) } RF_best = fmin(fn = RF_fun, space = RF_space, algo = tpe.suggest, max_evals 
= 20) print("Estimated optimum (RF):" +str (RF_best)) # + [markdown] id="owln7VETNYkF" # #### `Support Vector Machine` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="xIEZqHnyMt7H" outputId="54c72df3-c86e-45bc-a5ef-14fa491d39d2" # Support Vector Machine (SVM) def SVM_fun(params): params = { "kernel":str(params['kernel']), 'C': abs(float(params['C'])), 'epsilon': abs(float(params['epsilon'])), } SVM_clf = SVR(gamma='scale', **params) SVM_score = -np.mean(cross_val_score(SVM_clf, X, y, cv = 3, n_jobs = -1, scoring="neg_mean_squared_error")) return {'loss':SVM_score, 'status': STATUS_OK } SVM_space = { "kernel":hp.choice('kernel',['poly','rbf']), 'C': hp.normal('C', 0, 50), 'epsilon': hp.normal('epsilon', 0, 1), } SVM_best = fmin(fn = SVM_fun ,space = SVM_space, algo=tpe.suggest, max_evals = 20) print("Estimated optimum (SVM):" +str(SVM_best)) # + [markdown] id="bkN5jDKgNezb" # #### `K-Nearest Neighbor` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="Nr8oCSEENieY" outputId="a2d748c9-ac54-4343-8935-9c4bc3f65fb3" #K-Nearest Neighbor (KNN) def KNN_fun(params): params = {'n_neighbors': abs(int(params['n_neighbors']))} KNN_clf = KNeighborsRegressor(**params) KNN_score = -np.mean(cross_val_score(KNN_clf, X, y, cv = 3, n_jobs = -1, scoring = "neg_mean_squared_error")) return {'loss':KNN_score, 'status': STATUS_OK } KNN_space = {'n_neighbors': hp.quniform('n_neighbors', 1, 20, 1),} KNN_best = fmin(fn = KNN_fun, space = KNN_space,algo = tpe.suggest, max_evals = 10) print("Estimated optimum (KNN):"+str(KNN_best)) # + [markdown] id="qlxhyxvxNkRO" # #### `Artificial Neural Network` # + colab={"base_uri": "https://localhost:8080/", "height": 52} id="1w0TTrOyNjFD" outputId="ad7b9c23-b37e-4e26-98ed-f3f9460d7325" #Artificial Neural Network (ANN) def ANN_fun(params): params = { "optimizer":str(params['optimizer']), "activation":str(params['activation']), "loss":str(params['loss']), 'batch_size': abs(int(params['batch_size'])), 'neurons': 
abs(int(params['neurons'])), 'epochs': abs(int(params['epochs'])), 'patience': abs(int(params['patience'])) } ANN_clf = KerasRegressor(build_fn = ann_model,**params, verbose = 0) ANN_score = -np.mean(cross_val_score(ANN_clf, X, y, cv = 3, scoring = "neg_mean_squared_error")) return {'loss':ANN_score, 'status': STATUS_OK } ANN_space = { "optimizer":hp.choice('optimizer',['adam','rmsprop']), "activation":hp.choice('activation',['relu','tanh']), "loss":hp.choice('loss',['mse','mae']), 'batch_size': hp.quniform('batch_size', 16, 32,16), 'neurons': hp.quniform('neurons', 10, 100,10), 'epochs': hp.quniform('epochs', 20, 50,20), 'patience': hp.quniform('patience', 5, 20,5), } ANN_best = fmin(fn = ANN_fun, space = ANN_space, algo = tpe.suggest, max_evals = 10) print("Estimated optimum (ANN): " + str(ANN_best)) # + [markdown] id="Gpvu3XbrTo1f" # ---
(t2) Deep Learning Computations/Shalaka_DL_DLComputations/Shalaka_DL_DLComputations_HPTRegression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Explore raw (R0) LST-1 camera data: display events that pass a simple
# brightness/size selection, then dump per-time-slice images for a movie.
import numpy as np
from ctapipe.io import event_source
from ctapipe.io import EventSeeker
import matplotlib.pyplot as plt
# (duplicate `import numpy as np` removed)

from ctapipe.instrument import CameraGeometry
from ctapipe.visualization import CameraDisplay

# %matplotlib inline
plt.rcParams['figure.figsize'] = (16, 9)
plt.rcParams['font.size'] = 20
# -

def format_axes(ax):
    """Strip all tick marks and labels from *ax* so only the camera image shows.

    Returns the same axes object for convenient chaining.
    """
    ax.set_xlabel("")
    ax.set_ylabel("")
    ax.tick_params(
        axis='x',          # changes apply to the x-axis
        which='both',      # both major and minor ticks are affected
        bottom=False,      # ticks along the bottom edge are off
        top=False,         # ticks along the top edge are off
        labelbottom=False) # labels along the bottom edge are off
    ax.tick_params(
        axis='y',          # changes apply to the y-axis
        which='both',      # both major and minor ticks are affected
        left=False,        # ticks along the left edge are off
        right=False,       # ticks along the right edge are off
        labelleft=False)   # labels along the left edge are off
    return ax

source = event_source(input_url="../../data/real_data/LST-1.1.Run01627.0001.fits.fz", max_events=100)

subarray = source.subarray
tel_id = 1
geom = subarray.tels[tel_id].camera.geometry

# Loop over events; the unused enumerate() index was dropped.
for ev in source:
    n_pixels = geom.n_pixels
    # Per-pixel maximum of the R0 waveform over samples 2..37
    # (NOTE(review): despite the name, this is a max, not a std deviation).
    std_signal = np.zeros(n_pixels)
    for pixel in range(0, n_pixels):
        std_signal[pixel] = np.max(ev.r0.tel[tel_id].waveform[0, pixel, 2:38])

    # Keep only events with between 15 and 1800 pixels above 1000 counts.
    if((np.size(std_signal[std_signal>1000.]) < 15) or (np.size(std_signal[std_signal>1000.]) > 1800)):
        continue

    print(f"Event {ev.lst.tel[0].evt.event_id}, Max: {np.max(std_signal)} counts")

    fig, ax = plt.subplots(figsize=(10,10))
    disp0 = CameraDisplay(geom, ax=ax)
    disp0.cmap = 'viridis'
    disp0.image = std_signal
    disp0.add_colorbar(ax=ax)

    # Establish max and min of the color scale for this event
    max_color = np.max(std_signal)
    min_color = np.min(std_signal)
    disp0.set_limits_minmax(min_color, max_color)

    ax.set_title(f"Event {ev.lst.tel[tel_id].evt.event_id}")
    format_axes(ax)
    # fig.savefig("Images_LST/Event_%i.png"%(ev.lst.tel[0].evt.event_id))
    plt.show()

# +
# If you want to make a movie with all the slices
# (uses `ev`, the last event that survived the selection above, and the
# color limits computed for it so all frames share one scale).
max_color = np.max(std_signal)
min_color = np.min(std_signal)

# !mkdir -p image_test

for cell in range(1,39):
    print("cell",cell)
    fig, ax = plt.subplots(figsize=(10,10))
    disp0 = CameraDisplay(geom, ax=ax)
    disp0.cmap = 'viridis'
    disp0.add_colorbar(ax=ax)
    disp0.image = ev.r0.tel[tel_id].waveform[0,:,cell]
    disp0.set_limits_minmax(min_color, max_color)
    format_axes(ax)
    ax.set_title(f"Event {ev.lst.tel[tel_id].evt.event_id}, Time {cell} ns")
    fig.savefig("image_test/Event_{:02d}_cell{:02d}.png".format(ev.lst.tel[tel_id].evt.event_id,cell))
    #plt.show()
# -

# make a gif movie
# !convert image_test/'Event_{ev.lst.tel[tel_id].evt.event_id}*png' image_test/'Event_{ev.lst.tel[tel_id].evt.event_id}.gif'
notebooks/lst/real_data/explore_LST_data.ipynb
import json
import csv

# Load the scraped programme records: a JSON array of flat dicts.
# (Previously the file handle from open() was never closed.)
with open("data.json") as fp:
    records = json.load(fp)

# Inspect the available columns, sorted for readability.
field_names = sorted(records[0].keys())
field_names

# Columns to export to CSV, in the desired output order.
export_fields = ['institute_name', 'program_name', 'tests_accepted',
                 'test_cutoff', 'city_name', 'program_duration',
                 'country_name', 'program_duration_text',
                 'final_placements_domestic_salary_avg',
                 'specialization_name', 'state_name',
                 'tution_fees_local_country_student']

# newline='' is required when handing a file to the csv module;
# without it every record is followed by a blank row on Windows.
with open('data.csv', 'w', newline='') as csvfile:
    writer = csv.DictWriter(csvfile, fieldnames=export_fields)
    writer.writeheader()
    writer.writerows(records)

# Collect the distinct test names mentioned across all programmes,
# keeping only purely alphabetic tokens (drops scores/punctuation).
tokens = " ".join(rec['tests_accepted'] for rec in records).split()
distinct_tests = [t for t in set(tokens) if t.isalpha()]
distinct_tests
Helper/mba.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np import matplotlib.pyplot as plt # + # # Original HMM (uses hmmlearn) # from hmmlearn import hmm # S = hmm.GaussianHMM(3, covariance_type='full') # S.startprob_ = np.array([0.1, 0.6, 0.3]) # S.transmat_ = np.array([[0.8, 0.1, 0.1], [0.1, 0.8, 0.1], [0.1, 0.1, 0.8]]) # S.means_ = np.array([[0, 5],[-1, -1],[-1, 3]]) # S.covars_ = np.array([ # [[0.1, -0.5], # [-0.5, 3]], # [[0.6, 0.7], # [0.7, 1]], # [[1, 0], # [0, 1]] # ]) # obs, states = S.sample(300) # np.savetxt('sample.txt', obs) # - # copies from above OG_means = np.array([[0, 5],[-1, -1],[-1, 3]]) OG_covars = np.array([ [[0.1, -0.5], [-0.5, 3]], [[0.6, 0.7], [0.7, 1]], [[1, 0], [0, 1]] ]) data = np.loadtxt('sample.txt') # + from matplotlib import rcParams rcParams['figure.figsize'] = (12, 6) rcParams['figure.facecolor'] = 'w' rcParams['legend.edgecolor'] = 'k' plt.plot(data[:, 0], data[:, 1], 'x') plt.show() # - from edhsmm.hsmm_base import GaussianHSMM R = GaussianHSMM(n_states = 3, n_durations = 20, rnd_state = 42) R.fit(data, censoring = 1) # fast for hsmm_core_x # + # comparison print("Original Means:\n", OG_means, "\n") print("Original Covariance Matrices:\n", OG_covars, "\n") print("Learned Means:\n", R.mean, "\n") print("Learned Covariance Matrices:\n", R.covmat) # -
notebooks/EDHSMM (Multivariate Normal).ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Gaussian Mixture Model # # Gaussian mixture model (GMM) is a probabilistic model created by averging multiple gaussian density functions. # It is not uncommon to think of these models as a clustering technique because when the a model is fitted, it can be used to backtrack which individual density each samples is created from. # However, in `chaospy`, which first and foremost deals with forward problems, sees GMM as a very flexible class of distributions. # # On the most basic level constructing GMM in `chaospy` can be done from a sequence of means and covariances: # + import chaospy means = ([0, 1], [1, 1], [1, 0]) covariances = ([[1.0, -0.9], [-0.9, 1.0]], [[1.0, 0.9], [ 0.9, 1.0]], [[0.1, 0.0], [ 0.0, 0.1]]) distribution = chaospy.GaussianMixture(means, covariances) distribution # + import numpy from matplotlib import pyplot pyplot.rc("figure", figsize=[15, 6], dpi=75) xloc, yloc = numpy.mgrid[-2:3:100j, -1:3:100j] density = distribution.pdf([xloc, yloc]) pyplot.contourf(xloc, yloc, density) pyplot.show() # - # ### Fitting Model from Data # # `chaospy` supports Gaussian mixture model representation, but does not provide an automatic method for constructing them from data. # However, this is something for example `scikit-learn` supports. # It is possible to use `scikit-learn` to fit a model, and use the generated parameters in the `chaospy` implementation. 
# For example, let us consider the [Iris example from scikit-learn's documentation](https://scikit-learn.org/stable/auto_examples/mixture/plot_gmm_covariances.html) ("full" implementation in 2-dimensional representation): # + # NBVAL_CHECK_OUTPUT from sklearn import datasets, mixture model = mixture.GaussianMixture(3, random_state=1234) model.fit(datasets.load_iris().data) means = model.means_[:, :2] covariances = model.covariances_[:, :2, :2] print(means.round(4)) print(covariances.round(4)) # + distribution = chaospy.GaussianMixture(means, covariances) xloc, yloc = numpy.mgrid[4:8:100j, 1.5:4.5:100j] density = distribution.pdf([xloc, yloc]) pyplot.contourf(xloc, yloc, density) pyplot.show() # - # Like `scikit-learn`, `chaospy` also support higher dimensions, but that would make the visuallization harder. # ### Low discrepency Sequences # # `chaospy` support low-discrepency sequences through inverse mapping. # This support extends to mixture models, making the following possible: # + pseudo_samples = distribution.sample(500, rule="additive_recursion") pyplot.scatter(*pseudo_samples) pyplot.show() # - # ### Constructing Polynomial Chaos Expansion # # To be able to do point collocation method it requires the user to have access to sampler from the input distribution and orthogonal polynomials with respect to the input distribution. # The former is available above, while the latter is available as follows: # + # NBVAL_CHECK_OUTPUT expansion = chaospy.generate_expansion(1, distribution, rule="cholesky") expansion.round(4)
docs/tutorials/gaussian_mixture_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: ML # language: python # name: ml # --- # # Sentiment with Transformers # # The HuggingFace Transformers library is presently the most advanced and accessible library for building and using transformer models. As such, it will be what we primarily use throughout these notebooks. # # To apply sentiment analysis using the transformers library, we first need to decide on a model to use - as we will be applying a pretrained model, rather than starting from scratch. The list of models available can be found at [huggingface.co/models](https://www.huggingface.co/models). # # ![Filter for Text Classification on HuggingFace models page](assets/hf_models_text_classification_filter.jpg) # # From the model page we select the **Text Classification** filter on the left of the page to filter for models that we can apply for text classification tasks immediately. We will be performing sentiment analysis on posts from */r/investing* (in this section we will be using the example given in `txt` below), which are finance oriented, so we can use the [finBERT](https://arxiv.org/abs/1908.10063) model [`ProsusAI/finbert`](https://huggingface.co/ProsusAI/finbert) which has been trained on financial articles for financial sentiment classification. # # FinBERT is ofcourse a BERT model, so when loading the model and tokenizer we will using BERT classes, and because we are performing *sequence classification* we will be using `BertForSequenceClassification`. 
Let's initialize our model and tokenizer: # + from transformers import BertForSequenceClassification, BertTokenizer # initialize the tokenizer for BERT models tokenizer = BertTokenizer.from_pretrained('ProsusAI/finbert') # initialize the model for sequence classification model = BertForSequenceClassification.from_pretrained('ProsusAI/finbert') # - # The first time that this is run when using the `ProsusAI/finbert` model, it will be downloaded from the HuggingFace model repositories. We will be following a very similar process to that which we worked through for our Flair sentiment classifier, with some added steps to convert model output activations to class predictions. # # 1. We tokenize our input text. # # 2. Tokenized inputs are fed into the model, which outputs final layer **activations** (note *activations* are not *probabilities*). # # 3. Convert those activations into probabilities using a softmax function (sigmoid for multiple classes). # # 4. Take the **argmax** of those probabilities. # # 5. *(Optional) Extract the probability of the winning class.* # # For step one, we will use the `encode_plus` method: # + # this is our example text txt = ("Given the recent downturn in stocks especially in tech which is likely to persist as yields keep going up, " "I thought it would be prudent to share the risks of investing in ARK ETFs, written up very nicely by " "[The Bear Cave](https://thebearcave.substack.com/p/special-edition-will-ark-invest-blow). The risks comes " "primarily from ARK's illiquid and very large holdings in small cap companies. ARK is forced to sell its " "holdings whenever its liquid ETF gets hit with outflows as is especially the case in market downturns. 
" "This could force very painful liquidations at unfavorable prices and the ensuing crash goes into a " "positive feedback loop leading into a death spiral enticing even more outflows and predatory shorts.") tokens = tokenizer.encode_plus(txt, max_length=512, truncation=True, padding='max_length', add_special_tokens=True, return_tensors='pt') tokens # - # Here we have specified a few arguments that require some explanation. # # * `max_length` - this tell the tokenizer the maximum number of tokens we want to see in each sample, for BERT we almost always use `512` as that is the length of sequences that BERT consumes. # # * `truncation` - if our input string `txt` contains more tokens than allowed (specified in `max_length` parameter) then we cut all tokens past the `max_length` limit. # # * `padding` - if our input string `txt` contains less tokens than specified by `max_length` then we pad the sequence with zeros (`0` is the token ID for *'[PAD]'* - BERTs padding token). # # * `add_special_tokens` - whether or not to add special tokens, when using BERT we always want this to be `True` unless we are adding them ourselves. # # | Token | ID | Description | # | --- | --- | --- | # | [PAD] | 0 | Used to fill empty space when input sequence is shorter than required sequence size for model | # | [UNK] | 100 | If a word/character is not found in BERTs vocabulary it will be represented by this *unknown* token | # | [CLS] | 101 | Represents the start of a sequence | # | [SEP] | 102 | Seperator token to denote the end of a sequence and as a seperator where there are multiple sequences | # | [MASK] | 103 | Token used for masking other tokens, used for masked language modeling | # # *Note that our tokenized sequence begins with `101`, the seperator token `102` can be found seperating the input sequence and padding tokens `0`.* # # * `return_tensors` - here we specify either `'pt'` to return PyTorch tensors, or `'tf'` to return TensorFlow tensors. 
# # The output produced includes **three** tensors in a dictionary format, `'input_ids'`, `'token_type_ids'`, and `'attention_mask'`. We can ignore `'token_type_ids'` as they are not used by BERT, the other two tensors are however. # # * `'input_ids'` are the token ID representations of our input text. These will be passed into an embedding array where vector representations of each word will be found and passed into the following BERT layers. # # * `'attention_mask'` tells the attention layers in BERT which words to calculate attention for. If you look at this tensors you will see that each `1` value maps to an input ID from the `'input_ids'` tensor, whereas each `0` value maps to a *padding token* from the `'attention_mask'` tensor. In the attention layer (activations mapping to padding tokens are multiplied by 0, and so are cancelled out). # # Now that we have our tokenized input, we can pass it onto our `model` for inference. We pass in our `tokens` as *\*\*kwargs* (key word arguments), which we can use thanks to our tokens being in a dictionary format. When a dictionary is passed as a \*\*kwargs argument, the keys will be taken literally as variable names and the respective values become the variable values. So these two approaches would do the same thing: # # ``` # # without **kwargs # random_func(var1='hello', var2='world') # # # with **kwargs # input_dict = {'var1': 'hello', 'var2': 'world'} # random_func(**input_dict) # ``` # # Let's see how that looks for making predictions with our model. # + output = model(**tokens) output # - # You will notice here that the output logits tensor **cannot** be a set of probabilities because probability values must be within the range 0-1. These are infact the final output activations from BERT, to convert these into probabilities we must apply a **softmax** function. We will be using the PyTorch implementation of softmax for this, which we import from `torch.nn.functional`. 
# + import torch.nn.functional as F # apply softmax to the logits output tensor of our model (in index 0) across dimension -1 probs = F.softmax(output[0], dim=-1) probs # - # *(We use `dim=-1` as **-1** signifies our tensors final dimension, so if we had a 3D tensor with dims `[0, 1, 2]` writing `dim=-1` is the equivalent to writing `dim=2`. In this case if we wrote `dim=-2` this would be the equivalent to writing `dim=1`. For a 2D tensor with dims `[0, 1]`, `dim=-1` is the equivalent of `dim=1`.)* # # Now we have a tensor containing three classes, all with outputs within the probability range of 0-1, these are our probabilities! We can see that class index **1** has the highest probability with a value of **0.9072**. We can use PyTorch's argmax function to extract this, we can use `argmax` after importing `torch`. # + import torch pred = torch.argmax(probs) pred # - # Argmax outputs our winning class as **1** as expected. To convert this value from a PyTorch tensor to a Python integer we can use the `item` method. pred.item()
course/language_classification/02_sentiment_with_transformers.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # CCPi Iterative Reconstruction Algorithms # # This notebook contains basic demo how to use CCPi reconstruction algorithms. # There are three main iterative reconstructions available in this package. they are # * Conjugate Gradient Least Squares (CGLS)* Maximum Likelihood Estimation Method (MLEM)* Simultaneous Iterative Reconstructive Technique (SIRT) # In addition to the above methods there are three more varients of CGLS available in this package. they are # * CGLS with Convolution* CGLS with Tikhonov regularization* CGLS with Total Variation Regularisation (TV) from ccpi.reconstruction.parallelbeam import alg import numpy import h5py # Download demo data from github using urllib.request. import urllib.request urllib.request.urlretrieve("https://github.com/DiamondLightSource/Savu/blob/master/test_data/data/24737_fd.nxs?raw=true", "24737_fd.nxs") # https://github.com/DiamondLightSource/Savu/blob/master/test_data/data/24737_fd.nxs?raw=true #import wget #wget.download("https://github.com/DiamondLightSource/Savu/blob/master/test_data/data/24737_fd.nxs?raw=true", "24737_fd.nxs") # After imports one should load the dataset. The pre-processing, i.e. load the nexus (hdf5) file, extracting the angles and image data, scaling to 0-1 scalar range are done within the load_data function. 
def load_data(filename):
    '''Load a tomography dataset stored in a NeXuS file (HDF5).

    Reads the projection stack and the per-frame ``image_key`` dataset,
    averages the dark- and flat-field frames, and returns the
    flat/dark-corrected projection images together with their rotation
    angles.

    Parameters
    ----------
    filename : str
        Path to the NeXuS/HDF5 file.

    Returns
    -------
    norm : numpy.ndarray of float32
        Normalised projection images, one per projection frame.
    angle_proj : numpy.ndarray of float32
        Rotation angle for each projection frame.
    '''
    print("Loading Data")
    # Context manager guarantees the HDF5 file is closed (the original
    # opened it and never closed it).
    with h5py.File(filename, "r") as nx:
        data = nx.get('entry1/tomo_entry/data/rotation_angle')
        angles = numpy.zeros(data.shape)
        data.read_direct(angles)
        print(angles)

        data = nx.get('entry1/tomo_entry/data/data')
        stack = numpy.zeros(data.shape)
        data.read_direct(stack)
        print(data.shape)
        print("Data Loaded")

        # image_key marks the role of each frame:
        # 0 = projection, 1 = flat field, 2 = dark field
        data = nx.get('entry1/tomo_entry/instrument/detector/image_key')
        itype = numpy.zeros(data.shape)
        data.read_direct(itype)

    # Average the dark- and flat-field frames with numpy.mean, which
    # allocates a fresh array.  The previous in-place accumulation
    # (`dark = darks[0]; dark += darks[i]`) wrote through a view into
    # `stack` and silently corrupted the first dark/flat frame.
    dark = numpy.mean(
        [stack[i] for i in range(len(itype)) if itype[i] == 2], axis=0)
    flat = numpy.mean(
        [stack[i] for i in range(len(itype)) if itype[i] == 1], axis=0)

    # Projection frames (image_key == 0) and their matching angles.
    proj = [stack[i] for i in range(len(itype)) if itype[i] == 0]
    angle_proj = numpy.asarray(
        [angles[i] for i in range(len(itype)) if itype[i] == 0],
        dtype=numpy.float32)

    def normalize(projection, dark, flat, def_val=0.1):
        '''Flat/dark-field correction: (proj - dark) / (flat - dark).

        Pixels where the division is undefined (0/0 or x/0) are set to
        `def_val` instead of propagating NaN/inf.
        '''
        a = (projection - dark)
        b = (flat - dark)
        with numpy.errstate(divide='ignore', invalid='ignore'):
            c = numpy.true_divide(a, b)
        c[~numpy.isfinite(c)] = def_val  # set to not zero if 0/0
        return c

    norm = numpy.asarray([normalize(p, dark, flat) for p in proj],
                         dtype=numpy.float32)
    return norm, angle_proj
# + ## Data can now be passed to the reconstruction algorithms: ## CGLS, MLEM, SIRT, CGLS_CONV, CGLS_TIKHONOV, CGLS_TVregularization # center of rotation center_of_rotation = numpy.double(86.2) # resolution resolution = 1 # number of iterations niterations = 15 # number of threads threads = 3 #data are in log scale? isPixelDataInLogScale = False # CGLS img_cgls = alg.cgls(norm, angle_proj, center_of_rotation , resolution , niterations, threads, isPixelDataInLogScale) # MLEM img_mlem = alg.mlem(norm, angle_proj, center_of_rotation , resolution , niterations, threads, isPixelDataInLogScale) # SIRT img_sirt = alg.sirt(norm, angle_proj, center_of_rotation , resolution , niterations, threads, isPixelDataInLogScale) # CGLS CONV iteration_values1 = numpy.zeros((niterations,)) img_cgls_conv = alg.cgls_conv(norm, angle_proj, center_of_rotation , resolution , niterations , threads, iteration_values1 , isPixelDataInLogScale) #Regularization parameter regularization = numpy.double(1e-3) # CGLS TIKHONOV iteration_values2 = numpy.zeros((niterations,)) img_cgls_tikhonov = alg.cgls_tikhonov(norm, angle_proj, center_of_rotation , resolution , niterations, threads, regularization, iteration_values2 , isPixelDataInLogScale) # CGLS Total Variation Regularization iteration_values3 = numpy.zeros((niterations,)) img_cgls_TVreg = alg.cgls_TVreg(norm, angle_proj, center_of_rotation , resolution , niterations, threads, regularization, iteration_values3, isPixelDataInLogScale) # - # One may want to compare the results of the reconstruction algorithms: # + # %matplotlib notebook import matplotlib.pyplot as plt fs = 10 fig, ax = plt.subplots(1,6,sharey=True) ax[0].imshow(img_cgls[80]) ax[0].axis('off') # clear x- and y-axes ax[0].set_title("CGLS" , fontsize = fs) ax[1].imshow(img_sirt[80]) ax[1].axis('off') # clear x- and y-axes ax[1].set_title("SIRT" , fontsize = fs) ax[2].imshow(img_mlem[80]) ax[2].axis('off') # clear x- and y-axesplt.show() ax[2].set_title("MLEM" , fontsize = fs) 
ax[3].imshow(img_cgls_conv[80]) ax[3].axis('off') # clear x- and y-axesplt.show() ax[3].set_title("CGLS CONV" , fontsize = fs) ax[4].imshow(img_cgls_tikhonov[80]) ax[4].axis('off') # clear x- and y-axesplt.show() ax[4].set_title("Tikhonov" , fontsize = fs) ax[5].imshow(img_cgls_TVreg[80]) ax[5].axis('off') # clear x- and y-axesplt.show() ax[5].set_title("TV Reg" , fontsize = fs) plt.show() # -
notebooks/demo-1-reconstruction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # #MakeoverMonday - Music Industry Sales # > This visualisation examines how digital changed the music industry. # # - toc: false # - badges: true # - comments: true # - categories: [makeovermonday, altair, python, visualisation] # - image: images/music_industry.png # ## 40 Years of Music Industry Sales # # The record industry has seen a lot of change over the years. # # 8-tracks took a short-lived run at the dominance of vinyl, cassettes faded away as compact discs took the world by storm, and through it all, the music industry saw its revenue continue to climb. That is, until it was digitally disrupted. # # Looking back at four decades of U.S. music industry sales data is a fascinating exercise as it charts not only the rise and fall the record company profits, but seismic shifts in technology and consumer behavior as well. # # Sources: https://www.visualcapitalist.com/music-industry-sales/, https://www.riaa.com/u-s-sales-database/<br> # hide import altair as alt import pandas as pd from altair_saver import save # hide df = pd.read_csv("MusicData.csv", parse_dates=["Year"], thousands=',') df # hide # Convert Value column to float so it can be used in Altair as quantitative measure df.astype({"Value": "float"}) df["Value"].dtype # hide # Check for leading or trailing whitespace in columns df.columns # hide # Get an overview of the format which have been used df.groupby(['Format']).sum() # + # hide # Create a new column which assigns groups to each format. 
# Maps a coarse format-group label to the raw `Format` values it covers.
# Any format not listed here (e.g. "LP/EP", "Vinyl Single") falls through
# to the "Vinyl" group, matching the original else-branch.
_FORMAT_GROUPS = {
    "8 - Track": {"8 - Track"},
    "Tape": {"Cassette", "Cassette Single", "Other Tapes"},
    "CD": {"CD", "CD Single", "DVD Audio"},
    "Download": {"Download Album", "Download Music Video",
                 "Download Single", "Kiosk"},
    # The original if/elif chain tested "Music Video (Physical)" twice;
    # the set representation de-duplicates it.
    "Other": {"Music Video (Physical)", "Other Digital",
              "Ringtones & Ringbacks", "SACD",
              "SoundExchange Distributions", "Synchronization"},
    "Stream": {"Limited Tier Paid Subscription",
               "On-Demand Streaming (Ad-Supported)",
               "Other Ad-Supported Streaming", "Paid Subscription"},
}


def groups(row):
    """Return the coarse format group for one sales record.

    Parameters
    ----------
    row : mapping with a "Format" key (e.g. a DataFrame row via apply).

    Returns
    -------
    str
        One of "8 - Track", "Tape", "CD", "Download", "Other", "Stream",
        or "Vinyl" (the default for any unlisted format).
    """
    fmt = row["Format"]
    for group, formats in _FORMAT_GROUPS.items():
        if fmt in formats:
            return group
    return "Vinyl"
_notebooks/2020-05-25-Music-Industry-Sales.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: venv-datascience # language: python # name: venv-datascience # --- # # Intro to MultiIndex Module # - Multiple layers, Multiple levels, Multiple tiers import pandas as pd # Interestingly, prices of Bigmac kinda represents country's economy indicator. bigmac = pd.read_csv('Data/bigmac.csv') bigmac.head() bigmac.info() bigmac.isnull().sum() # ## Optimization Points # + Date are in object datatype. If necessary, we need to change to Datetime. # + Prices are in object (String), so we need to change them to float to allow us make calculations. bigmac = pd.read_csv('Data/bigmac.csv', parse_dates=['Date']) bigmac.head() # ------ # # 2) Create a MultiIndex with the `set_index` Method bigmac = pd.read_csv('Data/bigmac.csv', parse_dates=['Date']) bigmac.head(3) # ### We can have multiIndex with 2 layers, 3 layers, many more, etc bigmac.set_index(keys=['Date', 'Country']) bigmac.set_index(keys=['Country', 'Date']) # ### TIPS: Column with less values should be the most Outer Layer # In our case => Date bigmac.nunique() bigmac = bigmac.set_index(keys=['Date', 'Country']) bigmac.head(3) # ## `sort_index()` after multi indexing bigmac = bigmac.sort_index() # sort by date, then by country bigmac.head(3) # ### Index and Names # + bigmac.index bigmac.index.names # - type(bigmac.index) # ### calling `.index` returns tuple for multi Index bigmac.index bigmac.index[0] date, name = bigmac.index[0] print('date: ', date) print('Name: ', name) # ------ # # 3) Extract Index Level Values with the `get_level_values` Method bigmac = pd.read_csv('Data/bigmac.csv', parse_dates=['Date'], index_col=['Date', 'Country']) bigmac = bigmac.sort_index() bigmac.head(3) bigmac.index.get_level_values('Date') bigmac.index.get_level_values(0) bigmac.index.get_level_values(1) bigmac.index.get_level_values('Country') # ------ # # 4) Change 
Index Level Name with the `set_names` Method bigmac = pd.read_csv('Data/bigmac.csv', parse_dates=['Date'], index_col=['Date', 'Country']) bigmac = bigmac.sort_index() bigmac.head() bigmac.index # ### Change the names of Our Index # - need to be in the same order when passing `names = []` parameter bigmac.index.set_names(names = ['Day', 'Location']) bigmac.index.set_names(names = ['Day', 'Location'], inplace=True) bigmac.head(3) # ## If we want to change only one index of multiindex, we can pass using `level` parameter # + `level = level_number` # + or `level = 'current index name'` # only changing from Day to Date bigmac.index.set_names(names='Date', level=0) bigmac.index.set_names(names='Date', level='Day') # only changing from Location to Country bigmac.index.set_names(names='Country', level=1) bigmac.index.set_names(names='Country', level='Location') bigmac.index.set_names(names=['Date', 'Country'], inplace=True) bigmac.head(3) # ------- # # 5) The `sort_index` Method on a `MultiIndex DataFrame` bigmac = pd.read_csv('Data/bigmac.csv', parse_dates=['Date'], index_col=['Date', 'Country']) bigmac.head(3) # + bigmac.sort_index() # sort_index works in the sequence of index (Date => Country) bigmac.sort_index(ascending=True) bigmac.sort_index(ascending=False) # all levels are sorting descending (Date => Country) # - # ## What if we want to customize Sorting order for index? 
# + we can pass boolean as a list # + bigmac.sort_index(ascending=[True, False]) # ascending for Date, descending for Country # - bigmac.sort_index(ascending=[False, True]) # + bigmac.sort_index() bigmac.sort_index(ascending=True) bigmac.sort_index(ascending=False) bigmac.sort_index(ascending=[True, False]) bigmac.sort_index(ascending=[False, True], inplace=True) # - bigmac.head(3) # ## Sorting by Single Level using `level` parameter bigmac.sort_index(level=0) bigmac.sort_index(level='Date') # when we sort on level of 1 or Country level, Date values are brought along (not necessarily sorted though) bigmac.sort_index(level=1, ascending=False) bigmac.sort_index(level='Country', ascending=False) # ----- # # 6) Extract Rows from a `MultiIndex DataFrame` using `.loc` and `.iloc` Accessors bigmac = pd.read_csv('Data/bigmac.csv', index_col=['Date', 'Country'], parse_dates=['Date']) bigmac = bigmac.sort_index() bigmac.head(3) # The following syntax is a bit ambiguous because 2nd value can be either # + Index name # + or Column name. # This will lead to confusion. That's why we shouldn't use like that. # + bigmac.loc['2010-01-01'] bigmac.loc['2010-01-01', 'Brazil'] bigmac.loc['2010-01-01', 'Price in US Dollars'] # - # ## Correct Approach (Panda Convention) for `.loc` Accessor for MultiIndex # + **1st Argument as tuple for ROW Index** (it can include index or multi indexes) # + **2nd Arugment as tuple for COLUMN names** # + **NOTE: if there is one value to pass in 1st Argument, make sure to use comma , to indicate that it is tuple.** # # This is consistent and less ambiguous. # + bigmac.loc[('2010-01-01', 'Brazil')] bigmac.loc[('2010-01-01', 'Brazil'), 'Price in US Dollars'] bigmac.loc[('2010-01-01', 'Brazil'), ['Price in US Dollars', 'Price in US Dollars']] # NOTE: if there is one value to pass in 1st Argument, make sure to use comma , to indicate that it is tuple. 
# ## using `.iloc[]` for multiindex
# `.iloc` addresses rows purely by integer position, independent of the
# (Date, Country) MultiIndex labels.
bigmac.iloc[0]
bigmac.iloc[10]
bigmac.iloc[[10, 20, 30]]
# Fixed: this statement originally ended with a stray "|" character,
# which made the cell a SyntaxError.
bigmac.iloc[[10, 20, 30, 250]]
# # **If we have more than two level of indexes, we need to tell pandas which one (`i` )to swap with which (`j`).**

# +
bigmac.swaplevel()

bigmac.swaplevel('Date', 'Country')

bigmac.swaplevel('Country', 'Date')

bigmac.swaplevel(i=1, j=0)

bigmac.swaplevel(1,0)
# -

bigmac = bigmac.swaplevel(1,0)
bigmac.head()

# ------
# # 9) The `.stack()` Method
# **it moves Column based indexes to Row based indexes.**

world = pd.read_csv('Data/worldstats.csv', index_col=['country', 'year'])
world.head(3)

world.info()

# **After stacking, Population and GDP column are moved to Row based indexes.**

world.stack()

# stacking a DataFrame with a single remaining column level yields a Series
type(world.stack())

# ### We can convert series to Dataframe using `.to_frame()`

world.stack().to_frame()

# -------
# # 10) The `.unstack()` Method - Part 1
# **move Row Based Indexes to Column Based Indexes**

world = pd.read_csv('Data/worldstats.csv', index_col=['country', 'year'])
world.head(3)

s = world.stack()
s.head(3)

s.unstack()

s.unstack().unstack()

s.unstack().unstack().unstack().to_frame()

# ----
# # 11) The `.unstack()` Method - Part 2

world = pd.read_csv('Data/worldstats.csv', index_col=['country', 'year'])
world.head(3)

s = world.stack()
s.head(3)

s.to_frame().head(3)

# ## We can unstack on different level based on requirements using `level` parameter

# +
s.unstack(level=0)
s.unstack(0) # country
s.unstack(1) # year
s.unstack(2) # Population and GDP

# we can also use negative indexes (moving backwards)
s.unstack(-1) #Population and GDP
s.unstack(-2) # Year
s.unstack(-3) # country
# -

# ## We can unstack on different level based on requirements using `level` parameter using Name of actual index

s.unstack('year')

s.unstack('country')

# ------
# # 12) The `.unstack()` Method - Part 3

world = pd.read_csv('Data/worldstats.csv', index_col=['country', 'year'])
s = world.stack()
s.head(3)

s.to_frame().head(3)

# ## Unstack using list of parameters

s.unstack(level=[1, 0])

s.unstack(level=['year', 'country'])

# we can see year became 1st level and country became 2nd level.
# FIX: restored the leading "#" on the next sentence — it had been split onto its
# own line without a comment marker, which made this cell invalid Python.
# Pandas follow order to unstack based on what we provided in the list

s.unstack(level=[0, 1])

s.unstack(level=['country', 'year'])

# ### If we look at the following data, we can see for some country(such as Albania) there are no data for 1960, 1961,etc.
# This is because for those year there is no data(GDP and Population) for that country. So when we unstack year, those empty values became NaN.

s.unstack('year')

# ## We can use `fill_value` to fill in those empty values.

s = s.unstack('year', fill_value=0)
s.head(3)

# -------
# # 13) The `.pivot()` Method
# + **re-orient the data set**
# + basically take values currently in column to column headers

sales = pd.read_csv('Data/salesmen.csv', parse_dates=['Date'])
sales['Salesman'] = sales['Salesman'].astype('category')
sales.head(3)

len(sales)

sales['Salesman'].value_counts()

# As for our case, we have only 5 Salesman. Instead of storing data in current format which gonna take a lot of memory usage, we can re-orient the dataset.

# ### we can condense the data as follow
# + we can see original dataframe of 1830 is reduced down to 366.

len(sales.pivot(index='Date', columns='Salesman', values='Revenue'))

sales.pivot(index='Date', columns='Salesman', values='Revenue')

# --------
# # 14) The `.pivot_table()` Method

foods = pd.read_csv('Data/foods.csv')
foods.head(3)

# ### What are the average spending and Total Spending per Gender?

foods.pivot_table(index='Gender', values='Spend', aggfunc='mean')

foods.pivot_table(index='Gender', values='Spend', aggfunc='sum')

# ### What about other sales?
foods.pivot_table(index='Item', values='Spend', aggfunc='sum') # ### We can also make multi Index foods.pivot_table(index=['Gender', 'Item'], values='Spend', aggfunc='sum') foods.pivot_table(index=['Gender', 'Item'], columns=['City'], values='Spend', aggfunc='sum') foods.pivot_table(index=['Gender', 'Item'], columns=['Frequency', 'City'], values='Spend', aggfunc='sum') # ## Other avaliable `aggfunc` foods.pivot_table(index=['Gender', 'Item'], columns='City', values='Spend', aggfunc='count') foods.pivot_table(index=['Gender', 'Item'], columns='City', values='Spend', aggfunc='max') foods.pivot_table(index=['Gender', 'Item'], columns='City', values='Spend', aggfunc='min') # ## Directly performing with pandas, insteading of directly calling on DataFrame pd.pivot_table(data=foods, index=['Gender', 'Item'], columns='City', values='Spend', aggfunc='count') # ----- # # 15) The `pd.melt()` Method # **take the aggregated data set into Tabular format.** # # + `id_vars`: columns which are going to be preserved. sales = pd.read_csv('Data/quarters.csv') sales pd.melt(sales, id_vars='Salesman', var_name='Quarter', value_name='Revenue') # we want to preserved Salesman # --------
Pandas - Data Analysis with Pandas and Python - BP/07_MultiIndex.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# ## Summary
# ### Dec 8, 2016
#
# Looks like there are some different parameterizations people have done to different continuous variables ([see this post](https://www.kaggle.com/mariusbo/allstate-claims-severity/xgb-lb-1106-33084/discussion)). Try running their xgboost model, and averaging with some of my other models (NN).

# +
import numpy as np
import pandas as pd
import xgboost as xgb
from datetime import datetime
from sklearn.metrics import mean_absolute_error
from sklearn.cross_validation import KFold
from scipy.stats import skew, boxcox
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import itertools
# -

# +
# Target shift used before the log transform (log(loss + shift)) to stabilize
# the objective on small losses; must be subtracted back after np.exp().
shift = 200
COMB_FEATURE = 'cat80,cat87,cat57,cat12,cat79,cat10,cat7,cat89,cat2,cat72,' \
               'cat81,cat11,cat1,cat13,cat9,cat3,cat16,cat90,cat23,cat36,' \
               'cat73,cat103,cat40,cat28,cat111,cat6,cat76,cat50,cat5,' \
               'cat4,cat14,cat38,cat24,cat82,cat25'.split(',')


def encode(charcode):
    """Map a categorical level code (e.g. 'A', 'AB') to an integer, base-26 style."""
    r = 0
    ln = len(str(charcode))
    for i in range(ln):
        r += (ord(str(charcode)[i]) - ord('A') + 1) * 26 ** (ln - i - 1)
    return r


fair_constant = 0.7


def fair_obj(preds, dtrain):
    """Custom XGBoost objective: gradient/hessian of the Fair loss (robust to outliers)."""
    labels = dtrain.get_label()
    x = (preds - labels)
    den = abs(x) + fair_constant
    grad = fair_constant * x / (den)
    hess = fair_constant * fair_constant / (den * den)
    return grad, hess


def xg_eval_mae(yhat, dtrain):
    """Evaluation metric: MAE on the original (un-logged, un-shifted) loss scale."""
    y = dtrain.get_label()
    return 'mae', mean_absolute_error(np.exp(y) - shift, np.exp(yhat) - shift)


def mungeskewed(train, test, numeric_feats):
    """Concatenate train/test and Box-Cox transform numeric features with skew > 0.25.

    Returns the combined frame and the number of training rows (for splitting back).
    NOTE: mutates `test` by adding a dummy 'loss' column.
    """
    ntrain = train.shape[0]
    test['loss'] = 0
    train_test = pd.concat((train, test)).reset_index(drop=True)
    skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
    skewed_feats = skewed_feats[skewed_feats > 0.25]
    skewed_feats = skewed_feats.index
    for feats in skewed_feats:
        # FIX: this statement had been broken across two lines ("... = train_test[feats]"
        # / "+ 1") by a formatting mangle; rejoined. +1 keeps values positive for boxcox.
        train_test[feats] = train_test[feats] + 1
        train_test[feats], lam = boxcox(train_test[feats])
    return train_test, ntrain
# -

# +
import os
import matplotlib.pyplot as plt
# %matplotlib inline

loc = '/Users/elena/Documents/Kaggle/Allstate/data/'
train = pd.read_csv(os.path.join(loc,'train.csv'))
#train.drop('id',axis=1,inplace=True)
test = pd.read_csv(os.path.join(loc,'test.csv'))
ids = test['id']
#test.drop('id',axis=1,inplace=True)
submission = pd.read_csv(os.path.join(loc,"sample_submission.csv"))
# -

numeric_feats = [x for x in train.columns[1:-1] if 'cont' in x]
categorical_feats = [x for x in train.columns[1:-1] if 'cat' in x]
train_test, ntrain = mungeskewed(train, test, numeric_feats)

# taken from Vladimir's script (https://www.kaggle.com/iglovikov/allstate-claims-severity/xgb-1114)
# Categorical levels present in only one of train/test are NaN-ed out so the
# encoder never sees a level the other split lacks.
for column in list(train.select_dtypes(include=['object']).columns):
    if train[column].nunique() != test[column].nunique():
        set_train = set(train[column].unique())
        set_test = set(test[column].unique())
        remove_train = set_train - set_test
        remove_test = set_test - set_train
        remove = remove_train.union(remove_test)

        def filter_cat(x):
            # `remove` is rebound each loop iteration, and apply() runs
            # immediately below, so the closure is safe here.
            if x in remove:
                return np.nan
            return x

        train_test[column] = train_test[column].apply(lambda x: filter_cat(x), 1)

# +
# taken from Ali's script (https://www.kaggle.com/aliajouz/allstate-claims-severity/singel-model-lb-1117)
train_test["cont1"] = np.sqrt(preprocessing.minmax_scale(train_test["cont1"]))
train_test["cont4"] = np.sqrt(preprocessing.minmax_scale(train_test["cont4"]))
train_test["cont5"] = np.sqrt(preprocessing.minmax_scale(train_test["cont5"]))
train_test["cont8"] = np.sqrt(preprocessing.minmax_scale(train_test["cont8"]))
train_test["cont10"] = np.sqrt(preprocessing.minmax_scale(train_test["cont10"]))
train_test["cont11"] = np.sqrt(preprocessing.minmax_scale(train_test["cont11"]))
train_test["cont12"] = np.sqrt(preprocessing.minmax_scale(train_test["cont12"]))

train_test["cont6"] = np.log(preprocessing.minmax_scale(train_test["cont6"]) + 0000.1)
# FIX: the cont7 assignment had been split across two lines by a formatting
# mangle; rejoined. (0000.1 is just 0.1, kept verbatim from the source script.)
train_test["cont7"] = np.log(preprocessing.minmax_scale(train_test["cont7"]) + 0000.1)
train_test["cont9"] = np.log(preprocessing.minmax_scale(train_test["cont9"]) + 0000.1)
train_test["cont13"] = np.log(preprocessing.minmax_scale(train_test["cont13"]) + 0000.1)
train_test["cont14"] = (np.maximum(train_test["cont14"] - 0.179722, 0) / 0.665122) ** 0.25
# -

# +
# Pairwise-combine selected categorical columns, then integer-encode everything.
for comb in itertools.combinations(COMB_FEATURE, 2):
    feat = comb[0] + "_" + comb[1]
    train_test[feat] = train_test[comb[0]] + train_test[comb[1]]
    train_test[feat] = train_test[feat].apply(encode)
    print('Combining Columns:', feat)

for col in categorical_feats:
    print('Analyzing Column:', col)
    train_test[col] = train_test[col].apply(encode)

print(train_test[categorical_feats])
# -

test.columns

# +
ss = StandardScaler()
train_test[numeric_feats] = \
    ss.fit_transform(train_test[numeric_feats].values)
train = train_test.iloc[:ntrain, :].copy()
test = train_test.iloc[ntrain:, :].copy()
print('\nMedian Loss:', train.loss.median())
print('Mean Loss:', train.loss.mean())

ids = pd.read_csv(os.path.join(loc,'test.csv'))['id']
train_y = np.log(train['loss'] + shift)
train_x = train.drop(['loss','id'], axis=1)
test_x = test.drop(['loss','id'], axis=1)

n_folds = 10
cv_sum = 0
early_stopping = 100  # NOTE(review): unused — xgb.train below hardcodes 50; reconcile deliberately.
fpred = []
xgb_rounds = []

d_train_full = xgb.DMatrix(train_x, label=train_y)
d_test = xgb.DMatrix(test_x)

kf = KFold(train.shape[0], n_folds=n_folds)
for i, (train_index, test_index) in enumerate(kf):
    print('\n Fold %d' % (i+1))
    X_train, X_val = train_x.iloc[train_index], train_x.iloc[test_index]
    y_train, y_val = train_y.iloc[train_index], train_y.iloc[test_index]

    rand_state = 2016  # NOTE(review): unused; the seed actually used is params['seed'].
    params = {
        'seed': 0,
        'colsample_bytree': 0.7,
        'silent': 1,
        'subsample': 0.7,
        'learning_rate': 0.03,
        'objective': 'reg:linear',
        'max_depth': 12,
        'min_child_weight': 100,
        'booster': 'gbtree'}

    d_train = xgb.DMatrix(X_train, label=y_train)
    d_valid = xgb.DMatrix(X_val, label=y_val)
    watchlist = [(d_train, 'train'), (d_valid, 'eval')]

    # FIX: this call had been broken mid-argument-list by a formatting mangle; rejoined.
    clf = xgb.train(params, d_train, 100000, watchlist,
                    early_stopping_rounds=50, obj=fair_obj, feval=xg_eval_mae)

    xgb_rounds.append(clf.best_iteration)
    scores_val = clf.predict(d_valid, ntree_limit=clf.best_ntree_limit)
    # MAE is shift-invariant, so comparing exp(y) to exp(pred) without "- shift"
    # gives the same value as the properly de-shifted MAE.
    cv_score = mean_absolute_error(np.exp(y_val), np.exp(scores_val))
    print('eval-MAE: %.6f' % cv_score)
    y_pred = np.exp(clf.predict(d_test, ntree_limit=clf.best_ntree_limit)) - shift

    # Accumulate fold predictions; averaged after the loop.
    if i > 0:
        fpred = pred + y_pred
    else:
        fpred = y_pred
    pred = fpred
    cv_sum = cv_sum + cv_score

mpred = pred / n_folds
score = cv_sum / n_folds
print('Average eval-MAE: %.6f' % score)
n_rounds = int(np.mean(xgb_rounds))
# -

# +
result = pd.DataFrame(mpred, columns=['loss'])
result["id"] = ids
result.to_csv(os.path.join(loc,'xgboost2.csv'),index=False)
result.head()
# -

# ## Compare to NN and first XGB

# +
orig_xgb = pd.read_csv(os.path.join(loc,'xgboost1.csv'))
orig_nn = pd.read_csv(os.path.join(loc,'submission_keras.csv'))
orig_loss = pd.read_csv(os.path.join(loc,'train.csv'))

import seaborn as sns
sns.set_context('poster')
sns.distplot(np.log1p(orig_loss['loss']),hist=False,label='Original')
sns.distplot(np.log1p(orig_xgb['loss']),hist=False,label='XGBoost1')
sns.distplot(np.log1p(orig_nn['loss']),hist=False,label='NN')
sns.distplot(np.log1p(result['loss']),hist=False,label='XGBoost2')
plt.legend()
# -
notebooks/20161208_KEW_new-xgboost.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Parameter drift

# # Purpose
# If the matematical model is not correct or too little data is available this may lead to paramter drift, so that the parameters in the matematical model changes depending on how the fitted data has been sampled.

# # Methodology
# * Sample data of forces from a higher order model
# * Fit a lower order model to a random sample of this data

# # Setup

# +
# # %load imports.py
## Local packages:

# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# %config Completer.use_jedi = False  ## (To fix autocomplete)

## External packages:
import pandas as pd
pd.options.display.max_rows = 999
pd.options.display.max_columns = 999
pd.set_option("display.max_columns", None)

import numpy as np
np.set_printoptions(linewidth=150)

import numpy as np
import os
import matplotlib.pyplot as plt
#if os.name == 'nt':
#    plt.style.use('presentation.mplstyle')  # Windows

import plotly.express as px
import plotly.graph_objects as go

import seaborn as sns
import sympy as sp
from sympy.physics.mechanics import (dynamicsymbols, ReferenceFrame,
                                     Particle, Point)
from sympy.physics.vector.printing import vpprint, vlatex
from IPython.display import display, Math, Latex
from src.substitute_dynamic_symbols import run, lambdify

import pyro
import sklearn
import pykalman
from statsmodels.sandbox.regression.predstd import wls_prediction_std
import statsmodels.api as sm
from scipy.integrate import solve_ivp

## Local packages:
from src.data import mdl
from src.symbols import *
from src.parameters import *
import src.symbols as symbols
from src import prime_system
from src.models.regression import ForceRegression, results_summary_to_dataframe
from src.models.diff_eq_to_matrix import DiffEqToMatrix
# FIX: this import had been split mid-statement ("show_pred," / "show_pred_captive")
# by a formatting mangle; rejoined.
from src.visualization.regression import show_pred, show_pred_captive
from src.visualization.plot import track_plot, captive_plot

## Load models:
# (Uncomment these for faster loading):
import src.models.vmm_abkowitz as vmm
import src.models.vmm_martin as vmm_simpler
from src.models.vmm import ModelSimulator
from src.data.wpcc import ship_parameters, df_parameters, ps, ship_parameters_prime, ps_ship, scale_factor
# -

#format the book
import src.visualization.book_format as book_format
book_format.set_style()

# ## Load VCT data

df_VCT_all = pd.read_csv('../data/external/vct.csv', index_col=0)
df_VCT = df_VCT_all.groupby(by=['model_name']).get_group('V2_5_MDL_modelScale')

# # Subtract the resistance

# +
# Fit fx ~ u + u**2 on the pure-resistance runs with OLS.
df_resistance = df_VCT.groupby(by='test type').get_group('resistance')
X = df_resistance[['u','fx']].copy()
X['u**2'] = X['u']**2
y = X.pop('fx')

model_resistance = sm.OLS(y,X)
results_resistance = model_resistance.fit()

X_pred = pd.DataFrame()
X_pred['u'] = np.linspace(X['u'].min(), X['u'].max(), 20)
X_pred['u**2'] = X_pred['u']**2
X_pred['fx'] = results_resistance.predict(X_pred)

fig,ax=plt.subplots()
df_resistance.plot(x='u', y='fx', style='.', ax=ax)
X_pred.plot(x='u', y='fx', style='--', ax=ax);
# -

# Remove the fitted resistance from fx for the whole dataset.
df_VCT_0_resistance = df_VCT.copy()
df_VCT_0_resistance['u**2'] = df_VCT_0_resistance['u']**2
df_VCT_0_resistance['fx'] -= results_resistance.predict(df_VCT_0_resistance[['u','u**2']])

# ## VCT to prime system

interesting = [
    'u',
    'v',
    'r',
    'delta',
    'fx',
    'fy',
    'mz',
    'thrust',
]
#df_VCT_prime = ps_ship.prime(df_VCT[interesting], U=df_VCT['V'])
df_VCT_prime = ps_ship.prime(df_VCT_0_resistance[interesting], U=df_VCT_0_resistance['V'])

# ## Fit a lower order model to this captive dataset

# ### Regression

df_captive = df_VCT_prime.copy()
df_captive['test type'] = df_VCT['test type']

reg = ForceRegression(vmm=vmm, data=df_captive)
display(reg.show_pred_X())
display(reg.show_pred_Y())
display(reg.show_pred_N())

# FIX: this call had been split mid-argument-list ("ps=ps," / "control_keys=...")
# by a formatting mangle; rejoined.
model_vct = reg.create_model(df_parameters=df_parameters, ship_parameters=ship_parameters, ps=ps,
                             control_keys=['delta'])

# +
outputs = model_vct.forces(inputs = df_VCT_prime)
df_captive_all = pd.merge(left=df_captive, right=outputs, how='left',
                          left_index=True, right_index=True,
                          suffixes = ('','_model'),
                          )

captive_plot(df_captive=df_captive_all, suffixes=['_model'],
             legends = ['VCT', 'model'], styles=['.', '-'])
# -

# +
df_captive = df_VCT_prime.copy()
df_captive['test type'] = df_VCT['test type']

N = len(df_captive)
N_sample = N - 5
df_captive_sample = df_captive.sample(n=N_sample, random_state=42)
# -

N

# +
# Refit the model 20 times on random subsamples to expose parameter drift.
model_names = [f'{i}' for i in range(20)]

df_captive_all = df_captive.copy()
np.random.seed(42)

models = {}

for model_name in model_names:

    df_captive_sample = df_captive.sample(n=N_sample)

    reg = ForceRegression(vmm=vmm, data=df_captive_sample)
    model_reg = reg.create_model(df_parameters=df_parameters, ship_parameters=ship_parameters, ps=ps)
    models[model_name] = model_vct = reg.create_model(df_parameters=df_parameters, ship_parameters=ship_parameters, ps=ps)

    outputs = model_reg.forces(inputs = df_captive)
    df_captive_all = pd.merge(left=df_captive_all, right=outputs, how='left',
                              left_index=True, right_index=True,
                              suffixes = ('',f'_{model_name}'),
                              )
# -

# +
suffixes = [f'_{model_name}' for model_name in model_names]
styles = ['r.'] + ['b-' for model_name in model_names]
legends = ['VCT'] + model_names

captive_plot(df_captive=df_captive_all, suffixes=suffixes,
             legends = legends, styles=styles,
             alpha=0.2, lw=2, add_legend=False)
# -

# +
df_results = pd.DataFrame()

result = model_vct.zigzag(u0=2, angle=30)

for model_name, model in models.items():

    result_ = model.simulate(result.result)

    df_ = result_.result
    df_['t'] = df_.index
    df_['model_name'] = model_name
    # FIX: DataFrame.append is deprecated/removed in modern pandas;
    # pd.concat is the drop-in equivalent.
    df_results = pd.concat([df_results, df_], ignore_index=True)
# -

# +
from src.visualization.plot import track_plot

fig,ax=plt.subplots()
fig.set_size_inches(10,10)
for model_name, df_ in df_results.groupby(by='model_name'):
    df_.plot(x='x0', y='y0', ax=ax, alpha=0.2, lw=3, style='b-')

# FIX: this call had been split mid-argument-list ("style='k-'," / "zorder=10, ax=ax)")
# by a formatting mangle; rejoined.
result.result.plot(x='x0', y='y0', style='k-', zorder=10, ax=ax)
ax.set_xlabel('x0 [m]')
ax.set_ylabel('y0 [m]')
ax.set_aspect("equal")
ax.set_title("Track plot")
ax.get_legend().set_visible(False)
ax.grid(True)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[-2:],['simulations','model test'])
# -

# +
fig,ax=plt.subplots()
fig.set_size_inches(14,3)

df_results['psi_deg'] = np.rad2deg(df_results['psi'])

df_results_ = result.result.copy()
df_results_['-delta_deg'] =-np.rad2deg(df_results_['delta'])
df_results_['psi_deg'] = np.rad2deg(df_results_['psi'])

for model_name, df_ in df_results.groupby(by='model_name'):
    df_.plot(x='t', y='psi_deg', ax=ax, alpha=0.2, lw=3, style='b-')

df_results_.plot(y='psi_deg', ax=ax, style='k-', zorder=10)
df_results_.plot(y='-delta_deg', ax=ax, style='m-', zorder=10)
ax.set_xlabel('time [s]')
# raw string so "\p" is not treated as an (invalid) escape sequence
ax.set_ylabel(r'Heading $\psi$ [deg]')
ax.set_title("ZigZag30/30")
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[-3:],['alternative models','model','rudder angle'])
ax.grid(True)
ax.set_ylim(-60,60)
# -

model_vct.parameters
notebooks/21.02_parameter_drift.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="vYmMCnfg1PN8"
# # Preface
#
# The locations requiring configuration for your experiment are commented in capital text.

# + [markdown] id="kgYWNPhf801A"
# # Setup

# + [markdown] id="-7DmzUo2vZZ_"
# ## Installations

# + id="wKMPt_L5bNeu"
# !pip install sphinxcontrib-napoleon
# !pip install sphinxcontrib-bibtex
# !pip install -i https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ submodlib
# !git clone https://github.com/decile-team/distil.git
# !git clone https://github.com/circulosmeos/gdown.pl.git

import sys
sys.path.append("/content/distil/")

# + [markdown] id="ZYsutkIJrGvK"
# **Experiment-Specific Imports**

# + id="lfQKdd0DrKsa"
from distil.utils.models.mnist_net import MnistNet # IMPORT YOUR MODEL HERE

# + [markdown] id="Maz6VJxS787x"
# ## Main Imports

# + id="V9-8qRo8KD3a"
import pandas as pd
import numpy as np
import copy
from torch.utils.data import Dataset, DataLoader, Subset, ConcatDataset
import torch.nn.functional as F
from torch import nn
from torchvision import transforms
from torchvision import datasets
from PIL import Image
import torch
import torch.optim as optim
from torch.autograd import Variable
import sys
sys.path.append('../')
import matplotlib.pyplot as plt
import time
import math
import random
import os
import pickle
from numpy.linalg import cond
from numpy.linalg import inv
from numpy.linalg import norm
from scipy import sparse as sp
from scipy.linalg import lstsq
from scipy.linalg import solve
from scipy.optimize import nnls
import itertools

# FIX: the first distil import below had been split mid-statement ("from" /
# "distil....") by a formatting mangle; rejoined.
from distil.active_learning_strategies.badge import BADGE
from distil.active_learning_strategies.glister import GLISTER
from distil.active_learning_strategies.margin_sampling import MarginSampling
from distil.active_learning_strategies.entropy_sampling import EntropySampling
from distil.active_learning_strategies.random_sampling import RandomSampling
from distil.active_learning_strategies.gradmatch_active import GradMatchActive
from distil.active_learning_strategies.fass import FASS
from distil.active_learning_strategies.adversarial_bim import AdversarialBIM
from distil.active_learning_strategies.adversarial_deepfool import AdversarialDeepFool
from distil.active_learning_strategies.core_set import CoreSet
from distil.active_learning_strategies.least_confidence_sampling import LeastConfidenceSampling
from distil.active_learning_strategies.margin_sampling import MarginSampling
from distil.active_learning_strategies.bayesian_active_learning_disagreement_dropout import BALDDropout

from distil.utils.train_helper import data_train
from distil.utils.utils import LabeledToUnlabeledDataset

from google.colab import drive

import warnings
warnings.filterwarnings("ignore")

# + [markdown] id="ecvumggc6UhF"
# ## Checkpointing and Logs

# + id="ohuLHm5E58bj"
class Checkpoint:
    """Crash-resilient snapshot of an AL experiment (accuracies, selected indices, model weights).

    Each save writes TWO pickled copies with the same timestamp; on load, only a
    pair whose contents agree is trusted (a lone/odd copy means the save was
    interrupted mid-write).
    """

    def __init__(self, acc_list=None, indices=None, state_dict=None, experiment_name=None, path=None):

        # If a path is supplied, load a checkpoint from there.
        if path is not None:

            if experiment_name is not None:
                self.load_checkpoint(path, experiment_name)
            else:
                raise ValueError("Checkpoint contains None value for experiment_name")

            return

        if acc_list is None:
            raise ValueError("Checkpoint contains None value for acc_list")

        if indices is None:
            raise ValueError("Checkpoint contains None value for indices")

        if state_dict is None:
            raise ValueError("Checkpoint contains None value for state_dict")

        if experiment_name is None:
            raise ValueError("Checkpoint contains None value for experiment_name")

        self.acc_list = acc_list
        self.indices = indices
        self.state_dict = state_dict
        self.experiment_name = experiment_name

    def __eq__(self, other):
        # Check if the accuracy lists are equal
        acc_lists_equal = self.acc_list == other.acc_list

        # Check if the indices are equal
        indices_equal = self.indices == other.indices

        # Check if the experiment names are equal
        experiment_names_equal = self.experiment_name == other.experiment_name

        # NOTE: state_dict is deliberately excluded from equality — tensor
        # comparison is expensive and the paired-copy protocol only needs the
        # metadata to match.
        return acc_lists_equal and indices_equal and experiment_names_equal

    def save_checkpoint(self, path):
        # Get current time to use in file timestamp
        timestamp = time.time_ns()

        # Create the path supplied
        os.makedirs(path, exist_ok=True)

        # Name saved files using timestamp to add recency information
        save_path = os.path.join(path, F"c{timestamp}1")
        copy_save_path = os.path.join(path, F"c{timestamp}2")

        # Write this checkpoint to the first save location
        with open(save_path, 'wb') as save_file:
            pickle.dump(self, save_file)

        # Write this checkpoint to the second save location
        with open(copy_save_path, 'wb') as copy_save_file:
            pickle.dump(self, copy_save_file)

    def load_checkpoint(self, path, experiment_name):
        # Obtain a list of all files present at the path
        timestamp_save_no = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]

        # If there are no such files, set values to None and return
        if len(timestamp_save_no) == 0:
            self.acc_list = None
            self.indices = None
            self.state_dict = None
            return

        # Sort the list of strings to get the most recent
        timestamp_save_no.sort(reverse=True)

        # Read in two files at a time, checking if they are equal to one another.
        # If they are equal, then it means that the save operation finished correctly.
        # If they are not, then it means that the save operation failed (could not be
        # done atomically). Repeat this action until no possible pair can exist.
        while len(timestamp_save_no) > 1:

            # Pop a most recent checkpoint copy
            first_file = timestamp_save_no.pop(0)

            # Keep popping until two copies with equal timestamps are present
            while True:

                second_file = timestamp_save_no.pop(0)

                # Timestamps match if the removal of the "1" or "2" results in equal numbers
                if (second_file[:-1]) == (first_file[:-1]):
                    break
                else:
                    first_file = second_file

                # If there are no more checkpoints to examine, set to None and return
                # NOTE(review): reconstructed as inside the inner loop so the next
                # pop() cannot raise IndexError — confirm against upstream DISTIL.
                if len(timestamp_save_no) == 0:
                    self.acc_list = None
                    self.indices = None
                    self.state_dict = None
                    return

            # Form the paths to the files
            load_path = os.path.join(path, first_file)
            copy_load_path = os.path.join(path, second_file)

            # Load the two checkpoints
            with open(load_path, 'rb') as load_file:
                checkpoint = pickle.load(load_file)

            with open(copy_load_path, 'rb') as copy_load_file:
                checkpoint_copy = pickle.load(copy_load_file)

            # Do not check this experiment if it is not the one we need to restore
            if checkpoint.experiment_name != experiment_name:
                continue

            # Check if they are equal
            if checkpoint == checkpoint_copy:

                # This checkpoint will suffice. Populate this checkpoint's fields
                # with the selected checkpoint's fields.
                self.acc_list = checkpoint.acc_list
                self.indices = checkpoint.indices
                self.state_dict = checkpoint.state_dict
                return

        # Instantiate None values in acc_list, indices, and model
        self.acc_list = None
        self.indices = None
        self.state_dict = None

    def get_saved_values(self):
        """Return (acc_list, indices, state_dict); all None if nothing was restored."""
        return (self.acc_list, self.indices, self.state_dict)


def delete_checkpoints(checkpoint_directory, experiment_name):
    """Remove every checkpoint file in the directory belonging to experiment_name."""
    # Iteratively go through each checkpoint, deleting those whose experiment name matches.
    timestamp_save_no = [f for f in os.listdir(checkpoint_directory) if os.path.isfile(os.path.join(checkpoint_directory, f))]

    for file in timestamp_save_no:

        delete_file = False

        # Get file location
        file_path = os.path.join(checkpoint_directory, file)

        if not os.path.exists(file_path):
            continue

        # Unpickle the checkpoint and see if its experiment name matches
        with open(file_path, "rb") as load_file:
            checkpoint_copy = pickle.load(load_file)
            if checkpoint_copy.experiment_name == experiment_name:
                delete_file = True

        # Delete this file only if the experiment name matched
        if delete_file:
            os.remove(file_path)


#Logs
def write_logs(logs, save_directory, rd):
    """Append one round's metrics dict to the run log file (plain text)."""
    file_path = save_directory + 'run_'+'.txt'
    with open(file_path, 'a') as f:
        f.write('---------------------\n')
        f.write('Round '+str(rd)+'\n')
        f.write('---------------------\n')
        for key, val in logs.items():
            if key == 'Training':
                f.write(str(key)+ '\n')
                for epoch in val:
                    f.write(str(epoch)+'\n')
            else:
                f.write(str(key) + ' - '+ str(val) +'\n')

# + [markdown] id="Bd-8vetN6cP-"
# ## AL Loop

# + id="jWdgKV2M6PFu"
def train_one(full_train_dataset, initial_train_indices, test_dataset, net, n_rounds, budget, args, nclasses, strategy, save_directory, checkpoint_directory, experiment_name):
    """Run one active-learning experiment: select `budget` points per round,
    retrain, log accuracy, and checkpoint after every round (resumable on crash).

    Returns the numpy array of test accuracies, acc[0..n_rounds].
    """

    # Split the full training dataset into an initial training dataset and an unlabeled dataset
    train_dataset = Subset(full_train_dataset, initial_train_indices)
    initial_unlabeled_indices = list(set(range(len(full_train_dataset))) - set(initial_train_indices))
    unlabeled_dataset = Subset(full_train_dataset, initial_unlabeled_indices)

    # Set up the AL strategy
    if strategy == "random":
        strategy_args = {'batch_size' : args['batch_size'], 'device':args['device']}
        strategy = RandomSampling(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset), net, nclasses, strategy_args)
    elif strategy == "entropy":
        strategy_args = {'batch_size' : args['batch_size'], 'device':args['device']}
        strategy = EntropySampling(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset), net, nclasses, strategy_args)
    elif strategy == "margin":
        strategy_args = {'batch_size' : args['batch_size'], 'device':args['device']}
        strategy = MarginSampling(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset), net, nclasses, strategy_args)
    elif strategy == "least_confidence":
        strategy_args = {'batch_size' : args['batch_size'], 'device':args['device']}
        strategy = LeastConfidenceSampling(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset), net, nclasses, strategy_args)
    elif strategy == "badge":
        strategy_args = {'batch_size' : args['batch_size'], 'device':args['device']}
        strategy = BADGE(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset), net, nclasses, strategy_args)
    elif strategy == "coreset":
        strategy_args = {'batch_size' : args['batch_size'], 'device':args['device']}
        strategy = CoreSet(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset), net, nclasses, strategy_args)
    elif strategy == "fass":
        strategy_args = {'batch_size' : args['batch_size'], 'device':args['device']}
        strategy = FASS(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset), net, nclasses, strategy_args)
    elif strategy == "glister":
        strategy_args = {'batch_size' : args['batch_size'], 'lr': args['lr'], 'device':args['device']}
        strategy = GLISTER(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset), net, nclasses, strategy_args, typeOf='rand', lam=0.1)
    elif strategy == "adversarial_bim":
        strategy_args = {'batch_size' : args['batch_size'], 'device':args['device']}
        strategy = AdversarialBIM(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset), net, nclasses, strategy_args)
    elif strategy == "adversarial_deepfool":
        strategy_args = {'batch_size' : args['batch_size'], 'device':args['device']}
        strategy = AdversarialDeepFool(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset), net, nclasses, strategy_args)
    elif strategy == "bald":
        strategy_args = {'batch_size' : args['batch_size'], 'device':args['device']}
        strategy = BALDDropout(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset), net, nclasses, strategy_args)

    # Define acc initially
    acc = np.zeros(n_rounds+1)

    initial_unlabeled_size = len(unlabeled_dataset)

    initial_round = 1

    # Define an index map
    index_map = np.array([x for x in range(initial_unlabeled_size)])

    # Attempt to load a checkpoint. If one exists, then the experiment crashed.
    training_checkpoint = Checkpoint(experiment_name=experiment_name, path=checkpoint_directory)
    rec_acc, rec_indices, rec_state_dict = training_checkpoint.get_saved_values()

    # Check if there are values to recover
    if rec_acc is not None:

        # Restore the accuracy list
        for i in range(len(rec_acc)):
            acc[i] = rec_acc[i]

        # Restore the indices list and shift those unlabeled points to the labeled set.
        index_map = np.delete(index_map, rec_indices)

        # Record initial size of the training dataset
        # FIX: was misspelled "intial_seed_size", so the computation of
        # initial_round below silently read the *module-level* initial_seed_size
        # global instead of this local value.
        initial_seed_size = len(train_dataset)

        restored_unlabeled_points = Subset(unlabeled_dataset, rec_indices)
        train_dataset = ConcatDataset([train_dataset, restored_unlabeled_points])

        remaining_unlabeled_indices = list(set(range(len(unlabeled_dataset))) - set(rec_indices))
        unlabeled_dataset = Subset(unlabeled_dataset, remaining_unlabeled_indices)

        # Restore the model
        net.load_state_dict(rec_state_dict)

        # Fix the initial round
        initial_round = (len(train_dataset) - initial_seed_size) // budget + 1

        # Ensure loaded model is moved to GPU
        if torch.cuda.is_available():
            net = net.cuda()

        strategy.update_model(net)
        strategy.update_data(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset))

        dt = data_train(train_dataset, net, args)

    else:

        if torch.cuda.is_available():
            net = net.cuda()

        dt = data_train(train_dataset, net, args)

        acc[0] = dt.get_acc_on_set(test_dataset)
        print('Initial Testing accuracy:', round(acc[0]*100, 2), flush=True)

        logs = {}
        logs['Training Points'] = len(train_dataset)
        logs['Test Accuracy'] = str(round(acc[0]*100, 2))
        write_logs(logs, save_directory, 0)

        #Updating the trained model in strategy class
        strategy.update_model(net)

    # Record the training transform and test transform for disabling purposes
    train_transform = full_train_dataset.transform
    test_transform = test_dataset.transform

    ##User Controlled Loop
    for rd in range(initial_round, n_rounds+1):
        print('-------------------------------------------------')
        print('Round', rd)
        print('-------------------------------------------------')

        sel_time = time.time()
        full_train_dataset.transform = test_transform # Disable any augmentation while selecting points
        idx = strategy.select(budget)
        full_train_dataset.transform = train_transform # Re-enable any augmentation done during training
        sel_time = time.time() - sel_time
        print("Selection Time:", sel_time)

        selected_unlabeled_points = Subset(unlabeled_dataset, idx)
        train_dataset = ConcatDataset([train_dataset, selected_unlabeled_points])

        remaining_unlabeled_indices = list(set(range(len(unlabeled_dataset))) - set(idx))
        unlabeled_dataset = Subset(unlabeled_dataset, remaining_unlabeled_indices)

        # Update the index map
        index_map = np.delete(index_map, idx, axis = 0)

        print('Number of training points -', len(train_dataset))

        # Start training
        strategy.update_data(train_dataset, LabeledToUnlabeledDataset(unlabeled_dataset))
        dt.update_data(train_dataset)
        t1 = time.time()
        clf, train_logs = dt.train(None)
        t2 = time.time()
        acc[rd] = dt.get_acc_on_set(test_dataset)

        logs = {}
        logs['Training Points'] = len(train_dataset)
        logs['Test Accuracy'] = str(round(acc[rd]*100, 2))
        logs['Selection Time'] = str(sel_time)
        logs['Trainining Time'] = str(t2 - t1)
        logs['Training'] = train_logs
        write_logs(logs, save_directory, rd)
        strategy.update_model(clf)
        print('Testing accuracy:', round(acc[rd]*100, 2), flush=True)

        # Create a checkpoint
        used_indices = np.array([x for x in range(initial_unlabeled_size)])
        used_indices = np.delete(used_indices, index_map).tolist()
        round_checkpoint = Checkpoint(acc.tolist(), used_indices, clf.state_dict(), experiment_name=experiment_name)
        round_checkpoint.save_checkpoint(checkpoint_directory)

    print('Training Completed')
    return acc

# + [markdown] id="-rFh9y0M3ZVH"
# # MNIST

# + [markdown] id="E-e_sDnGsC_N"
# ## Parameter Definitions
#
# Parameters related to the specific experiment are placed here. You should examine each and modify them as needed.
# + id="0cHXLa_YsIQG" data_set_name = "MNIST" # DSET NAME HERE dataset_root_path = '../downloaded_data/' net = MnistNet() # MODEL HERE # MODIFY AS NECESSARY logs_directory = '/content/gdrive/MyDrive/colab_storage/logs/' checkpoint_directory = '/content/gdrive/MyDrive/colab_storage/check/' model_directory = "/content/gdrive/MyDrive/colab_storage/model/" experiment_name = "MNIST BASELINE" initial_seed_size = 300 # INIT SEED SIZE HERE training_size_cap = 15300 # TRAIN SIZE CAP HERE budget = 1000 # BUDGET HERE # CHANGE ARGS AS NECESSARY args = {'n_epoch':300, 'lr':float(0.01), 'batch_size':20, 'max_accuracy':float(0.99), 'islogs':True, 'isreset':True, 'isverbose':True, 'device':'cuda'} # Train on approximately the full dataset given the budget contraints n_rounds = (training_size_cap - initial_seed_size) // budget # + [markdown] id="O0WfH3eq3nv_" # ## Initial Loading and Training # # You may choose to train a new initial model or to continue to load a specific model. If this notebook is being executed in Colab, you should consider whether or not you need the gdown line. # + id="K1522SUk3nwF" # Mount drive containing possible saved model and define file path. 
colab_model_storage_mount = "/content/gdrive" drive.mount(colab_model_storage_mount) # Retrieve the model from a download link and save it to the drive os.makedirs(logs_directory, exist_ok = True) os.makedirs(checkpoint_directory, exist_ok = True) os.makedirs(model_directory, exist_ok = True) model_directory = F"{model_directory}/{data_set_name}" # #!/content/gdown.pl/gdown.pl "INSERT SHARABLE LINK HERE" "INSERT DOWNLOAD LOCATION HERE (ideally, same as model_directory)" # MAY NOT NEED THIS LINE IF NOT CLONING MODEL FROM COLAB # Load the dataset if data_set_name == "CIFAR10": train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))]) test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))]) full_train_dataset = datasets.CIFAR10(dataset_root_path, download=True, train=True, transform=train_transform, target_transform=torch.tensor) test_dataset = datasets.CIFAR10(dataset_root_path, download=True, train=False, transform=test_transform, target_transform=torch.tensor) nclasses = 10 # NUM CLASSES HERE elif data_set_name == "CIFAR100": train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))]) test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))]) full_train_dataset = datasets.CIFAR100(dataset_root_path, download=True, train=True, transform=train_transform, target_transform=torch.tensor) test_dataset = datasets.CIFAR100(dataset_root_path, download=True, train=False, transform=test_transform, target_transform=torch.tensor) nclasses = 100 # NUM CLASSES HERE elif data_set_name == "MNIST": image_dim=28 train_transform = 
transforms.Compose([transforms.RandomCrop(image_dim, padding=4), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) test_transform = transforms.Compose([transforms.Resize((image_dim, image_dim)), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) full_train_dataset = datasets.MNIST(dataset_root_path, download=True, train=True, transform=train_transform, target_transform=torch.tensor) test_dataset = datasets.MNIST(dataset_root_path, download=True, train=False, transform=test_transform, target_transform=torch.tensor) nclasses = 10 # NUM CLASSES HERE elif data_set_name == "FashionMNIST": train_transform = transforms.Compose([transforms.RandomCrop(28, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) # Use mean/std of MNIST full_train_dataset = datasets.FashionMNIST(dataset_root_path, download=True, train=True, transform=train_transform, target_transform=torch.tensor) test_dataset = datasets.FashionMNIST(dataset_root_path, download=True, train=False, transform=test_transform, target_transform=torch.tensor) nclasses = 10 # NUM CLASSES HERE elif data_set_name == "SVHN": train_transform = transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) # ImageNet mean/std full_train_dataset = datasets.SVHN(dataset_root_path, split='train', download=True, transform=train_transform, target_transform=torch.tensor) test_dataset = datasets.SVHN(dataset_root_path, split='test', download=True, transform=test_transform, target_transform=torch.tensor) nclasses = 10 # NUM CLASSES HERE elif data_set_name == "ImageNet": train_transform = 
transforms.Compose([transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) test_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))]) # ImageNet mean/std # Note: Not automatically downloaded due to size restrictions. Notebook needs to be adapted to run on local device. full_train_dataset = datasets.ImageNet(dataset_root_path, download=False, split='train', transform=train_transform, target_transform=torch.tensor) test_dataset = datasets.ImageNet(dataset_root_path, download=False, split='val', transform=test_transform, target_transform=torch.tensor) nclasses = 1000 # NUM CLASSES HERE args['nclasses'] = nclasses dim = full_train_dataset[0][0].shape # Seed the random number generator for reproducibility and create the initial seed set np.random.seed(42) initial_train_indices = np.random.choice(len(full_train_dataset), replace=False, size=initial_seed_size) # COMMENT OUT ONE OR THE OTHER IF YOU WANT TO TRAIN A NEW INITIAL MODEL load_model = False #load_model = True # Only train a new model if one does not exist. 
if load_model: net.load_state_dict(torch.load(model_directory)) initial_model = net else: dt = data_train(Subset(full_train_dataset, initial_train_indices), net, args) initial_model, _ = dt.train(None) torch.save(initial_model.state_dict(), model_directory) print("Training for", n_rounds, "rounds with budget", budget, "on unlabeled set size", training_size_cap) # + [markdown] id="B9N-4eTMPrZZ" # ## Random Sampling # + id="i4eKSOaiPruO" strategy = "random" strat_logs = logs_directory+F'{data_set_name}/{strategy}/' os.makedirs(strat_logs, exist_ok = True) train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}") # + [markdown] id="bg1XH87hPsCe" # ## Entropy # + id="mRAKMe2RPsTp" strategy = "entropy" strat_logs = logs_directory+F'{data_set_name}/{strategy}/' os.makedirs(strat_logs, exist_ok = True) train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}") # + [markdown] id="NkMYHHwyP5Bd" # ## GLISTER # + id="D0KstkZWP5fT" strategy = "glister" strat_logs = logs_directory+F'{data_set_name}/{strategy}/' os.makedirs(strat_logs, exist_ok = True) train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}") # + [markdown] id="AQmHTnnOP9KU" # ## FASS # + id="tJSuDWowP9fD" strategy = "fass" strat_logs = logs_directory+F'{data_set_name}/{strategy}/' os.makedirs(strat_logs, exist_ok = True) train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}") # + [markdown] id="6ZSiRahu3nwK" # ## BADGE # 
+ id="b5c8AckN3nwK" strategy = "badge" strat_logs = logs_directory+F'{data_set_name}/{strategy}/' os.makedirs(strat_logs, exist_ok = True) train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}") # + [markdown] id="RQeyff_gWp2E" # ## CoreSet # + id="T4osHfoHWp2F" strategy = "coreset" strat_logs = logs_directory+F'{data_set_name}/{strategy}/' os.makedirs(strat_logs, exist_ok = True) train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}") # + [markdown] id="TwlszjoBWqMW" # ## Least Confidence # + id="myi_zJ2UWqMW" strategy = "least_confidence" strat_logs = logs_directory+F'{data_set_name}/{strategy}/' os.makedirs(strat_logs, exist_ok = True) train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}") # + [markdown] id="rv-NjQBFWqWW" # ## Margin # + id="IE0NVJW5WqWW" strategy = "margin" strat_logs = logs_directory+F'{data_set_name}/{strategy}/' os.makedirs(strat_logs, exist_ok = True) train_one(full_train_dataset, initial_train_indices, test_dataset, copy.deepcopy(initial_model), n_rounds, budget, args, nclasses, strategy, strat_logs, checkpoint_directory, F"{experiment_name}_{strategy}")
benchmark_notebooks/baseline/AL Baseline MNIST v2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Least Squares

# +
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq

# Paired observations (Xi, Yi) to fit with a straight line y = k*x + b.
# FIX: `dtype=np.float` was deprecated in NumPy 1.20 and removed in 1.24
# (AttributeError); the builtin `float` is the documented replacement.
Xi = np.array(
    [157, 162, 169, 176, 188, 200, 211, 220, 230, 237, 247, 256, 268, 287, 285, 290, 301, 311, 326, 335, 337, 345,
     348, 358, 384, 396, 409, 415, 432, 440, 448, 449, 461, 467, 478, 493],
    dtype=float)
Yi = np.array(
    [143, 146, 153, 160, 169, 180, 190, 196, 207, 215, 220, 228, 242, 253, 251, 257, 271, 283, 295, 302, 301, 305,
     308, 324, 341, 357, 371, 382, 397, 406, 413, 411, 422, 434, 447, 458],
    dtype=float)


def func(p, x):
    """Linear model y = k*x + b with parameters p = (k, b)."""
    k, b = p
    return k * x + b


def error(p, x, y):
    """Residuals between the model prediction and the observations."""
    return func(p, x) - y


# Initial values of k, b; they can be chosen freely. Experiments show that p0
# affects the cost value Para[1].
p0 = [1, 20]

# Pack the extra arguments of `error` (everything except p0) into args, as
# required by scipy.optimize.leastsq.
Para = leastsq(error, p0, args=(Xi, Yi))

# Unpack the fitted parameters
k, b = Para[0]

# Plot the sample points
plt.figure(figsize=(8, 6))  # figure aspect ratio 8:6
plt.scatter(Xi, Yi, color="green", linewidth=2, label="samples")

# Plot the fitted line (labels added so plt.legend has artists to show and no
# longer warns about an empty legend)
plt.plot(Xi, k * Xi + b, color="red", linewidth=2, label="least-squares fit")
plt.legend(loc='lower right')  # draw the legend
plt.show()
# -

# ## Gradient Descent

import numpy as np
import matplotlib.pyplot as plt

x = np.array(
    [157, 162, 169, 176, 188, 200, 211, 220, 230, 237, 247, 256, 268, 287, 285, 290, 301, 311, 326, 335, 337, 345,
     348, 358, 384, 396, 409, 415, 432, 440, 448, 449, 461, 467, 478, 493],
    dtype=float)
y = np.array(
    [143, 146, 153, 160, 169, 180, 190, 196, 207, 215, 220, 228, 242, 253, 251, 257, 271, 283, 295, 302, 301, 305,
     308, 324, 341, 357, 371, 382, 397, 406, 413, 411, 422, 434, 447, 458],
    dtype=float)


def GD(x, y, learning_rate, iteration_num=10000):
    """Fit y = theta0 + theta1*x by batch gradient descent and plot the result.

    Parameters
    ----------
    x, y : 1-D arrays of observations.
    learning_rate : step size for each gradient update.
    iteration_num : number of gradient steps to take.
    """
    theta = np.random.rand(2, 1)  # random initialization of [intercept, slope]
    x = np.hstack((np.ones((len(x), 1)), x.reshape(len(x), 1)))  # prepend bias column
    y = y.reshape(len(y), 1)
    for i in range(iteration_num):
        # Gradient of the mean-squared-error loss
        grad = np.dot(x.T, (np.dot(x, theta) - y)) / x.shape[0]
        # Parameter update
        theta -= learning_rate * grad
        # Compute MSE (left disabled as in the original)
        # loss = np.linalg.norm(np.dot(x, theta) - y)
    plt.figure()
    plt.title('Learning rate: {}, iteration_num: {}'.format(learning_rate, iteration_num))
    plt.scatter(x[:, 1], y.reshape(len(y)))
    plt.plot(x[:, 1], np.dot(x, theta), color='red', linewidth=3)


GD(x, y, learning_rate=0.00001, iteration_num=1)
GD(x, y, learning_rate=0.00001, iteration_num=3)
GD(x, y, learning_rate=0.00001, iteration_num=10)
GD(x, y, learning_rate=0.00001, iteration_num=100)
GD(x, y, learning_rate=0.000001, iteration_num=1)
GD(x, y, learning_rate=0.000001, iteration_num=3)
GD(x, y, learning_rate=0.000001, iteration_num=10)
GD(x, y, learning_rate=0.000001, iteration_num=100)
MLDL_homework/HW3/51194506093.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd

# Re-split the externally-engineered features into new train/test pickles.
df = pd.read_pickle('../data/external_features/train_external.pickle')
tdf = pd.read_pickle('../data/external_features/test_external.pickle')

# FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the documented, behavior-identical replacement.
alldf = pd.concat([df, tdf])

alldf.shape

# First 80% of the combined rows become the new training split; the rest test.
limit = int(0.8*alldf.shape[0])
df_train = alldf.head(limit)
df_test = alldf.tail(len(alldf) - limit)

df_train.shape, df_test.shape

# NOTE: this overwrites the source pickle files in place.
df_train.to_pickle('../data/external_features/train_external.pickle')
df_test.to_pickle('../data/external_features/test_external.pickle')
notebooks/9_AB_test_external_feas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.7 ('MSCS-basic') # language: python # name: python3 # --- import pandas as pd import numpy as np from sklearn.decomposition import PCA import matplotlib.pyplot as plt df = pd.read_csv('forSVD.csv', index_col=0) df.describe() pd.plotting.scatter_matrix(df) pca = PCA() pca.fit(df) for s in pca.singular_values_: print(s/pca.singular_values_[0]) for ev in pca.explained_variance_: print(ev*100) pca = PCA(n_components=3) pca.fit(df) for s in pca.singular_values_: print(s/pca.singular_values_[0]) for ev in pca.explained_variance_: print(ev*100) g = pd.DataFrame(pca.transform(df), columns=[f'pc{i}' for i in range(pca.n_components)]) g.shape g pd.plotting.scatter_matrix(g) fig = plt.figure() ax = fig.add_subplot(projection='3d') ax.scatter(g['pc0'], g['pc1'], g['pc2']) ax.view_init(270, 0)
Class/22-04-15/22-04-15.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] graffitiCellId="id_oerv9ac" # # Walkthrough: Integrated GPU (IGPU) and the DevCloud # # This notebook is a demonstration showing you how to request an edge node with an Intel i5 CPU and load a model on the integrated GPU using Udacity's workspace integration with Intel's DevCloud. This notebook is just to give you an overview of the process (you won't be writing any code). In the next workspace, you'll be given TODO items to complete. # # Below are the six steps we'll walk through in this notebook: # # 1. Creating a Python script to load the model # 2. Creating a job submission script # 3. Submitting a job using the `qsub` command # 4. Checking the job status using the `liveQStat` function # 5. Retrieving the output files using the `getResults` function # 6. Viewing the resulting output # # Click the **Introduction to IGPU and the DevCloud** button below for a quick overview of the overall process. We'll then walk through each step of the process. # + [markdown] graffitiCellId="id_5niicuw" # <span class="graffiti-highlight graffiti-id_5niicuw-id_efbgzv7"><i></i><button>Introduction to IGPU and the DevCloud</button></span> # + [markdown] graffitiCellId="id_tgl5iqg" # #### IMPORTANT: Set up paths so we can run Dev Cloud utilities # You *must* run this every time you enter a Workspace session. 
# + graffitiCellId="id_50nijg9" # %env PATH=/opt/conda/bin:/opt/spark-2.4.3-bin-hadoop2.7/bin:/opt/conda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/intel_devcloud_support import os import sys sys.path.insert(0, os.path.abspath('/opt/intel_devcloud_support')) sys.path.insert(0, os.path.abspath('/opt/intel')) # + [markdown] graffitiCellId="id_j3phxmn" # ## The Model # # We will be using the `vehicle-license-plate-detection-barrier-0106` model for this exercise. Remember that to run a model on the IGPU, we need to use `FP16` as the model precision. # # The model has already been downloaded for you in the `/data/models/intel` directory on Intel's DevCloud. We will be using the following filepath during the job submission in **Step 3**: # # > **/data/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106** # + [markdown] graffitiCellId="id_k4u4jqs" # # Step 1: Creating a Python Script # # The first step is to create a Python script that you can use to load the model and perform an inference. I have used the `%%writefile` magic command to create a Python file called `load_model_to_device.py`. This will create a new Python file in the working directory. # # **Note**: Up until this point, we've been writing a Python script that hardcoded the device we were using to load the model on, `CPU`. We'll modify our Python script so it is more flexible and will allow us to pass in a device type as a command line argument. # # Click the **Writing a Python Script** button below for a demonstration. 
# + [markdown] graffitiCellId="id_sgni4cq" # <span class="graffiti-highlight graffiti-id_sgni4cq-id_k78kjz8"><i></i><button>Writing a Python Script</button></span> # + graffitiCellId="id_5vc1jfe" # %%writefile load_model_to_device.py import time from openvino.inference_engine import IENetwork from openvino.inference_engine import IECore import argparse def main(args): model=args.model_path model_weights=model+'.bin' model_structure=model+'.xml' start=time.time() model=IENetwork(model_structure, model_weights) core = IECore() net = core.load_network(network=model, device_name=args.device, num_requests=1) print(f"Time taken to load model = {time.time()-start} seconds") if __name__=='__main__': parser=argparse.ArgumentParser() parser.add_argument('--model_path', required=True) parser.add_argument('--device', default=None) args=parser.parse_args() main(args) # + [markdown] graffitiCellId="id_9uu81yk" # ## Step 2: Creating a Job Submission Script # # To submit a job to the DevCloud, we need to create a shell script. Similar to the Python script above, I have used the `%%writefile` magic command to create a shell script called `load_gpu_model_job.sh`. # # This script does a few things. # 1. Writes stdout and stderr to their respective .log files # 2. Creates the `/output` directory # 3. Creates `DEVICE ` and `MODELPATH` variables and assigns their value as the first and second argument passed to the shell script # 4. Calls the Python script using the `MODELPATH` and `DEVICE` variable values as the command line argument # 5. Changes to the `/output` directory # 6. Compresses the stdout.log and stderr.log files to `output.tgz` # # **Note**: Our shell script now takes two command line arguments. # # Click the **Creating a Job Submission Script** button below for a demonstration. 
# + [markdown] graffitiCellId="id_zjuq4f8" # <span class="graffiti-highlight graffiti-id_zjuq4f8-id_1gwff5u"><i></i><button>Creating a Job Submission Script</button></span> # + graffitiCellId="id_btqokbq" # %%writefile load_gpu_model_job.sh exec 1>/output/stdout.log 2>/output/stderr.log # mkdir -p /output DEVICE=$1 MODELPATH=$2 # Run the load model python script python3 load_model_to_device.py --model_path ${MODELPATH} --device ${DEVICE} # cd /output tar zcvf output.tgz stdout.log stderr.log # + [markdown] graffitiCellId="id_vy566p7" # ## Step 3: Submitting a Job to Intel's DevCloud # # The code below will submit a job to an **IEI Tank-870** edge node with an Intel® i5 processor and IGPU. We will load the model on the GPU. # # **Note**: In addition to specifying the model path, we'll also pass in a device type argument of `GPU` to load our model on the IGPU. As a reminder, when running a model on an IGPU, the model precision we'll need is `FP16`. # # The `!qsub` command takes a few command line arguments: # 1. The first argument is the shell script filename - `load_gpu_model_job.sh`. This should always be the first argument. # 2. The `-d` flag designates the directory where we want to run our job. We'll be running it in the current directory as denoted by `.`. # 3. The `-l` flag designates the node and quantity we want to request. The default quantity is 1, so the **1** after `nodes` is optional. # 4. The `-F` flag let's us pass in a string with all command line arguments we want to pass to our Python script. # # **Note**: There is an optional flag, `-N`, you may see in a few exercises. This is an argument that only works on Intel's DevCloud that allows you to name your job submission. This argument doesn't work in Udacity's workspace integration with Intel's DevCloud. # # In the cell below, we assign the returned value of the `!qsub` command to a variable `job_id_core`. This value is an array with a single string. 
# # Once the cell is run, this queues up a job on Intel's DevCloud and prints out the first value of this array below the cell, which is the job id. # # Click the **Submitting a Job to Intel's DevCloud** button below for a demonstration. # + [markdown] graffitiCellId="id_6ehbk52" # <span class="graffiti-highlight graffiti-id_6ehbk52-id_kcckgrx"><i></i><button> Submission of Job onto GPU</button></span> # + graffitiCellId="id_q0rkiao" # job_id_core = !qsub load_gpu_model_job.sh -d . -l nodes=1:tank-870:i5-6500te:intel-hd-530 -F "GPU /data/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106" print(job_id_core[0]) # + [markdown] graffitiCellId="id_miev760" # ## Step 4: Running liveQStat # # Running the `liveQStat` function, we can see the live status of our job. Running the this function will lock the cell and poll the job status 10 times. The cell is locked until this finishes polling 10 times or you can interrupt the kernel to stop it by pressing the stop button at the top: ![stop button](assets/interrupt_kernel.png) # # * `Q` status means our job is currently awaiting an available node # * `R` status means our job is currently running on the requested node # # **Note**: In the demonstration, it is pointed out that `W` status means your job is done. This is no longer accurate. Once a job has finished running, it will no longer show in the list when running the `liveQStat` function. # # Click the **Running liveQStat** button below for a demonstration. # + [markdown] graffitiCellId="id_5bmt3o8" # <span class="graffiti-highlight graffiti-id_5bmt3o8-id_f0vbs8h"><i></i><button> Running liveQStat </button></span> # + graffitiCellId="id_rwv57pw" import liveQStat liveQStat.liveQStat() # + [markdown] graffitiCellId="id_fc44908" # ## Step 5: Retrieving Output Files # # In this step, we'll be using the `getResults` function to retrieve our job's results. This function takes a few arguments. # # 1. 
`job id` - This value is stored in the `job_id_core` variable we created during **Step 3**. Remember that this value is an array with a single string, so we access the string value using `job_id_core[0]`. # 2. `filename` - This value should match the filename of the compressed file we have in our `load_gpu_model_job.sh` shell script. In this example, filename shoud be set to `output.tgz`. # 3. `blocking` - This is an optional argument and is set to `False` by default. If this is set to `True`, the cell is locked while waiting for the results to come back. There is a status indicator showing the cell is waiting on results. # # **Note**: The `getResults` function is unique to Udacity's workspace integration with Intel's DevCloud. When working on Intel's DevCloud environment, your job's results are automatically retrieved and placed in your working directory. # # Click the **Retrieving Output Files** button below for a demonstration. # + [markdown] graffitiCellId="id_etx4vog" # <span class="graffiti-highlight graffiti-id_etx4vog-id_22ysmhm"><i></i><button>Retrieving Output Files</button></span> # + graffitiCellId="id_hpta9zw" import get_results get_results.getResults(job_id_core[0], filename="output.tgz", blocking=True) # + [markdown] graffitiCellId="id_2whnxuq" # ## Step 6: Viewing the Outputs # In this step, we unpack the compressed file using `!tar zxf` and read the contents of the log files by using the `!cat` command. # # `stdout.log` should contain the printout of the print statement in our Python script. # + graffitiCellId="id_7pi0pds" # !tar zxf output.tgz # + graffitiCellId="id_av1dh88" # !cat stdout.log # + graffitiCellId="id_xjt1021" # !cat stderr.log
Choosing the right hardware for OpenVino - Dev Cloud/Notebooks/Int_GPU_and_the_DevCloud.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.10.1 64-bit
#     language: python
#     name: python3
# ---

# # Tip Calculator
#
# ## Instructions
#
# Build a program that calculates how much each person should pay. The inputs are the number of people and the % of the tip.
#

# +
# inputs
print("Welcome to the tip calculator!")
bill = input("What was your total bill? $")
tip = input("How much tip would you like to give? 10, 12 or 15?")
people = input("How many people will split the bill?")

# +
# calculations
# Per-person share: bill plus tip percentage, divided evenly among the group.
total = (float(bill) * (1 + int(tip)/100)) / int(people)
round_total = round(total, 2) # set the result to 2 decimal places

# FIX: typo in the user-facing message ("pearson" -> "person").
print(f"Each person should pay: ${round_total}")
Day-2/Tip_Calculator_Project.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Usage of IF else # + num = 3 if num<2 : print("it is less than 2") else : print("it is bigger than 2") # + num = 3 if num<2 : print("it is less than 2") elif num==2: print("it is equal to 2") else : print("it is bigger than 2") # - _list = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] for item in _list: print(item) for item in _list: if item%2==0: print(item) else: print(f"The number {item} is odd ") for item in range(1,10): if item%2==0: print(item) else: print(f"The number {item} is odd ") # # While Loops in Python # + x=0; while x<5: print(f"current value of f is {x}") x+=1; # -
Python - Statements & Loops.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.6.10 64-bit
#     name: python361064bit494a1c36fce9415db60ad315c97cf403
# ---

import geopandas as gpd
import psycopg2  # FIX: duplicate `import psycopg2` removed

# Connect to the local qalert test database.
# NOTE: "<PASSWORD>" is a redacted placeholder — supply the real credential via
# environment/config rather than hard-coding it.
con = psycopg2.connect(
    database="qalert_test",
    user="docker",
    password="<PASSWORD>",
    host="localhost"
)

sql = "select * from qalert_requests;"
# Read the requests table with the 'point' column as the geometry.
qalert_df = gpd.read_postgis(sql, con, geom_col='point')

qalert_df.head()

# Drop rows whose coordinates were never geocoded (exactly (0, 0)).
qalert_df = qalert_df.drop(qalert_df[(qalert_df['latitude'] == 0.0) & (qalert_df['longitude'] == 0.0)].index)

# +
import folium

# FIX: renamed `map` -> `request_map` so the builtin `map` is not shadowed.
request_map = folium.Map([42.859432, -71.332655], zoom_start=10)
for idx, point in qalert_df.iterrows():
    folium.Marker(
        location=[point['latitude'], point['longitude']],
        popup=point[['type_id']]).add_to(request_map)
# -

request_map
utils/visualize_db/visualize_db.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Code/Environment Preparation # # Let's start by installing nnabla and other required packages first. If you're running on Colab, make sure that your Runtime setting is set as GPU. If not, that can be set up from the top menu (Runtime → change runtime type). Then click Connect on the top right-hand side of the screen before you start. # !pip install musdb norbert pydub nnabla-ext-cuda100 # # Next, clone the code from sony/ai-research-code repository, and then download the [pre-trained weights](https://nnabla.org/pretrained-models/ai-research-code/x-umx/x-umx.h5) to test on music fles(only wav format) and to run evaluation on MUSDB18 dataset. # !git clone https://github.com/sony/ai-research-code.git # %cd ai-research-code/x-umx # !mkdir models # !wget -P models https://nnabla.org/pretrained-models/ai-research-code/x-umx/x-umx.h5 # # Testing # # If you do not have sample music files, such files can be downloaded from [this link](https://www.ee.columbia.edu/~dpwe/sounds/music/). Please note that because memory requirement is high, we suggest users to either try with music smaller than 15 seconds or set --chunk-dur with values appropriate for each computer. It is used to break audio into smaller chunks, separate sources and stitch them back together. If your inference fails, kindly reduce chunk duration and try again. 
# + #To upload music files from local machine from google.colab import files uploaded_file = files.upload() filename = list(uploaded_file.keys())[0] # - # !python test.py --inputs $filename --out-dir results --model models/x-umx.h5 # # Listen To Audio Seperated Files # + cellView="form" #@title Choose a separate track track = 'vocals' #@param ["bass", "drums", "vocals", "other"] import IPython.display as ipd ipd.Audio(f'results/{track}.wav')
x-umx/X-UMX.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import project_functionssss as pf import numpy as np import matplotlib.pyplot as plt import seaborn as sns sns.set_style("whitegrid") # - df = pf.load_and_process('../../data/raw/Statistics on a Blockbreaker-like Game.csv') df # Here's the processed dataset. Look at all this worthless info. df.describe(include=np.number).T # This isn't really useful, but it's nifty to look at. sns.displot(x="Accuracy", hue="Win?", multiple="stack", data=df, palette='pastel') # A higher accuracy means more wins. Who could have guessed? Probably only everyone. sns.displot(x="Level", hue="Win?", multiple="stack", data=df, palette='bright', aspect=7) # This really shows how innacurate the old difficulty ratings are. sns.displot(x="New Difficulty", hue="Win?", multiple="stack", data=df, palette='muted', aspect=7) # This really shows how innacurate my new difficulty ratings are. # At least my new difficulty ratings are *slightly* better than the old ones. Maybe. Probably not. sns.lineplot(data = df, x="Number of Blocks", y="Score", hue="Win?", palette='dark') # More blocks means more score for winners. I suppose anyone could have guessed that one. sns.scatterplot(y="New Difficulty", x="Score", data=df, palette='deep') # High difficulty makes for a low score, low difficulty makes for a high score. # My new difficulty ratings may not be very good, but at least they almost make sense.
analysis/Paul/.ipynb_checkpoints/milestone2-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Currency Conversion with Matrix Multiplication # # In this notebook you will solve a currency problem using matrix multiplication and the python package [NumPy](http://www.numpy.org/). This demonstration is provided to prepare you for using matrix multiplication to solve more complex problems. # # ## Currency Conversion Problem # # Over the years you have traveled to eight different countries and just happen to have leftover local currency from each of your trips. # You are planning to return to one of the eight countries, but you aren't sure which one just yet. # You are waiting to find out which will have the cheapest airfare. # # In preparation, for the trip you *will* want convert *all* your local currency into the currency local of the place you will be traveling to. # Therefore, to double check the bank's conversion of your currency, you want to compute the total amount of currency you would expect for each of the eight countries. # To compute the conversion you first need to import a matrix that contains the currency conversion rates for each of the eight countries. The data we will be use comes from the [Overview Matrix of Exchange Rates from Bloomberg Cross-Rates _Overall Chart_](https://www.bloomberg.com/markets/currencies/cross-rates) on January, 10 2018. # <img src="currencyProbImage.png" height=300 width=750> # # # You can think about this problem as taking a _vector of **inputs**_ (the currencies from the 8 countries) and applying a _matrix of **weights**_ (the conversion rates matrix) to these inputs to produce a _vector of **outputs**_ (total amount of currency for each country) using matrix multiplication with the NumPy package. 
# ### Coding the Currency Conversion Problem
# First you will need to create the _**inputs** vector_ that holds the currency you have from the eight countries into a numpy vector. To begin, first import the NumPy package and then use the package to create a vector from a list. Next we convert the vector into a pandas dataframe so that it will print out nicely below with column labels to indicate the country the currency amount is associated to.

# +
import numpy as np
import pandas as pd

# Creates numpy vector from a list to represent money (inputs) vector.
# Element order matches currency_label below: USD, EUR, JPY, GBP, CHF, CAD, AUD, HKD.
money = np.asarray([70, 100, 20, 80, 40, 70, 60, 100])

# Creates pandas dataframe with column labels (currency_label) from the numpy vector for printing.
currency_label = ["USD", "EUR", "JPY", "GBP", "CHF", "CAD", "AUD", "HKD"]
money_df = pd.DataFrame(data=money, index=currency_label, columns=["Amounts"])
print("Inputs Vector:")
# Transposed so the eight currencies display as columns of a single row.
money_df.T
# -

# Next we need to create the _**weights** matrix_ by importing the currency conversion rates matrix. We will use python package [Pandas](https://pandas.pydata.org/) to quickly read in the matrix and appropriately assign row and column labels. Additionally, we define a variable **_path_** to define the location of the currency conversion matrix. The code below imports this weights matrix, converts the dataframe into a numpy matrix, and displays its content to help you determine how to solve the problem using matrix multiplication.

# +
# Sets path variable to the 'path' of the CSV file that contains the conversion rates (weights) matrix.
# NOTE: jupytext comments out IPython magics in the .py form; in the notebook this
# line runs as `%pwd` and defines the `path` variable used below.
# path = %pwd

# Imports conversion rates (weights) matrix as a pandas dataframe.
# First row is the header, first column is the row index (currency codes).
conversion_rates_df = pd.read_csv(path+"/currencyConversionMatrix.csv",header=0,index_col=0)

# Creates numpy matrix from a pandas dataframe to create the conversion rates (weights) matrix.
conversion_rates = conversion_rates_df.values

# Prints conversion rates matrix.
print("Weights Matrix:")
conversion_rates_df
# -

# The _**weights** matrix_ above provides the conversion rates between each of the eight countries. For example, in row 1, column 1 the value **1.0000** represents the conversion rate from US dollars to US dollars. In row 2, column 1 the value **1.1956** represents that 1 Euro is worth **1.1956** US dollars. In row 1, column 2 the value **0.8364** represents that 1 US dollar is only worth **0.8364** Euro.

# The _**outputs** vector_ is computed below using matrix multiplication. The numpy package provides the [function _**matmul**_](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html) for multiplying two matrices (or a vector and a matrix). Below you will find the equation for matrix multiplication as it applies to AI, where the _**inputs** vector_ ($x_{1}...x_{n}$) is multiplied by the _**weights** matrix_ ($w_{11}...w_{nm}$) to compute the _**outputs** vector_ ($y_{1}...y_{m}$).
#
# $\hspace{4cm} \begin{bmatrix} x_{1}&x_{2}&...&x_{n}\end{bmatrix} \begin{bmatrix} w_{11}&w_{12}&...&w_{1m}\\ w_{21}&w_{22}&...&w_{2m}\\ ...&...&...&... \\ w_{n1}&w_{n2}&...&w_{nm}\end{bmatrix} = \begin{bmatrix} y_{1}&y_{2}&...&y_{m}\end{bmatrix}$
#
# The example matrix multiplication below, has $n$ as 4 in **inputs** and **weights** and $m$ as 3 in **weights** and **outputs**.
#
# $\hspace{4cm} \begin{bmatrix} 10 & 2 & 1 & 5\end{bmatrix} \begin{bmatrix} 1 & 20 & 7\\ 3 & 15 & 6 \\ 2 & 5 & 12 \\ 4 & 25 & 9 \end{bmatrix} = \begin{bmatrix} 38 & 360 & 139 \end{bmatrix}$
#
# As seen with the example above, matrix multiplication's resulting matrix (_**outputs** vector_) will have the same row dimension as the first matrix (_**inputs** vector_) and the same column dimension as the second matrix (_**weights** matrix_). With the currency example the number of columns in the inputs and weights matrices are the same, but this won't always be the case in AI.
# ## TODO: Matrix Multiplication # Replace the **None** below with code that uses the [function _**matmul**_](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html) for multiplying **money** and **conversion_rates** to compute the vector **money_totals**. Recall that we used the alias _**np**_ when we imported the Numpy package above, so be certain to use the _**np**_ alias when calling the _**matmul**_ function below. Additionally, be certain to select _'Cell'_ and _'Run All'_ to check the code you insert below. # + # TODO 1.: Calculates the money totals(outputs) vector using matrix multiplication in numpy. money_totals = None # Converts the resulting money totals vector into a dataframe for printing. money_totals_df = pd.DataFrame(data = money_totals, index = currency_label, columns = ["Money Totals"]) print("Outputs Vector:") money_totals_df.T # - # ### Solution for Currrency Conversion with Matrix Multiplication # Your output from above should match the **Money Totals** displayed below. If you need any help or want to check your answer, feel free to check out the solution notebook by clicking [here](matrixMultCurrencySolution.ipynb). The results can be interperted as converting all the currency to US dollars(**USD**) would provide **454.28** US dollars, converting all the currency to Euros(**EUR**) would provide **379.96** Euros, and etc. # # <img src="money_totals.png" height=225 width=563> # # ### Solution Video for Currrency Conversion with Matrix Multiplication # The solution video can be found in the **Linear Mapping Lab Solution** section. You may want to open another browser window to allow you to easily toggle between the Vector's Lab Jupyter Notebook and the solution videos for this lab.
3 - Linear Algebra Essentials/4. Matrix Multi Currency/matrixMultCurrency.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
import requests

# SPARQL Update endpoint of the local GraphDB 'data' repository.
endpoint = "http://graphdb:7200/repositories/data/statements"


def addMapping(localTerm, targetClass, superClass, dataType):
    """Insert an owl:equivalentClass axiom mapping a local data value to a class.

    Adds, into the named graph <http://annotation.local/>, an axiom stating
    that `targetClass` is equivalent to the intersection of `superClass` and
    the restriction fun:has_value = dataType(localTerm).

    Parameters:
        localTerm  -- raw value found in the local dataset (e.g. "0"), as a string
        targetClass -- prefixed IRI of the class being mapped (e.g. "fun:Masculine_gender")
        superClass  -- prefixed IRI of the superclass/category (e.g. "fun:Gender_finding")
        dataType    -- XSD constructor used to type the raw value (e.g. "xsd:integer")

    Side effects: POSTs the update to `endpoint` and prints the HTTP status code.
    """
    query = """
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX ICF: <https://www.who.int/icf#>
PREFIX SCTID: <http://snomed.info/sct/>
PREFIX fun: <https://github.com/ERCJanssen/Functionomics/blob/master/Functionomics28052021.owl#>

INSERT {
    GRAPH <http://annotation.local/> {
        ?term owl:equivalentClass [
            rdf:type owl:Class;
            owl:intersectionOf [
                rdf:first ?superClass;
                rdf:rest [
                    rdf:first [
                        rdf:type owl:Class;
                        owl:unionOf [
                            rdf:first [
                                rdf:type owl:Restriction;
                                owl:hasValue ?localValue;
                                owl:onProperty fun:has_value;
                            ];
                            rdf:rest rdf:nil;
                        ]
                    ];
                    rdf:rest rdf:nil;
                ]
            ]
        ].
    }
} WHERE {
    BIND(%s AS ?term).
    BIND(%s AS ?superClass).
    BIND(%s("%s") AS ?localValue).
}
""" % (targetClass, superClass, dataType, localTerm)
    # Pass the update as a dict so `requests` percent-encodes it properly.
    # The previous manual concatenation ("update=" + query) sent the query
    # unencoded, which would silently corrupt any query containing the
    # form-encoding metacharacters '&', '=', '+' or '%'.
    annotationResponse = requests.post(
        endpoint,
        data={"update": query},
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    print(annotationResponse.status_code)


# +
# Sex: 0 = female, 1 = male.
# NOTE(review): confirm the 'Feminnine_gender' spelling against the ontology IRI —
# it looks like a typo but must match the class name actually declared there.
addMapping("0", "fun:Feminnine_gender", "fun:Gender_finding", "xsd:integer")
addMapping("1", "fun:Masculine_gender", "fun:Gender_finding", "xsd:integer")

# ASA class — mappings drafted but not yet enabled.
# TODO: check these against the ontology before enabling; also note the original
# 'xsd:interger' typo must be fixed to 'xsd:integer' when they are turned on.
#addMapping("0", "SCTID:413495001", "LSF:ASA_1", "xsd:interger")
#addMapping("1", "SCTID:413496000", "LSF:ASA_2", "xsd:interger")
#addMapping("2", "SCTID:413497009", "LSF:ASA_3","xsd:interger")

# Disorder of the back — drafts are malformed/incomplete (missing superclass argument).
#addMapping("0", "SCTID:202725007", LSF:"xsd:interger")
#addMapping("1", "SCTID:443700006", "xsd:interger")
#addMapping("2", "SCTID:32117100119102", "xsd:interger")

# Post-operative complication — drafts incomplete (missing superclass argument).
#addMapping("0", "LSF:no_complication", "xsd:interger")
#addMapping("1", "LSF:complication", "xsd:interger")

# Procedure — drafts incomplete (missing superclass argument).
#addMapping("0", "SCTID:277764006", "xsd:interger")
#addMapping("1", "SCTID:10420000", "xsd:interger")
#addMapping("2", "LSF:spondylodese", "xsd:interger")
# -
notebooks/termMapping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # ## `eppy` Tutorial Scenarios # # Created by: <NAME> # # The goal of this tutorial is to show how to use the `eppy` library in a realistic setting by performing the following tasks: # # - splitting an IDF file into 'modules' that can be developed individually # # - reassembling the modules into IDF files for simulation # # - creating parametric run files from a single IDF file and a spreadsheet # # ## Scenario #1. Splitting an IDF file into 'modules' # # In order to illustrate this process, we will use an example EnergyPlus IDF file from the U.S. DOE library of example input files: # # http://energy.gov/eere/buildings/commercial-reference-buildings from eppy import modeleditor from eppy.modeleditor import IDF # eppy requires that we load the `.idd` from EnergyPlus and the `.idf` that we want to manipulate IDF.setiddname("Energy+V7_2_0.idd") wholeidffile = IDF("RefBldgLargeOfficeNew2004_v1.4_7.2_5A_USA_IL_CHICAGO-OHARE.idf") # The `.idfobjects` function is what we use to look inside the idf to view all of the object types and instances objectlist = wholeidffile.idfobjects print objectlist objectnamelist = [] for objectname in objectlist: objectnamelist.append(objectname) # Let's use the `pandas` library again to use the `Series` and `.csv` file import/export capabilities import pandas as pd # Here we create a pandas series (1-dimensional) of the possible object 'types' within EnergyPlus nameseries = pd.Series(objectnamelist) nameseries # We'll export this list so that we can add the 'module' categories using excel nameseries.to_csv('objectnames.csv') # Now, we upload the created `csv` file in which we have already manually categorized each of the object types such that we can divide the IDF into 'modules' categorizedobjects = 
pd.read_csv('catagorized_objectnames.csv', header=None, names=['index','objectname','objectcatagory'], index_col='index') # Let's use the `.unique()` function to get a list of the categories that we put into the spreadsheet categorizedobjects.objectcatagory.unique() # Let's loop through and call each category and get the objectnames within each category for category in categorizedobjects.objectcatagory.unique(): print list(categorizedobjects[(categorizedobjects.objectcatagory == category)].objectname) # Let's call an individual object 'type' of `AIRLOOPHVAC` and get a list of the individual object instances within the whole IDF file wholeidffile.idfobjects["AIRLOOPHVAC"] objectnamelist = wholeidffile.idfobjects["AIRLOOPHVAC"] # We can loop through this list and call the individual object instance 'names' for objectname in objectnamelist: print objectname.Name # Here we will go through relatively complex series of `for` loops to: # # 1. Loop through each category type # - Loop through each object in each category # - Extract these objects and copy them into their own 'category' idf file for category in categorizedobjects.objectcatagory.unique(): #Create a list of the objects in each category list_of_category_objs = list(categorizedobjects[(categorizedobjects.objectcatagory == category)].objectname) #Open the blank IDF file with the same name as the categories catIDF = IDF("./blankidftemplates/"+category+".idf") for catobj in list_of_category_objs: # print category + catobj # print wholeidffile.idfobjects[catobj] objectnamelist = wholeidffile.idfobjects[catobj] if len(objectnamelist) != 0: # print objectnamelist for idfobject in objectnamelist: try: print idfobject.Name except: print "No Name field" #Add each object to the new 'modularized' idf file catIDF.copyidfobject(idfobject) catIDF.saveas("./modularizedidfs/"+category+"_updated.idf") # ## 2. 
Scenario #2 -- Parameteric modeling # # Now let's say that we want to go through and create a series of new IDF files with each file have slightly different values for a particular variable. In this example, we are going to create 10 different IDF files, each having different `ZoneInfiltration:DesignFlowRate` values. # # First, let's reload the whole idf file: wholeidffile = IDF("RefBldgLargeOfficeNew2004_v1.4_7.2_5A_USA_IL_CHICAGO-OHARE.idf") # Let's see all of the existing `ZONEINFILTRATION:DESIGNFLOWRATE` objects wholeidffile.idfobjects['ZONEINFILTRATION:DESIGNFLOWRATE'] # Ok, so it looks like all of these objects have a 'Flow per Exterior Surface Area' of 0.000302. # # ### Let's create 10 new IDF files that have values for this particular field of an equal range between 0.0001 and 0.0010. # # First create a list of the values: import numpy flowpersurfacearea_list = numpy.linspace(0.0001, 0.001, num=10) flowpersurfacearea_list # If we loop through the infiltration objects, we will see that all of them are set to `0.00032` for objectinstance in wholeidffile.idfobjects['ZONEINFILTRATION:DESIGNFLOWRATE']: print objectinstance['Flow_per_Exterior_Surface_Area'] # Let's loop through each of these values, change the particular field in the `ZONEINFILTRATION:DESIGNFLOWRATE` and create an IDF file for each situation for flowpersurfacearea in flowpersurfacearea_list: print "Creating IDF file with ZONEINFILTRATION:DESIGNFLOWRATE of "+ str(flowpersurfacearea) for objectinstance in wholeidffile.idfobjects['ZONEINFILTRATION:DESIGNFLOWRATE']: objectinstance['Flow_per_Exterior_Surface_Area'] = flowpersurfacearea wholeidffile.saveas("./ParametricIDF/IDF_"+str(flowpersurfacearea)+".idf")
4_ParametericInputFileCreation/.ipynb_checkpoints/eppy IDF file manipulation-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # knn # # 接下来对KNN算法的思想总结一下:就是在训练集中数据和标签已知的情况下,输入测试数据,将测试数据的特征与训练集中对应的特征进行相互比较,找到训练集中与之最为相似的前K个数据,则该测试数据对应的类别就是K个数据中出现次数最多的那个分类,其算法的描述为: # # 1)计算测试数据与各个训练数据之间的距离; # # 2)按照距离的递增关系进行排序; # # 3)选取距离最小的K个点; # # 4)确定前K个点所在类别的出现频率; # # 5)返回前K个点中出现频率最高的类别作为测试数据的预测分类。 # # [https://www.cnblogs.com/ybjourney/p/4702562.html](https://www.cnblogs.com/ybjourney/p/4702562.html) # + #coding:utf-8 from numpy import * import operator #给出训练数据以及对应的类别 def createDataSet(): group = array([[1.0,2.0],[1.2,0.1],[0.1,1.4],[0.3,3.5]]) labels = ['A','A','B','B'] return group,labels #通过KNN进行分类 def classify(input,dataSet,label,k): dataSize = dataSet.shape[0] #计算欧式距离 diff = tile(input,(dataSize,1)) - dataSet sqdiff = diff ** 2 squareDist = sum(sqdiff,axis = 1)#行向量分别相加,从而得到新的一个行向量 dist = squareDist ** 0.5 #对距离进行排序 sortedDistIndex = argsort(dist)#argsort()根据元素的值从大到小对元素进行排序,返回下标 classCount={} for i in range(k): voteLabel = label[sortedDistIndex[i]] #对选取的K个样本所属的类别个数进行统计 classCount[voteLabel] = classCount.get(voteLabel,0) + 1 #选取出现的类别次数最多的类别 maxCount = 0 for key,value in classCount.items(): if value > maxCount: maxCount = value classes = key return classes #-*-coding:utf-8 -*- import sys sys.path.append("...文件路径...") from numpy import * dataSet,labels = createDataSet() input = array([1.1,0.3]) K = 3 output =classify(input,dataSet,labels,K) print("测试数据为:",input,"分类结果为:",output) # + import tensorflow as tf import numpy as np import random from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data',one_hot=True) mnist # + test_data_input=tf.placeholder(shape=[None,784],dtype=tf.float32) train_data_input=tf.placeholder(shape=[None,784],dtype=tf.float32) f1=tf.expand_dims(test_data_input,1) f2=tf.subtract(train_data_input,f1) 
test_data_input,f1,f2
deep_learn/knn.ipynb