repo_name
stringlengths
6
67
path
stringlengths
5
185
copies
stringlengths
1
3
size
stringlengths
4
6
content
stringlengths
1.02k
962k
license
stringclasses
15 values
NauIceLab/SpectralAnalysis
oldGTanalysis.py
4
17389
##############################################################################
# Created by Garrett Thompson
# Graphical User Interface for Data Analysis
# Created at Northern Arizona University
# for use in the Astrophysical Ice Laboratory
# Advisors: Jennifer Hanley, Will Grundy, Henry Roe
# garrett.leland.thompson@gmail.com
#
# NOTE(review): ported to Python 3 (the original was Python 2 and mixed
# `print` statements with `print()` calls); several crash bugs in the
# matplotlib click handlers were fixed (see comments marked BUG FIX).
##############################################################################
import os
import csv
import time
import warnings

import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit as cf  # kept: part of the original file
from scipy.fftpack import fft, fftfreq, ifft
from scipy.signal import savgol_filter as sgf
from scipy.integrate import trapz

# Python 2/3 compatibility: the script was written against raw_input().
try:
    raw_input
except NameError:
    raw_input = input


def main():
    """Drive the interactive workflow: import, filter, continuum-fit, integrate."""
    folder_to_save = choose_dir()
    # choose files for analysis
    raw_x, raw_y, raw_xbg, raw_ybg = choose_files(folder_to_save)
    print("Plotting imported data...")
    plotting_data_for_inspection(raw_x, raw_y, 'Raw Data', 'Wavenumber (cm-1)',
                                 '% Transmittance', 'rawspectrum.pdf',
                                 folder_to_save, False)
    plotting_data_for_inspection(raw_xbg, raw_ybg, 'Raw Background',
                                 'Wavenumber (cm-1)', '% Transmittance',
                                 'rawbackground.pdf', folder_to_save, False)

    # user chooses filtering method after inspecting the plots
    user_method = str(raw_input('Press "s" for savitsky-golay filter, or "f" for fft filter\n:'))
    choosing = True
    while choosing:
        if user_method.lower() == 's':
            # Savitzky-Golay option was chosen
            choosing = False
            args_list = [folder_to_save, raw_y, raw_ybg, raw_x]
            raw_x, norm_smooth = sgf_calc(args_list)
            plot_data(raw_x, norm_smooth, folder_to_save)
        elif user_method.lower() == 'f':
            # FFT option was chosen: user marks frequency windows to cut
            choosing = False
            frq_x, frq_xbg, fft_y, fft_ybg = fft_calculation(
                raw_x, raw_y, raw_xbg, raw_ybg, folder_to_save)
            plot_figure, plot_axis = plotting_data_for_inspection(
                frq_x, np.log(abs(fft_ybg)), 'FFT of raw bg',
                'Cycles/Wavenumber (cm)', 'Log(Power/Frequency)',
                'fft_background.pdf', folder_to_save, False)
            filt_y = fft_y.copy()
            filt_ybg = fft_ybg.copy()
            raw_input('Zoom to liking, then press enter to start')
            print('Left to add, middle to remove nearest, and right to finish')
            vert_lines = []
            frq_cid = plot_figure.canvas.mpl_connect(
                'button_press_event',
                lambda event: freq_click(event, [frq_x, fft_ybg, plot_figure,
                                                 plot_axis, vert_lines, filt_y,
                                                 filt_ybg, folder_to_save,
                                                 raw_x]))
            plt.show()
            plot_figure.canvas.mpl_disconnect(frq_cid)
        else:
            # BUG FIX: the original looped forever on any other key without
            # re-prompting; ask again until a valid choice is entered.
            user_method = str(raw_input('Please press "s" or "f"\n:'))


def save_as_csv(folder_to_save, title, column1_title, column2_title,
                column1_data, column2_data):
    """Write two equally long columns to <folder_to_save>/<title> as CSV."""
    os.chdir(folder_to_save)
    # newline='' prevents blank rows on Windows under Python 3's csv module
    with open(title, "w", newline='') as f:
        writer = csv.writer(f)
        writer.writerow([column1_title, column2_title])
        writer.writerows(zip(column1_data, column2_data))
    os.chdir('..')


def fft_calculation(raw_x, raw_y, raw_xbg, raw_ybg, folder_to_save):
    """Calculate FFTs of data and background for nipping unwanted frequencies.

    Returns
    -------
    (frq_x, frq_xbg, fft_y, fft_ybg): frequency axes plus the complex FFTs
    of the spectrum and of the background.
    """
    fft_y = fft(raw_y)
    fft_ybg = fft(raw_ybg)
    # frequency axes derived from the (assumed uniform) sample spacing
    frq_x = fftfreq(len(fft_y), ((max(raw_x) - min(raw_x)) / len(fft_y)))
    frq_xbg = fftfreq(len(fft_ybg), ((max(raw_xbg) - min(raw_xbg)) / len(fft_ybg)))
    save_as_csv(folder_to_save, "FFT_Raw_bg_data.csv",
                "frq_x", "log(abs(fft_bg))", frq_x, np.log(abs(fft_ybg)))
    return frq_x, frq_xbg, fft_y, fft_ybg


def choose_dir():
    """Ask the user for a directory name, create it and drop a timestamp file.

    Returns the directory name; all later output is saved inside it.
    """
    folder_to_save = raw_input('Type name of directory to save all data being created\n:')
    os.mkdir(folder_to_save)
    os.chdir(folder_to_save)
    # record when the program was run, for future reference
    with open("time_created.txt", "w") as text_file:
        text_file.write("Time this program was run: {} \n".format(
            time.strftime("%Y-%m-%d %H:%M")))
    os.chdir('..')
    return folder_to_save


def plotting_data_for_inspection(xdata, ydata, plot_title, plot_xlabel,
                                 plot_ylabel, filename_for_saving,
                                 folder_to_save, block_boolean):
    """Plot data for the user to look at within the program.

    Parameters
    ----------
    xdata, ydata: x and y data to be plotted
    plot_title: figure title
    plot_xlabel, plot_ylabel: labels for the x and y axes
    filename_for_saving: file name used when saving the figure for later
        reference
    folder_to_save: directory the figure is saved into
    block_boolean: True or False, tells if program waits for figure to close

    Returns the (figure, axis) pair so callers can attach event handlers.
    """
    plot_figure, plot_axis = plt.subplots()
    plt.plot(xdata, ydata, color='blue')
    plt.xlabel(plot_xlabel)
    plt.ylabel(plot_ylabel)
    plt.suptitle(plot_title)
    plt.show(block=block_boolean)
    os.chdir(folder_to_save)
    plt.savefig(filename_for_saving)
    os.chdir('..')
    return plot_figure, plot_axis


def choose_files(folder_to_save):
    """Let the user pick data/background files and record the choices.

    Also prompts for (and saves) the mixture concentration and temperature,
    which are used later when plotting.
    """
    raw_import = str(raw_input('Enter a raw dataset for analysis\n:'))
    print("\nGot it! Importing now... \n")
    raw_x, raw_y = import_data(raw_import)
    bg_import = str(raw_input('Enter a raw background for analysis\n:'))
    print("\nGot it! Importing now... \n")
    raw_xbg, raw_ybg = import_data(bg_import)
    os.chdir(folder_to_save)
    with open("data_files_used.txt", "w") as text_file:
        text_file.write("Raw data file used: {} \n".format(raw_import))
        text_file.write("Raw background data file used: {}".format(bg_import))
    concentration = str(raw_input('Enter concentration of mixture\n:'))
    # saving text file of concentration for later use in plotting
    with open("concentration.txt", "w") as f:
        f.write(concentration)
    temperature = str(raw_input('Enter temperature of mixture\n:'))
    # saving text file of temperature for later use in plotting
    with open("temperature.txt", "w") as f:
        f.write(temperature)
    os.chdir('..')
    return raw_x, raw_y, raw_xbg, raw_ybg


def import_data(filename):
    """Load a two-column CSV (all ice-lab data is CSV) into (x, y) arrays."""
    raw_data = np.loadtxt(filename, delimiter=",")
    xdat = raw_data[:, 0]
    ydat = raw_data[:, 1]
    return xdat, ydat


def freq_click(event, args_list):
    """Mouse handler for choosing the FFT frequency window.

    left click   - add a vertical marker line
    middle click - remove the marker nearest the click
    right click  - save the figure/markers, apply the filter and continue
    """
    (frq_x, fft_ybg, plot_figure, plot_axis, vert_lines,
     filt_y, filt_ybg, folder_to_save, raw_x) = args_list
    if event.xdata is None:
        # robustness: a click outside the axes has no data coordinates
        return
    plt.xlim(plt.gca().get_xlim())
    plt.ylim(plt.gca().get_ylim())
    if event.button == 1:
        vert_lines.append(event.xdata)
        plot_axis.plot(frq_x, np.log(np.abs(fft_ybg)), color='blue')
        for val in vert_lines:
            plt.axvline(x=val, color='black')
        plt.xlabel('Cycles/Wavenumber')
        plt.ylabel('Relative Intensity')
        # draws points as they are added
        plt.draw()
    if event.button == 2:
        # middle click: remove the closest vertical line
        print('pop!')
        # save the current zoom before clearing the axes
        xlims = plt.gca().get_xlim()
        ylims = plt.gca().get_ylim()
        # clear axes to get rid of the old markers, then re-plot the spectrum
        plot_axis.cla()
        plot_axis.plot(frq_x, np.log(np.abs(fft_ybg)), color='blue')
        plt.xlim(xlims)
        plt.ylim(ylims)
        plt.xlabel('Cycles/Wavenumber')
        plt.ylabel('Relative Intensity')
        # BUG FIX: vert_lines is a plain list, so the original
        # `np.abs(vert_lines - event.xdata)` raised TypeError; also guard
        # against deleting from an empty list.
        if vert_lines:
            xindx = np.abs(np.asarray(vert_lines) - event.xdata).argmin()
            del vert_lines[xindx]
        for line in vert_lines:
            plt.axvline(x=line, color='black')
        plt.draw()
    if event.button == 3:
        # right click: done clicking; persist choices and run the filter
        os.chdir(folder_to_save)
        plt.savefig('FFT_filter.pdf')
        with open("freq_window.csv", "w", newline='') as f:
            writer = csv.writer(f)
            writer.writerow(["Xposition of vert. line"])
            writer.writerows(zip(vert_lines))
        os.chdir('..')
        plt.close("all")
        argslist = [vert_lines, frq_x, filt_y, filt_ybg]
        filt_y, filt_ybg = window_filter(argslist)
        fft_calc(filt_y, filt_ybg, raw_x, folder_to_save)


def fft_calc(filt_y, filt_ybg, raw_x, folder_to_save):
    """Inverse-transform the filtered spectra and divide out the background."""
    norm_fft = ifft(filt_y) / ifft(filt_ybg)
    save_as_csv(folder_to_save, "fft_data.csv", "raw_x", "fft_filt",
                raw_x, norm_fft.real)
    plot_data(raw_x, norm_fft.real, folder_to_save)


def sgf_calc(args_list):
    """Savitzky-Golay smooth the spectrum and background, then divide them.

    Prompts for the window size (odd) and polynomial order, records both,
    writes the normalised data to sgf_data.csv and returns
    (raw_x, norm_smooth).
    """
    folder_to_save, raw_y, raw_ybg, raw_x = args_list
    # suppress a known harmless scipy warning emitted by the SGF path
    warnings.filterwarnings(action="ignore", module="scipy",
                            message="^internal gelsd")
    window_param = int(raw_input('Input window box size (must be odd number)\n:'))
    poly_param = int(raw_input('Input polynomial order for smoothing\n:'))
    # saving parameters chosen for future inspection
    os.chdir(folder_to_save)
    with open("sgf_params.txt", "w") as sgf_file:
        sgf_file.write("Window parameter used: {} \n".format(window_param))
        sgf_file.write("Polynomial parameter used: {}".format(poly_param))
    # BUG FIX: the original passed a nonsensical `delta` built from y-values;
    # delta only affects derivative output (deriv > 0), which is not used
    # here, so the default is equivalent and clearer.
    smoothed_y = sgf(raw_y, window_param, poly_param)
    smoothed_ybg = sgf(raw_ybg, window_param, poly_param)
    # dividing filtered y data by the filtered background
    norm_smooth = smoothed_y / smoothed_ybg
    rows = zip(raw_x, norm_smooth)
    with open("sgf_data.csv", "w", newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["window", "polynomial order"])  # typo fixed
        writer.writerow([window_param, poly_param])
        writer.writerow(["raw_x", "sgf_filt"])
        writer.writerows(rows)
    os.chdir('..')
    return raw_x, norm_smooth


def window_filter(args_list):
    """Zero the FFT coefficients inside the chosen frequency window.

    The last two clicked vertical lines define the window; it is applied
    symmetrically to positive and negative frequencies.
    """
    vert_lines, frq_x, filt_y, filt_ybg = args_list
    # robustness: sort so the window is valid regardless of click order
    window_min, window_max = sorted([vert_lines[-2], vert_lines[-1]])
    frq = np.asarray(frq_x)
    mask = (((frq >= window_min) & (frq <= window_max)) |
            ((frq > -window_max) & (frq < -window_min)))
    filt_y[mask] = 0
    filt_ybg[mask] = 0
    return filt_y, filt_ybg


def plot_data(x, y, folder_to_save):
    """Show the divided/filtered spectrum and start the continuum-fit picker."""
    plot_figure, plot_axis = plotting_data_for_inspection(
        x, y, "Divide and Filtered Spectrum", "Wavenumber cm-1",
        "Relative Intensity", "dv_filt_spectrum.pdf", folder_to_save, False)
    # BUG FIX: np.polyfit requires an int degree; the original kept the
    # user's answer as a string, which broke the later fit.
    order = int(raw_input('Zoom to liking and then enter what order polynomial for continuum fit\n:'))
    xcoords, ycoords = [], []
    # tells python to turn on awareness for button presses
    global cid
    cid = plot_figure.canvas.mpl_connect(
        'button_press_event',
        lambda event: onclick(event, [xcoords, ycoords, plot_figure, plot_axis,
                                      order, folder_to_save, x, y]))
    print('Left to add, middle to remove nearest, and right to finish')
    plt.show()


# for creating continuum fit to divide out
def onclick(event, argslist):
    """Mouse handler for building the continuum polynomial fit.

    left click   - add an anchor point and refit the polynomial
    middle click - remove the anchor nearest the click and refit
    right click  - save the fit and continue to the absorption calculation
    """
    xcoords, ycoords, plot_figure, plot_axis, order, folder_to_save, x, y = argslist
    global pvals
    if event.button == 1:
        # left click
        if event.xdata is None:
            return  # robustness: click was outside the axes
        plt.xlim(plt.gca().get_xlim())
        plt.ylim(plt.gca().get_ylim())
        try:
            # only delete if a curve-fit line has already been drawn
            if len(plot_axis.lines) != 1:
                plot_axis.lines.remove(plot_axis.lines[-1])
        except (ValueError, IndexError):
            # BUG FIX: the original's bare `except:` followed by the bare
            # expression `UnboundLocalError` silently swallowed everything;
            # catch only the expected removal failures.
            pass
        # add clicked data points to the anchor lists
        xcoords.append(event.xdata)
        ycoords.append(event.ydata)
        plot_axis.scatter(xcoords, ycoords, color='black')
        plt.xlabel('Wavenumber cm-1')
        plt.ylabel('Relative Intensity')
        plt.draw()
        xvals = np.array(xcoords)
        yvals = np.array(ycoords)
        # fits values to polynomial; RankWarning is irrelevant here
        warnings.simplefilter('ignore', np.RankWarning)
        p_fit = np.polyfit(xvals, yvals, order)
        pvals = np.poly1d(p_fit)
        plot_axis.plot(x, pvals(x), color='black')
        plt.draw()
    if event.button == 2:
        # middle click: remove the point closest to the click
        print('pop!')
        # save the zoom, then rebuild the plot without the removed point
        xlims = plt.gca().get_xlim()
        ylims = plt.gca().get_ylim()
        plot_axis.cla()
        plot_axis.plot(x, y)
        plt.xlim(xlims)
        plt.ylim(ylims)
        plt.xlabel('Wavenumber cm-1')
        plt.ylabel('Relative Intensity')
        # BUG FIX: np.abs on a plain list raised TypeError in the original;
        # also guard against deleting from empty lists.
        if xcoords:
            xindx = np.abs(np.asarray(xcoords) - event.xdata).argmin()
            del xcoords[xindx]
        if ycoords:
            yindx = np.abs(np.asarray(ycoords) - event.ydata).argmin()
            del ycoords[yindx]
        plot_axis.scatter(xcoords, ycoords, color='black')
        plt.draw()
        xvals = np.array(xcoords)
        yvals = np.array(ycoords)
        warnings.simplefilter('ignore', np.RankWarning)
        p_fit = np.polyfit(xvals, yvals, order)
        pvals = np.poly1d(p_fit)
        plot_axis.plot(x, pvals(x), color='black')
        plt.draw()
    if event.button == 3:
        # right click: end clicking awareness
        plot_figure.canvas.mpl_disconnect(cid)
        os.chdir(folder_to_save)
        plt.savefig('continuum_chosen.pdf')
        # save the polynomial used in the continuum divide, for reference
        with open("continuum_polynomial.txt", "w") as save_file:
            save_file.write("%s *x^ %d " % (pvals[0], 0))
            for i in range(len(pvals)):
                save_file.write("+ %s *x^ %d " % (pvals[i + 1], i + 1))
        os.chdir('..')
        calc_coeffs(pvals, x, y, folder_to_save)


def calc_coeffs(pvals, x, y, folder_to_save):
    """Flatten the continuum, convert to absorption coefficients, and
    integrate the two bands of interest (near 10000 and 11200 cm-1)."""
    fit_y = pvals(x)
    # flattens the continuum
    new_continuum = y / fit_y
    thickness = int(raw_input('\nEnter thickness of cell in cm\n:'))
    # 2 cm thickness for our work in 2016
    # remove runtime errors when taking negative log and dividing
    np.seterr(invalid='ignore')
    alpha_coeffs = -np.log(new_continuum) / thickness
    plotting_data_for_inspection(x, alpha_coeffs, "Alpha Coefficients",
                                 "Wavenumber cm-1", "Absorption cm-1",
                                 "alpha_coeffs.pdf", folder_to_save, False)
    save_as_csv(folder_to_save, "alpha_coeffs.csv", "x", "alpha", x, alpha_coeffs)
    # creating masks around each peak
    x_mask1 = x[(x > 10000) & (x < 10500)]
    x_mask2 = x[(x > 11200) & (x < 12000)]
    y_mask1 = alpha_coeffs[(x > 10000) & (x < 10500)]
    y_mask2 = alpha_coeffs[(x > 11200) & (x < 12000)]
    # writing data for plotting later
    save_as_csv(folder_to_save, "10000_peak.csv", "x", "y", x_mask1, y_mask1)
    save_as_csv(folder_to_save, "11200_peak.csv", "x", "y", x_mask2, y_mask2)
    # integrated area calcs
    area10000 = trapz(y_mask1, x_mask1)
    area11200 = trapz(y_mask2, x_mask2)
    os.chdir(folder_to_save)
    with open("10000area.txt", "w") as f:
        f.write(str(area10000))
    with open("11200area.txt", "w") as f:
        f.write(str(area11200))
    os.chdir('..')
    # BUG FIX: the original asked once and then spun forever unless the
    # first answer was 'y'; keep prompting until the user confirms.
    while raw_input("Press 'y' when finished\n:") != "y":
        pass
    plt.close('all')
    print("Finished!")
    quit()
    # end of program


if __name__ == '__main__':
    main()
mit
paolodedios/tensorflow
tensorflow/python/estimator/inputs/pandas_io.py
41
1293
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""pandas_io python module.

Importing from tensorflow.python.estimator is unsupported and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# Forwarding shim: the real implementation lives in tensorflow_estimator.
from tensorflow_estimator.python.estimator.inputs import pandas_io

# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
# Re-export everything public from the real module through this legacy path.
pandas_io.__all__ = [s for s in dir(pandas_io) if not s.startswith('__')]

from tensorflow_estimator.python.estimator.inputs.pandas_io import *
apache-2.0
fboers/jumeg
jumeg/decompose/fourier_ica_plot.py
3
71131
# Authors: Lukas Breuer <l.breuer@fz-juelich.de> """ ---------------------------------------------------------------------- --- jumeg.decompose.fourier_ica_plot --------------------------------- ---------------------------------------------------------------------- autor : Lukas Breuer email : l.breuer@fz-juelich.de last update: 17.11.2016 version : 1.1 ---------------------------------------------------------------------- This is a simple implementation to plot the results achieved by applying FourierICA ---------------------------------------------------------------------- """ ####################################################### # # # plotting functions for FourierICA # # # ####################################################### # +++++++++++++++++++++++++++++++++++++++++++++++++++++++ # Simple function to adjust axis in plots # +++++++++++++++++++++++++++++++++++++++++++++++++++++++ def adjust_spines(ax, spines, labelsize=10): """ Simple function to adjust axis in plots Parameters ---------- ax: axis object Plot object which should be adjusted spines: list of strings ['bottom', 'left'] Name of the axis which should be adjusted labelsize: integer Font size for the x- and y-axis labels """ for loc, spine in list(ax.spines.items()): if loc in spines: spine.set_position(('outward', 4)) # outward by 4 points # spine.set_smart_bounds(True) else: spine.set_color('none') # don't draw spine # turn off ticks where there is no spine if 'left' in spines: ax.yaxis.set_ticks_position('left') else: # no yaxis ticks ax.yaxis.set_ticks([]) if 'bottom' in spines: ax.xaxis.set_ticks_position('bottom') else: # no xaxis ticks ax.xaxis.set_ticks([]) ax.tick_params(axis='x', labelsize=labelsize) ax.tick_params(axis='y', labelsize=labelsize) # +++++++++++++++++++++++++++++++++++++++++++++++++++++++ # function to generate automatically combined labels # +++++++++++++++++++++++++++++++++++++++++++++++++++++++ def get_combined_labels(subject='fsaverage', subjects_dir=None, 
parc='aparc.a2009s'): """ Helper function to combine labels automatically according to previous studies. Parameters ---------- subject: string containing the subjects name default: subject='fsaverage' subjects_dir: Subjects directory. If not given the system variable SUBJECTS_DIR is used default: subjects_dir=None parc: name of the parcellation to use for reading in the labels default: parc='aparc.a2009s' Return ------ label_keys: names of the new labels labels: list containing the combined labels """ # ------------------------------------------ # import necessary modules # ------------------------------------------ from mne import read_labels_from_annot import numpy as np from os.path import join # ------------------------------------------ # define labels based on previous studies # ------------------------------------------ # to get more information about the label names and their # locations check the following publication: # Destrieux et al. (2010), Automatic parcellation of human # cortical gyri and sulci using standard anatomical nomenclature, # NeuroImage, DOI: 10.1016/j.neuroimage.2010.06.010 label_combinations = { 'auditory': ['G_temp_sup-G_T_transv', 'G_temp_sup-Plan_polar', 'Lat_Fis-post'], 'broca': ['G_front_inf-Opercular', 'G_front_inf-Triangul', 'Lat_Fis-ant-Vertical'], 'cingulate': ['G_cingul-Post-dorsal', 'G_cingul-Post-ventral', 'G_and_S_cingul-Ant', 'G_and_S_cingul-Mid-Ant', 'G_and_S_cingul-Mid-Post', 'S_pericallosal', 'cingul-Post-ventral'], 'frontal': ['G_and_S_frontomargin', 'G_and_S_transv_frontopol', 'G_front_inf-Orbital', 'G_front_middle', 'G_front_sup', 'G_orbital', 'G_rectus', 'G_subcallosal', 'Lat_Fis-ant-Horizont', 'S_front_inf', 'S_front_middle', 'S_front_sup', 'S_orbital_lateral', 'S_orbital-H_Shaped', 'S_suborbital'], 'gustatory': ['G_and_S_subcentral'], 'insula': ['S_circular_insula_ant', 'S_circular_insula_inf', 'S_circular_insula_sup', 'G_Ins_lg_and_S_cent_ins', 'G_insular_short'], 'motor': ['G_precentral', 
'S_precentral-sup-part', 'S_precentral-inf-part', 'S_central'], 'olfactory': ['S_temporal_transverse'], 'somatosensory': ['G_postcentral', 'S_postcentral'], 'somatosensory associated': ['G_and_S_paracentral', 'G_pariet_inf-Angular', 'G_parietal_sup', 'S_cingul-Marginalis', 'S_intrapariet_and_P_trans'], 'temporal': ['G_oc-temp_lat-fusifor', 'G_oc-temp_med-Parahip', 'G_temp_sup-Plan_polar', 'G_temporal_inf', 'G_temporal_middle', 'G_temp_sup-Lateral', 'Pole_temporal', 'S_collat_transv_ant', 'S_oc-temp_lat', 'S_oc-temp_med_and_Lingual', 'S_temporal_inf', 'S_temporal_sup'], 'vision': ['G_and_S_occipital_inf', 'G_occipital_middle', 'G_oc-temp_med-Lingual', 'S_collat_transv_post', 'S_oc_sup_and_transversal', 'S_occipital_ant', 'S_oc_middle_and_Lunatus'], 'visual': ['G_cuneus', 'G_precuneus', 'S_calcarine', 'S_parieto_occipital', 'G_occipital_sup', 'Pole_occipital', 'S_subparietal'], 'wernicke': ['G_pariet_inf-Supramar', 'G_temp_sup-Plan_tempo', 'S_interm_prim-Jensen'] } label_keys = list(label_combinations.keys()) labels = [] # ------------------------------------------ # combine labels # ------------------------------------------ # loop over both hemispheres for hemi in ['lh', 'rh']: # read all labels in labels_all = read_labels_from_annot(subject, parc=parc, hemi=hemi, surf_name='inflated', subjects_dir=subjects_dir, verbose=False) # loop over all labels to extract label names label_names = [] for label in labels_all: label_names.append(label.name) # ------------------------------------------ # now generate labels based on previous # studies # ------------------------------------------ # loop over all previously defined labels for label_key in label_keys: # get name of all labels related to the current one label_members = label_combinations[label_key] label_members = [x+'-'+hemi for x in label_members] # check which labels we need for the current one idx_labels_want = np.where(np.in1d(label_names, label_members))[0] labels_want = [labels_all[i] for i in idx_labels_want] 
# combine labels label_new = np.sum(labels_want) label_new.name = label_key + '-' + hemi # fill the surface between sources label_new.values.fill(1.0) label_new.smooth(subject=subject, subjects_dir=subjects_dir) # save new label fnout = join(subjects_dir, subject, 'label', hemi + '.' + label_key + '.label') label_new.save(fnout) labels.append(label_new) return label_keys, labels # +++++++++++++++++++++++++++++++++++++++++++++++++++++++ # function to get the anatomical label to a given vertex # +++++++++++++++++++++++++++++++++++++++++++++++++++++++ def get_anat_label_name(vertex, hemi, labels=None, subject='fsaverage', subjects_dir=None, parc='aparc.a2009s'): """ Helper function to get to a given vertex the name of the anatomical label Parameters ---------- vertex: integer containing the vertex number hemi: string containing the information in which hemisphere the vertex is located. Should be either 'lh' or 'rh' labels: labels to use for checking. If not given the labels are read from the subjects directory default: labels=None subject: string containing the subjects name default: subject='fsaverage' subjects_dir: Subjects directory. 
If not given the system variable SUBJECTS_DIR is used default: subjects_dir=None parc: name of the parcellation to use for reading in the labels default: parc='aparc.a2009s' Return ------ name: string containing the name of the anatomical label related to the given vertex """ # ------------------------------------------ # import necessary modules # ------------------------------------------ from mne import read_labels_from_annot import numpy as np # ------------------------------------------ # check input parameter # ------------------------------------------ # check if labels are given or must be read if not labels: labels = read_labels_from_annot(subject, parc=parc, hemi=hemi, surf_name='inflated', subjects_dir=subjects_dir, verbose=False) # ------------------------------------------ # loop over labels to find corresponding # label # ------------------------------------------ name = '' for label in labels: if label.hemi == hemi: # get vertices of current label label_vert = np.in1d(np.array(vertex), label.vertices) if label_vert: name = label.name break if name == '': name = 'unknown-' + hemi return name # +++++++++++++++++++++++++++++++++++++++++++++++++++++++ # function to get the MNI-coordinate(s) to a given # FourierICA component # +++++++++++++++++++++++++++++++++++++++++++++++++++++++ def get_mni_coordinates(A_orig, subject='fsaverage', subjects_dir=None, parc='aparc.a2009s', percentile=97, combine_labels=True): """ Helper function to get the MNI-coordinate(s) to a given FourierICA component. The selection if a component has activation in both hemispheres or only in one is made like follows: estimate for each component an activation threshold based on the given percentile. Next, estimate the total number of voxels in the component which are above the estimated threshold. Now check if at least 20% of the total number of voxels above threshold are in each hemisphere. If yes both hemispheres are marked as active, otherwise only one. 
Parameters ---------- A_orig: array 2D-mixing-array (nvoxel, ncomp) estimated when applying FourierICA subject: string containing the subjects name default: subject='fsaverage' subjects_dir: Subjects directory. If not given the system variable SUBJECTS_DIR is used default: subjects_dir=None parc: name of the parcellation to use for reading in the labels default: parc='aparc.a2009s' percentile: integer value between 0 and 100 used to set a lower limit for the shown intensity range of the spatial plots combine_labels: if set labels are combined automatically according to previous studies default: combine_labels=True Return ------ mni_coords: dictionary The dictionary contains two elements: 'rh' and 'lh', each of which containing a list with the MNI coordinates as string. Note, each list contains the same number of elements as components are given. If there is no MNI coordinate for a component an empty string is used, e.g. for two components {'rh': ['(37.55, 1.58, -21.71)', '(44.78, -10.41, 27.89)'], 'lh': ['(-39.43, 5.60, -27.80)', '']} hemi_loc_txt: list containing for each FourierICA component to which region it spatially belongs ('left', 'right' or 'both') classification: dictionary classification object. It is a dictionary containing two sub-dictionaries 'lh' and 'rh' (for left and right hemisphere). In both sub-dictionaries the information about the groups is stored, i.e. a group/region name + the information which components are stored in this group (as indices). 
An example for 6 components might look like this: {'rh': {'somatosensory': [1, 3], 'cingulate': [4, 5]}, 'lh': {'somatosensory': [1, 2], 'cingulate': [0, 5]}} labels: list of strings names of the labels which are involved in this data set """ # ------------------------------------------ # import necessary modules # ------------------------------------------ from mne import vertex_to_mni import numpy as np from os import environ import types # ------------------------------------------- # check input parameter # ------------------------------------------- if not subjects_dir: subjects_dir = environ.get('SUBJECTS_DIR') # ------------------------------------------- # generate spatial profiles # (using magnitude and phase) # ------------------------------------------- if isinstance(A_orig[0, 0], complex): A_orig_mag = np.abs(A_orig) else: A_orig_mag = A_orig # ------------------------------------------- # set some parameters # ------------------------------------------- nvoxel, ncomp = A_orig_mag.shape nvoxel_half = int(nvoxel / 2) hemi = ['lh', 'rh'] hemi_names = ['left ', 'right', 'both '] hemi_indices = [[0, nvoxel_half], [nvoxel_half, -1]] hemi_loc_txt = np.array([' '] * ncomp) hemi_loc = np.zeros(ncomp) # ------------------------------------------- # generate structures to save results # ------------------------------------------- # generate dictionary to save MNI coordinates mni_coords = {'rh': [''] * ncomp, 'lh': [''] * ncomp} # ------------------------------------------ # check if labels should be combined # automatically # ------------------------------------------ if combine_labels: label_names, labels = get_combined_labels(subject=subject, subjects_dir=subjects_dir, parc=parc) # generate empty classification dictionary class_keys = label_names[:] class_keys.append('unknown') classification = {'lh': {key: [] for key in class_keys}, 'rh': {key: [] for key in class_keys}} # if not generate empty variables else: label_names, labels = None, None classification = 
{} # ------------------------------------------ # loop over all components # ------------------------------------------ for icomp in range(ncomp): # ------------------------------------------ # extract maxima in the spatial profile of # the current component separately for both # hemispheres # ------------------------------------------ idx_ver_max_lh = np.argmax(A_orig_mag[:nvoxel_half, icomp]) idx_ver_max_rh = np.argmax(A_orig_mag[nvoxel_half:, icomp]) # ------------------------------------------ # check for both maxima if they are # significant # ------------------------------------------ # set some paremeter threshold = np.percentile(A_orig_mag[:, icomp], percentile) nidx_above = len(np.where(A_orig_mag[:, icomp] > threshold)[0]) cur_label_name = [] # loop over both hemispheres for idx_hemi, idx_vertex_max in enumerate([idx_ver_max_lh, idx_ver_max_rh]): # get the number of vertices above the threshold # in the current hemisphere nidx_above_hemi = len(np.where(A_orig_mag[hemi_indices[idx_hemi][0]:hemi_indices[idx_hemi][1], icomp] > threshold)[0]) # check if at least 20% of all vertices above the threshold # are in the current hemisphere if nidx_above_hemi * 5 > nidx_above: # get MNI-coordinate mni_coord = vertex_to_mni(idx_vertex_max, idx_hemi, subject, subjects_dir=subjects_dir)[0] # store results in structures mni_coords[hemi[idx_hemi]][icomp] = \ '(' + ', '.join(["%2.2f" % x for x in mni_coord]) + ')' # store hemisphere information hemi_loc[icomp] += idx_hemi + 1.0 # ------------------------------------------ # get MNI-coordinate to vertex as well as # the name of the corresponding anatomical # label # ------------------------------------------ anat_name = get_anat_label_name(idx_vertex_max, hemi[idx_hemi], subject=subject, subjects_dir=subjects_dir, parc=parc, labels=labels) cur_label_name.append(anat_name[:-3]) else: cur_label_name.append(' ') # ------------------------------------------ # check which results must be saved # 
------------------------------------------ if combine_labels: # check if activation was found in both hemispheres # --> if not we can directly save the results if ' ' in cur_label_name: # adjust classification dictionary if cur_label_name[0] == ' ': classification[hemi[1]][cur_label_name[1]].append(icomp) else: classification[hemi[0]][cur_label_name[0]].append(icomp) # --> otherwise we have to make sure that we group the # component only into one region else: # check if both vertices are in the same anatomical location # --> then we have no problem if cur_label_name[0] == cur_label_name[1]: classification[hemi[0]][cur_label_name[0]].append(icomp) classification[hemi[1]][cur_label_name[1]].append(icomp) else: # check if we have an unknown region being involved # --> if yes chose the other one if cur_label_name[0] == 'unknown': classification[hemi[1]][cur_label_name[1]].append(icomp) hemi_loc[icomp], mni_coords[hemi[0]][icomp] = 2, '' elif cur_label_name[1] == 'unknown': classification[hemi[0]][cur_label_name[0]].append(icomp) hemi_loc[icomp], mni_coords[hemi[1]][icomp] = 1, '' # otherwise chose the region with the strongest vertex else: if A_orig_mag[idx_ver_max_lh, icomp] > A_orig_mag[idx_ver_max_rh, icomp]: classification[hemi[0]][cur_label_name[0]].append(icomp) hemi_loc[icomp], mni_coords[hemi[1]][icomp] = 1, '' else: classification[hemi[1]][cur_label_name[1]].append(icomp) hemi_loc[icomp], mni_coords[hemi[0]][icomp] = 2, '' # ------------------------------------------ # adjust hemi_loc_txt if activity was found # in both hemispheres # ------------------------------------------ for idx, hemi_name in enumerate(hemi_names): idx_change = np.where(hemi_loc == (idx + 1.0))[0] hemi_loc_txt[idx_change] = hemi_name # ------------------------------------------ # adjust label_names to only contain regions # being involved in processing the current # data # ------------------------------------------ labels = [] for cur_hemi in hemi: for key in label_names: if 
classification[cur_hemi][key]:
                labels.append(key)

    # reduce to the unique set of region names actually used
    labels = np.unique(labels).tolist()

    return mni_coords, hemi_loc_txt, classification, labels


# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# helper function to check if classification was
# performed prior to plotting
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _check_classification(classification, ncomp):

    """
    Helper function to check if classification was
    performed prior to plotting

    Parameters
    ----------
    classification: dictionary
        classification object from the group_ica_object.
        It is a dictionary containing two sub-dictionaries
        'lh' and 'rh' (for left and right hemisphere). In
        both sub-dictionaries the information about the
        groups is stored, i.e. a group/region name + the
        information which components are stored in this
        group
    ncomp: integer
        number of components

    Return
    ------
    keys: list containing the group names
    key_borders: list containing the group borders, i.e.
        the information where to plot a new group name
    idx_sort: array containing the plotting order of the
        components, i.e. components belonging to one group
        are plotted together
    """

    # ------------------------------------------
    # import necessary modules
    # ------------------------------------------
    import numpy as np

    # ------------------------------------------
    # check if classification was done
    # ------------------------------------------
    key_borders = []
    if np.any(classification):

        # initialize empty lists
        idx_sort = []
        keys_hemi = list(classification.keys())

        # sort keys (case-insensitive alphabetical order)
        keys = list(classification[keys_hemi[0]].keys())
        keys.sort(key=lambda v: v.upper())

        # set 'unknown' variables to the end
        keys.remove('unknown')
        keys.append('unknown')

        # remove keys with empty entries, i.e. groups
        # that contain no component in either hemisphere
        keys_want = []
        for key in keys:
            if classification[keys_hemi[0]][key] or\
                    classification[keys_hemi[1]][key]:
                keys_want.append(key)

        # loop over all keys
        for key in keys_want:

            # get indices to each class
            idx_lh = classification[keys_hemi[0]][key]
            idx_rh = classification[keys_hemi[1]][key]

            # get indices of components in both hemispheres
            idx_both = np.intersect1d(idx_lh, idx_rh)

            # get indices of components only in right hemisphere
            idx_only_rh = np.setdiff1d(idx_rh, idx_lh)

            # get indices of components only in left hemisphere
            idx_only_lh = np.setdiff1d(idx_lh, idx_rh)

            # add components to list of sorted indices
            # (order within a group: both, right-only, left-only)
            idx_all = np.concatenate((idx_both, idx_only_rh, idx_only_lh))
            idx_sort += idx_all.tolist()
            key_borders.append(len(idx_all))

        # add first border and estimate cumulative sum to
        # have the right borders
        key_borders = np.insert(key_borders, 0, 1)
        key_borders = np.cumsum(key_borders)[:-1]

    # ------------------------------------------
    # if classification was not performed set
    # some default values
    # ------------------------------------------
    else:
        idx_sort = np.arange(ncomp)
        keys_want = []

    return keys_want, key_borders, idx_sort


# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# helper function to handle time courses for plotting
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def
_get_temporal_envelopes(fourier_ica_obj, W_orig, temporal_envelope=[],
                            src_loc_data=[], tICA=False, global_scaling=True,
                            win_length_sec=None, tpre=None, flow=None):

    """
    Helper function to get the temporal envelopes of the
    FourierICA components for plotting

    Parameters
    ----------
    fourier_ica_obj: FourierICA object generated
        when applying jumeg.decompose.fourier_ica
    W_orig: array
        2D-demixing-array (ncomp x nvoxel) estimated
        when applying FourierICA
    temporal_envelope: list of arrays containing
        the temporal envelopes. If the temporal
        envelopes are already given here z-scoring
        and mean estimation is performed
    src_loc_data: array
        3D array containing the source localization
        data used for FourierICA estimation
        (nfreq x nepochs x nvoxel). Only necessary
        if temporal_envelope is not given.
    tICA: bool
        If set we know that temporal ICA was applied
        when estimating the FourierICA, i.e. when
        generating the temporal-envelopes the data
        must not be transformed from the Fourier
        domain to the time-domain
    global_scaling: bool
        If set all temporal-envelopes are globally
        scaled. Otherwise each component is scaled
        individually
    win_length_sec: float or None
        Length of the epoch window in seconds
    tpre: float or None
        Lower border (in seconds) of the time-window
        used for generating/showing the epochs. If
        'None' the value stored in 'fourier_ica_obj'
        is used
    flow: float, integer or None
        Lower frequency border for generating the
        temporal-envelope. If 'None' the frequency
        border stored in 'fourier_ica_obj' is used

    Return
    ------
    temporal_envelope_mean: list containing the 2D
        arrays of the mean temporal envelopes
        of the components
    temporal_envelope: list containing the 3D arrays
        of the temporal envelopes of the components.
        Necessary for estimating the spectral profiles
    """

    # ------------------------------------------
    # import necessary modules
    # ------------------------------------------
    from mne.baseline import rescale
    import numpy as np
    from scipy import fftpack

    # -------------------------------------------
    # check input parameter
    # -------------------------------------------
    # NOTE(review): the '[]' defaults are mutable default
    # arguments; the '== []' checks below depend on them —
    # confirm before refactoring to 'None' defaults
    if tpre == None:
        tpre = fourier_ica_obj.tpre
    if flow == None:
        flow = fourier_ica_obj.flow
    if not win_length_sec:
        win_length_sec = fourier_ica_obj.win_length_sec

    # estimate some simple parameter
    sfreq = fourier_ica_obj.sfreq
    ncomp, nvoxel = W_orig.shape
    win_ntsl = int(np.floor(sfreq * win_length_sec))
    # index of the lowest frequency bin within the full FFT window
    startfftind = int(np.floor(flow * win_length_sec))

    # -------------------------------------------
    # check if temporal envelope is already
    # given or should be estimated
    # -------------------------------------------
    if temporal_envelope == []:

        # -------------------------------------------
        # check if 'src_loc_data' is given...
        # if not throw an error
        # -------------------------------------------
        if src_loc_data == []:
            print(">>> ERROR: You have to provide either the 'temporal_envelope' or")
            print(">>> 'src_loc_data'. Otherwise no temporal information can be plotted!")
            import pdb
            pdb.set_trace()

        # -------------------------------------------
        # get independent components
        # -------------------------------------------
        nfreq, nepochs, nvoxel = src_loc_data.shape
        act = np.zeros((ncomp, nepochs, nfreq), dtype=np.complex)

        # for temporal ICA the data are already in the time
        # domain, so the window length equals the number of bins
        if tICA:
            win_ntsl = nfreq

        temporal_envelope = np.zeros((nepochs, ncomp, win_ntsl))
        fft_act = np.zeros((ncomp, win_ntsl), dtype=np.complex)

        # loop over all epochs to get time-courses from
        # source localized data by inverse FFT
        for iepoch in range(nepochs):

            # normalize data (undo the de-meaning/scaling stored
            # in the FourierICA object)
            src_loc_zero_mean = (src_loc_data[:, iepoch, :] -
                                 np.dot(np.ones((nfreq, 1)), fourier_ica_obj.dmean)) / \
                                np.dot(np.ones((nfreq, 1)), fourier_ica_obj.dstd)

            act[:ncomp, iepoch, :] = np.dot(W_orig, src_loc_zero_mean.transpose())
            #act[ncomp:, iepoch, :] = np.dot(W_orig, src_loc_zero_mean.transpose())

            if tICA:
                temporal_envelope[iepoch, :, :] = act[:, iepoch, :].real

            else:
                # -------------------------------------------
                # generate temporal profiles
                # -------------------------------------------
                # apply inverse STFT to get temporal envelope
                fft_act[:, startfftind:(startfftind + nfreq)] = act[:, iepoch, :]
                temporal_envelope[iepoch, :, :] = fftpack.ifft(fft_act, n=win_ntsl, axis=1).real

    # -------------------------------------------
    # average temporal envelope
    # -------------------------------------------
    # wrap a bare array in the nested-list layout
    # ([condition][0] -> 3D array) used below
    if not isinstance(temporal_envelope, list):
        temporal_envelope = [[temporal_envelope]]

    ntemp = len(temporal_envelope)
    temporal_envelope_mean = np.empty((ntemp, 0)).tolist()
    times = (np.arange(win_ntsl) / sfreq + tpre)

    # -------------------------------------------
    # perform baseline correction
    # -------------------------------------------
    # z-score each component in place w.r.t. the pre-stimulus
    # interval (times <= 0)
    for itemp in range(ntemp):
        for icomp in range(ncomp):
            temporal_envelope[itemp][0][:, icomp, :] = rescale(temporal_envelope[itemp][0][:, icomp, :],
                                                               times, (None, 0), 'zscore')

    # -------------------------------------------
    # estimate mean from temporal envelopes
    # -------------------------------------------
    # average over epochs; drop 5 samples at both edges
    for itemp in range(ntemp):
        temporal_envelope_mean[itemp].append(np.mean(temporal_envelope[itemp][0], axis=0)[:, 5:-5])

    # -------------------------------------------
    # check if global scaling should be used
    # -------------------------------------------
    # if not scale each component separately between -0.5 and 0.5
    # NOTE(review): this branch indexes temporal_envelope_mean[0]
    # and [1], i.e. it presumably assumes at least two conditions
    # — confirm against callers
    if not global_scaling:
        for icomp in range(ncomp):
            min_val = np.min([temporal_envelope_mean[0][0][icomp, :], temporal_envelope_mean[1][0][icomp, :]])
            max_val = np.max([temporal_envelope_mean[0][0][icomp, :], temporal_envelope_mean[1][0][icomp, :]])
            scale_fact = 1.0 / (max_val - min_val)
            for itemp in range(ntemp):
                temporal_envelope_mean[itemp][0][icomp, :] = np.clip(
                    scale_fact * temporal_envelope_mean[itemp][0][icomp, :] - scale_fact * min_val - 0.5, -0.5, 0.5)

    # if global scaling should be used, scale all
    # data between -0.5 and 0.5
    else:
        # scale temporal envelope between -0.5 and 0.5
        min_val = np.min(temporal_envelope_mean)
        max_val = np.max(temporal_envelope_mean)
        scale_fact = 1.0 / (max_val - min_val)
        for itemp in range(ntemp):
            temporal_envelope_mean[itemp][0] = np.clip(scale_fact * temporal_envelope_mean[itemp][0] -
                                                       scale_fact * min_val - 0.5, -0.5, 0.5)

    return temporal_envelope_mean, temporal_envelope


# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# helper function to handle spatial profiles for plotting
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _get_spatial_profiles(A_orig, keys, idx_text, vertno=[],
                          subject='fsaverage', subjects_dir=None,
                          labels=None, classification={}, percentile=97,
                          mni_coord=[], add_foci=False, fnout=None):

    """
    Helper function to get/generate the spatial
    profiles of the FourierICA components for
    plotting

    Parameters
    ----------
    A_orig: array
        2D-mixing-array (nvoxel, ncomp) estimated
        when applying FourierICA
    keys: list containing the group names
    idx_text: list containing the information in which
        brain hemisphere a component is mainly
        located (could be either
'both', 'left', 'right' or ' ' if no classification was performed before plotting) vertno: list list containing two arrays with the order of the vertices. If not given it will be generated in this routine subject: string string containing the subjects ID subjects_dir: string string containing the subjects directory path labels: list of strings names of the labels which should be plotted. Note, the prefix 'lh.' and the suffix '.label' are automatically added classification: dictionary classification object from the group_ica_object. It is a dictionary containing two sub-dictionaries 'lh' and 'rh' (for left and right hemisphere). In both sub-dictionaries the information about the groups is stored, i.e. a group/region name + the information which components are stored in this group percentile: integer value between 0 and 100 used to set a lower limit for the shown intensity range of the spatial plots mni_coord: list of strings if given the MNI coordinates are plotted beneath the spatial profiles add_foci: bool if True and the MNI coordinates are given a foci is plotted at the position of the MNI coordinate fnout: string or None if labels and classification is given the output filename of the brain plot containing all labels. 
If 'None' the results are not stored Return ------ temp_plot_dir: string directory where the spatial profiles are stored """ # ------------------------------------------ # import necessary modules # ------------------------------------------ from matplotlib import gridspec as grd from matplotlib import pyplot as plt from mayavi import mlab from mne.source_estimate import _make_stc import numpy as np from os import environ, makedirs from os.path import exists, join import re from scipy import misc from surfer import set_log_level import types # set log level to 'WARNING' set_log_level('CRITICAL') import mayavi mayavi.mlab.options.offscreen = True # ------------------------------------------- # create temporary directory to save plots # of spatial profiles # ------------------------------------------- temp_plot_dir = join(subjects_dir, subject, 'temp_plots') if not exists(temp_plot_dir): makedirs(temp_plot_dir) # ------------------------------------------- # generate spatial profiles # (using magnitude and phase) # ------------------------------------------- if not subjects_dir: subjects_dir = environ.get('SUBJECTS_DIR') if isinstance(A_orig[0, 0], complex): A_orig_mag = np.abs(A_orig) else: A_orig_mag = A_orig nvoxel, ncomp = A_orig_mag.shape # ------------------------------------------- # check if vertno is given, otherwise # generate it # ------------------------------------------- if not np.any(vertno): vertno = [np.arange(nvoxel/2), np.arange(nvoxel/2)] # ------------------------------------------- # check if labels should be plotted and if # classification was already performed # --> if yes define some colors for the # labels # ------------------------------------------- if labels and classification: colors = ['green', 'red', 'cyan', 'yellow', 'mediumblue', 'magenta', 'chartreuse', 'indigo', 'sandybrown', 'slateblue', 'purple', 'lightpink', 'springgreen', 'orange', 'sienna', 'cadetblue', 'crimson', 'maroon', 'powderblue', 'deepskyblue', 'olive'] # 
------------------------------------------- # loop over all components to generate # spatial profiles # ------------------------------------------- for icomp in range(ncomp): # ------------------------------------------- # plot spatial profile # ------------------------------------------- # generate stc-object from current component A_cur = A_orig_mag[:, icomp] src_loc = _make_stc(A_cur[:, np.newaxis], vertices=vertno, tmin=0, tstep=1, subject=subject) # define current range (Xth percentile) fmin = np.percentile(A_cur, percentile) fmax = np.max(A_cur) fmid = 0.5 * (fmin + fmax) clim = {'kind': 'value', 'lims': [fmin, fmid, fmax]} # plot spatial profiles brain = src_loc.plot(surface='inflated', hemi='split', subjects_dir=subjects_dir, config_opts={'cortex': 'bone'}, views=['lateral', 'medial'], time_label=' ', colorbar=False, clim=clim) # check if foci should be added to the plot if add_foci and np.any(mni_coord): for i_hemi in ['lh', 'rh']: mni_string = mni_coord[i_hemi][icomp] # if 'mni_string' is not empty (it might be empty if activity # can only be found in one hemisphere) plot a foci if mni_string != "": mni_float = list(map(float, re.findall("[-+]?\d*\.\d+|\d+", mni_string))) brain.add_foci(mni_float, coords_as_verts=False, hemi=i_hemi, color='chartreuse', scale_factor=1.5, map_surface='white') # ------------------------------------------- # check if labels should be plotted # ------------------------------------------- if labels and classification: # import module to read in labels from mne import read_label # get path to labels dir_labels = join(subjects_dir, subject, 'label') # identify in which group the IC is classified hemi = 'rh' if idx_text[icomp] == 'right' else 'lh' # read in the corresponding label for idx_key, key in enumerate(keys): if icomp in classification[hemi][key]: label_name = ".%s.label" % key color = colors[idx_key] break # loop over both hemispheres to read the label in and plot it hemi = ['lh', 'rh'] if idx_text[icomp] == 'both ' else 
[hemi] for hemi_cur in hemi: label = read_label(join(dir_labels, hemi_cur + label_name), subject=subject) brain.add_label(label, borders=False, hemi=hemi_cur, color=color, alpha=0.1) brain.add_label(label, borders=True, hemi=hemi_cur, color=color) # save results fn_base = "IC%02d_spatial_profile.png" % (icomp+1) fnout_img = join(temp_plot_dir, fn_base) brain.save_image(fnout_img) # close mlab figure mlab.close(all=True) # ------------------------------------------- # also generate one plot with all labels # ------------------------------------------- if labels and classification: # set clim in a way that no activity can be seen # (Note: we only want to see the labels) clim = {'kind': 'value', 'lims': [fmax, 1.5 * fmax, 2.0 * fmax]} # generate plot brain = src_loc.plot(surface='inflated', hemi='split', subjects_dir=subjects_dir, config_opts={'cortex': 'bone'}, views=['lateral', 'medial'], time_label=' ', colorbar=False, clim=clim, background='white') # loop over all labels for idx_key, key in enumerate(keys): label_name = ".%s.label" % key color = colors[idx_key] # loop over both hemispheres in order to plotting the labels for hemi in ['lh', 'rh']: label = read_label(join(dir_labels, hemi + label_name), subject=subject) brain.add_label(label, borders=False, hemi=hemi, color=color, alpha=0.6) # save results if fnout: fnout_img = '%s_labels.png' % fnout brain.save_image(fnout_img) # close mlab figure mlab.close(all=True) # ------------------------------------------- # now adjust the label plot appropriate # ------------------------------------------- # read spatial profile image spat_tmp = misc.imread(fnout_img) # rearrange image x_size, y_size, _ = spat_tmp.shape x_half, y_half = x_size / 2, y_size / 2 x_frame, y_frame = int(0.11 * x_half), int(0.01 * y_half) spatial_profile = np.concatenate((spat_tmp[x_frame:(x_half - x_frame), y_frame:(y_half - y_frame), :], spat_tmp[(x_half + x_frame):-x_frame, y_frame:(y_half - y_frame), :], spat_tmp[(x_half + x_frame):-x_frame, 
(y_half + y_frame):-y_frame, :], spat_tmp[x_frame:(x_half - x_frame), (y_half + y_frame):-y_frame, :]), axis=1) # plot image plt.ioff() fig = plt.figure('Labels plots', figsize=(17, 3)) gs = grd.GridSpec(1, 30, wspace=0.00001, hspace=0.00001, left=0.0, right=1.0, bottom=0.0, top=1.0) # set plot position and plot image p1 = fig.add_subplot(gs[0, 0:26]) p1.imshow(spatial_profile) adjust_spines(p1, []) # add label names keys_fac = 0.8/len(keys) keys_split = 0 p_text = fig.add_subplot(gs[0, 26:30]) keys_sort_idx = np.argsort(keys) for idx_key in range(len(keys)): key = keys[keys_sort_idx[idx_key]] # check if string should be split if len(key) > 21 and ' ' in key: p_text.text(0.0, 0.9-keys_fac*(idx_key+keys_split), key.split()[0]+'-', fontsize=13, color=colors[keys_sort_idx[idx_key]]) keys_split += 1 p_text.text(0.0, 0.9-keys_fac*(idx_key+keys_split), key.split()[1], fontsize=13, color=colors[keys_sort_idx[idx_key]]) else: p_text.text(0.0, 0.9-keys_fac*(idx_key+keys_split), key, fontsize=13, color=colors[keys_sort_idx[idx_key]]) adjust_spines(p_text, []) plt.savefig(fnout_img, dpi=300) # close plot and set plotting back to screen plt.close('FourierICA plots') plt.ion() mayavi.mlab.options.offscreen = False return temp_plot_dir # +++++++++++++++++++++++++++++++++++++++++++++++++++++++ # helper function to get spectral profiles for plotting # +++++++++++++++++++++++++++++++++++++++++++++++++++++++ def _get_spectral_profile(temporal_envelope, tpre, sfreq, flow, fhigh, bar_plot=False, use_multitaper=False): """ Helper function to get the spectral-profile of the temporal-envelopes of the FourierICA components for plotting Parameters ---------- temporal_envelope: list of arrays containing the temporal envelopes. tpre: float Lower border (in seconds) of the time-window used for generating/showing the epochs. 
If 'None' the value stored in 'fourier_ica_obj'
        is used
    sfreq: float
        Sampling frequency of the data
    flow: float or integer
        Lower frequency range for time frequency analysis
    fhigh: float or integer
        Upper frequency range for time frequency analysis
    bar_plot: boolean
        if set the number of time points for time
        frequency estimation is reduced in order to save
        memory and computing-time
    use_multitaper: boolean
        If set 'multitaper' is used for time frequency
        analysis, otherwise 'stockwell'

    Return
    ------
    average_power_all: list containing the averaged
        frequency power of all components
    freqs: array containing the frequencies used to
        calculate the frequency power
    vmin: lower frequency range for plotting
    vmax: upper frequency range for plotting
    """

    # ------------------------------------------
    # import necessary modules
    # ------------------------------------------
    from mne.baseline import rescale
    from mne.time_frequency._stockwell import _induced_power_stockwell
    import numpy as np

    # ------------------------------------------
    # define some parameter
    # ------------------------------------------
    ntemp = len(temporal_envelope)
    ncomp = temporal_envelope[0][0].shape[1]
    win_ntsl = temporal_envelope[0][0].shape[-1]
    average_power_all = np.empty((ntemp, 0)).tolist()
    vmin = np.zeros(ncomp)
    vmax = np.zeros(ncomp)

    # define some time parameter
    times = np.arange(win_ntsl) / sfreq + tpre
    idx_start = np.argmin(np.abs(times - tpre))
    idx_end = np.argmin(np.abs(times - (tpre + win_ntsl/sfreq)))

    # reduce the temporal resolution for bar plots to save
    # memory and computation time
    if bar_plot:
        decim = 10
    else:
        decim = 1

    # ------------------------------------------
    # loop over all time courses, i.e.
    # conditions, and all components
    # ------------------------------------------
    for itemp in range(ntemp):
        for icomp in range(ncomp):

            # extract some information from the temporal_envelope
            nepochs = temporal_envelope[itemp][0].shape[0]

            # ------------------------------------------
            # perform time frequency analysis
            # ------------------------------------------
            # prepare data for frequency analysis
            # (nepochs x 1 x ntimes, then channel-first)
            data_stockwell = temporal_envelope[itemp][0][:, icomp, idx_start:idx_end].\
                reshape((nepochs, 1, idx_end-idx_start))
            data_stockwell = data_stockwell.transpose([1, 0, 2])

            # mirror data to reduce transient frequencies
            # (50 reflected samples on each side)
            data_stockwell = np.concatenate((data_stockwell[:, :, 50:0:-1],
                                             data_stockwell, data_stockwell[:, :, -1:-51:-1]), axis=-1)

            n_fft = data_stockwell.shape[-1]

            # check if 'multitaper' or 'stockwell' should be
            # used for time-frequency analysis
            if use_multitaper:
                from mne.time_frequency.tfr import _compute_tfr

                n_cycle = 3.0
                # raise 'flow' if the window is too short for the
                # requested number of cycles at the lowest frequency
                # NOTE(review): 'flow' is modified in place and the
                # new value carries over to subsequent loop
                # iterations — confirm this is intended
                if (10.0 * n_cycle*sfreq)/(2.0 * np.pi * flow) > n_fft:
                    flow *= ((10.0 * n_cycle*sfreq)/(2.0 * np.pi * flow))/n_fft
                    flow = np.ceil(flow)

                freqs = np.arange(flow, fhigh)
                power_data = _compute_tfr(data_stockwell, freqs, sfreq=sfreq,
                                          use_fft=True, n_cycles=n_cycle, zero_mean=True,
                                          decim=decim, output='power', method='multitaper',
                                          time_bandwidth=10)
            else:
                power_data, _, freqs = _induced_power_stockwell(data_stockwell, sfreq=sfreq, fmin=flow,
                                                                fmax=fhigh, width=0.6, decim=1, n_fft=n_fft,
                                                                return_itc=False, n_jobs=4)

            # perform baseline correction (and remove mirrored parts from data)
            power_data = rescale(power_data[:, :, int(50/decim):-int(50/decim)],
                                 times[idx_start:idx_end][0:-1:decim], (None, 0), 'mean')
            average_power = np.mean(power_data, axis=0)

            # ------------------------------------------
            # store all frequency data in one list
            # ------------------------------------------
            average_power_all[itemp].append(average_power)

            # ------------------------------------------
            # estimate frequency thresholds for plotting
            # ------------------------------------------
            # 2nd/98th percentile limits, symmetrized around zero
            vmax[icomp] = np.max((np.percentile(average_power, 98), vmax[icomp]))
            vmin[icomp] = np.min((np.percentile(average_power, 2), vmin[icomp]))

            if np.abs(vmax[icomp]) > np.abs(vmin[icomp]):
                vmin[icomp] = - np.abs(vmax[icomp])
            else:
                vmax[icomp] = np.abs(vmin[icomp])

    return average_power_all, freqs, vmin, vmax


# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
# plot results when Fourier ICA was applied in the
# source space
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++
def plot_results_src_space(fourier_ica_obj, W_orig, A_orig,
                           src_loc_data=[], temporal_envelope=[],  # parameter for temporal profiles
                           tpre=None, win_length_sec=None,
                           tICA=False,
                           vertno=[], subject='fsaverage', subjects_dir=None,  # parameter for spatial profiles
                           percentile=97, add_foci=True, classification={},
                           mni_coords=[], labels=None,
                           flow=None, fhigh=None, bar_plot=False,  # parameter for spectral profiles
                           global_scaling=True, ncomp_per_plot=13, fnout=None,  # general plotting parameter
                           temp_profile_names=[]):

    """
    Generate plot containing all results achieved by
    applying FourierICA in source space, i.e., plot
    spatial and spectral profiles.

    Parameters
    ----------
    fourier_ica_obj: FourierICA object generated
        when applying jumeg.decompose.fourier_ica
    W_orig: array
        2D-demixing-array (ncomp x nvoxel) estimated
        when applying FourierICA
    A_orig: array
        2D-mixing-array (nvoxel, ncomp) estimated
        when applying FourierICA

    **** parameter for temporal profiles ****
    src_loc_data: array
        3D array containing the source localization
        data used for FourierICA estimation
        (nfreq x nepochs x nvoxel). Only necessary
        if temporal_envelope is not given.
        default: src_loc_data=[]
    temporal_envelope: list of arrays containing
        the temporal envelopes. If not given the
        temporal envelopes are estimated here based
        on the 'src_loc_data'
        default: temporal_envelope=[]
    tpre: float
        Lower border (in seconds) of the time-window
        used for generating/showing the epochs.
If 'None' the value stored in 'fourier_ica_obj' is used win_length_sec: float or None Length of the epoch window in seconds. If 'None' the value stored in 'fourier_ica_obj' is used tICA: boolean should be True if temporal ICA was applied default: tICA=False **** parameter for spatial profiles **** vertno: list list containing two arrays with the order of the vertices. If list is empty it will be automatically generated default: vertno=[] subject: string subjects ID default: subject='fsaverage' subjects_dir: string or None string containing the subjects directory path default: subjects_dir=None --> system variable SUBJETCS_DIR is used percentile: integer value between 0 and 100 used to set a lower limit for the shown intensity range of the spatial plots default: percentile=97 add_foci: bool if True and the MNI coordinates are given a foci is plotted at the position of the MNI coordinate default: add_foci=True classification: dictionary classification object from the group_ica_object. It is a dictionary containing two sub-dictionaries 'lh' and 'rh' (for left and right hemisphere). In both sub-dictionaries the information about the groups is stored, i.e. a group/region name + the information which components are stored in this group default: classification={} mni_coords: list of strings if given the MNI coordinates are plotted beneath the spatial profiles default: mni_coords=[] labels: list of strings names of the labels which should be plotted. Note, the prefix 'lh.' and the suffix '.label' are automatically added default: labels=None **** parameter for spectral profiles **** flow: float or integer Lower frequency range for time frequency analysis fhigh: float or integer Upper frequency range for time frequency analysis bar_plot: boolean If set the results of the time-frequency analysis are shown as bar plot. 
This option is recommended when FourierICA was applied to resting-state data default: bar_plot=False **** general plotting parameter **** global_scaling: bool If set spatial, spectral and temporal profiles are globally scaled. Otherwise each component is scaled individually default: global_scaling=True ncomp_per_plot: integer number of components per plot fnout: string default: fnout=None temp_profile_names: list of string The list should have the same number of elements as conditions were used to generate the temporal envelopes. The names given here are used as headline for the temporal profiles in the plot default: temp_profile_name=[] """ # ------------------------------------------ # import necessary modules # ------------------------------------------ from matplotlib import pyplot as plt from matplotlib import gridspec as grd from matplotlib.colors import Normalize import numpy as np from os import remove, rmdir from os.path import exists, join from scipy import misc # ------------------------------------------- # check input parameter # ------------------------------------------- if tpre == None: tpre = fourier_ica_obj.tpre if flow == None: flow = fourier_ica_obj.flow if not fhigh: fhigh = fourier_ica_obj.fhigh if not win_length_sec: win_length_sec = fourier_ica_obj.win_length_sec # check if either 'src_loc_data' or # 'temporal_envelope' is given, otherwise stop if src_loc_data == [] and temporal_envelope == []: print(">>> ERROR: you have either to provide the variable") print(">>> 'src_loc_data' or 'temporal_envelope'.") import pdb pdb.set_trace() # estimate/set some simple parameter sfreq = fourier_ica_obj.sfreq win_ntsl = int(np.floor(sfreq * win_length_sec)) ncomp, nvoxel = W_orig.shape ylim_temp = [-0.55, 0.55] time_range = [tpre, tpre + win_length_sec] # ------------------------------------------- # get temporal envelopes, or rather check if # temporal envelopes already exist or must # be calculated # ------------------------------------------- 
temporal_envelope_mean, temporal_envelope = \ _get_temporal_envelopes(fourier_ica_obj, W_orig, temporal_envelope=temporal_envelope, src_loc_data=src_loc_data, tICA=tICA, global_scaling=global_scaling, win_length_sec=win_length_sec, tpre=tpre, flow=flow) ntemp = len(temporal_envelope) # ------------------------------------------- # get MNI-coordinates of the FourierICA # components # ------------------------------------------- if not classification and not mni_coords and not labels: mni_coords, hemi_loc_txt, classification, labels = \ get_mni_coordinates(A_orig, subject=subject, subjects_dir=subjects_dir, percentile=percentile) # otherwise we only have to get the 'hemi_loc_txt' variable else: hemi_loc = np.array([int(i != '') for i in mni_coords['lh']]) hemi_loc += np.array([2*int(i != '') for i in mni_coords['rh']]) hemi_loc_txt = np.array([' '] * len(hemi_loc)) for idx, hemi_name in enumerate(['left ', 'right', 'both ']): idx_change = np.where(hemi_loc == (idx + 1.0))[0] hemi_loc_txt[idx_change] = hemi_name # check if classification was performed prior to plotting keys, key_borders, idx_sort = _check_classification(classification, ncomp) # ------------------------------------------- # get spatial profiles of all components # Note: This will take a while # ------------------------------------------- temp_plot_dir = _get_spatial_profiles(A_orig, keys, hemi_loc_txt, vertno=vertno, subject=subject, subjects_dir=subjects_dir, labels=labels, classification=classification, percentile=percentile, mni_coord=mni_coords, add_foci=add_foci, fnout=fnout) # ------------------------------------------- # get spectral profiles of all components # Note: This will take a while # ------------------------------------------- average_power_all, freqs, vmin, vmax = \ _get_spectral_profile(temporal_envelope, tpre, sfreq, flow, fhigh, bar_plot=bar_plot) # check if bar plot should be used # --> if yes estimate histogram data and normalize results if bar_plot: # generate an array to store 
the results freq_heights = np.zeros((ntemp, ncomp, len(freqs))) # loop over all conditions for i_power, average_power in enumerate(average_power_all): freq_heights[i_power, :, :] = np.sum(np.abs(average_power), axis=2) # normalize to a range between 0 and 1 freq_heights /= np.max(freq_heights) # ------------------------------------------ # now generate plot containing spatial, # spectral and temporal profiles # ------------------------------------------ # set some general parameter plt.ioff() nimg = int(np.ceil(ncomp/(1.0*ncomp_per_plot))) idx_key = 0 nplot = list(range(ncomp_per_plot, nimg*ncomp_per_plot, ncomp_per_plot)) nplot.append(ncomp) # generate image and its layout for plotting fig = plt.figure('FourierICA plots', figsize=(14 + ntemp * 8, 34)) n_keys = len(key_borders) if len(key_borders) > 0 else 1 gs = grd.GridSpec(ncomp_per_plot * 20 + n_keys * 10, 10 + ntemp * 8, wspace=0.1, hspace=0.05, left=0.04, right=0.96, bottom=0.04, top=0.96) # ------------------------------------------ # loop over the estimated number of images # ------------------------------------------ for iimg in range(nimg): # clear figure (to start with a white image in each loop) plt.clf() # estimate how many plots on current image istart_plot = int(ncomp_per_plot * iimg) # set idx_class parameter idx_class = 1 if key_borders == [] else 0 # ------------------------------------------ # loop over all components which should be # plotted on the current image # ------------------------------------------ for icomp in range(istart_plot, nplot[iimg]): # ---------------------------------------------- # check if key_boarders is set and should be # written on the image # ---------------------------------------------- if (icomp == istart_plot and key_borders != []) or \ ((icomp + 1) in key_borders): # adjust key-index if (icomp + 1) in key_borders: idx_key += 1 # add sub-plot with 'key_text' p_text = fig.add_subplot(gs[20 * (icomp - istart_plot) + idx_class * 10: \ 20 * (icomp - istart_plot) + 8 + 
idx_class * 10, 0:10]) p_text.text(0, 0, keys[idx_key-1], fontsize=25) adjust_spines(p_text, []) # adjust idx_class parameter idx_class += 1 # ---------------------------------------------- # plot spatial profiles # ---------------------------------------------- # read spatial profile image fn_base = "IC%02d_spatial_profile.png" % (idx_sort[icomp] + 1) fnin_img = join(temp_plot_dir, fn_base) spat_tmp = misc.imread(fnin_img) remove(fnin_img) # rearrange image x_size, y_size, _ = spat_tmp.shape x_half, y_half = x_size / 2, y_size / 2 x_frame, y_frame = int(0.11 * x_half), int(0.01 * y_half) spatial_profile = np.concatenate((spat_tmp[x_frame:(x_half - x_frame), y_frame:(y_half - y_frame), :], spat_tmp[(x_half + x_frame):-x_frame, y_frame:(y_half - y_frame), :], spat_tmp[(x_half + x_frame):-x_frame, (y_half + y_frame):-y_frame, :], spat_tmp[x_frame:(x_half - x_frame), (y_half + y_frame):-y_frame, :]), axis=1) # set plot position and plot image p1 = fig.add_subplot( gs[20 * (icomp - istart_plot) + idx_class * 10:20 * (icomp - istart_plot) + 15 + idx_class * 10, 0:10]) p1.imshow(spatial_profile) # set some plotting options p1.yaxis.set_ticks([]) p1.xaxis.set_ticks([]) y_name = "IC#%02d" % (idx_sort[icomp] + 1) p1.set_ylabel(y_name, fontsize=18) # ---------------------------------------------- # if given write MNI coordinates under the image # ---------------------------------------------- if np.any(mni_coords): # left hemisphere plt.text(120, 360, mni_coords['lh'][int(idx_sort[int(icomp)])], color="black", fontsize=18) # right hemisphere plt.text(850, 360, mni_coords['rh'][int(idx_sort[int(icomp)])], color="black", fontsize=18) # add location information of the component # --> if located in 'both', 'left' or 'right' hemisphere plt.text(-220, 100, hemi_loc_txt[int(idx_sort[int(icomp)])], color="red", fontsize=25, rotation=90) # ---------------------------------------------- # temporal/spectral profiles # ---------------------------------------------- # loop over all time 
courses for itemp in range(ntemp): # ---------------------------------------------- # if given plot a headline above the time # courses of each condition # ---------------------------------------------- if icomp == istart_plot and len(temp_profile_names): # add a sub-plot for the text p_text = fig.add_subplot(gs[(idx_class - 1) * 10: 6 + (idx_class - 1) * 12, (itemp) * 8 + 11:(itemp + 1) * 8 + 9]) # plot the text and adjust spines p_text.text(0, 0, " " + temp_profile_names[itemp], fontsize=30) adjust_spines(p_text, []) # set plot position if bar_plot: p2 = plt.subplot( gs[20 * (icomp - istart_plot) + idx_class * 11:20 * (icomp - istart_plot) + 13 + idx_class * 10, itemp * 8 + 11:(itemp + 1) * 8 + 9]) else: p2 = plt.subplot( gs[20 * (icomp - istart_plot) + idx_class * 10:20 * (icomp - istart_plot) + 15 + idx_class * 10, itemp * 8 + 11:(itemp + 1) * 8 + 9]) # extract temporal plotting information times = (np.arange(win_ntsl) / sfreq + tpre)[5:-5] idx_start = np.argmin(np.abs(times - time_range[0])) idx_end = np.argmin(np.abs(times - time_range[1])) # ---------------------------------------------- # plot spectral profile # ---------------------------------------------- # check if global scaling should be used if global_scaling: vmin_cur, vmax_cur = np.min(vmin), np.max(vmax) else: vmin_cur, vmax_cur = vmin[icomp], vmax[icomp] # show spectral profile if bar_plot: plt.bar(freqs, freq_heights[itemp, int(idx_sort[icomp]), :], width=1.0, color='cornflowerblue') plt.xlim(flow, fhigh) plt.ylim(0.0, 1.0) # set some parameter p2.set_xlabel("freq. [Hz]") p2.set_ylabel("ampl. 
[a.u.]") # ---------------------------------------------- # plot temporal profile on the some spot # ---------------------------------------------- ax = plt.twiny() ax.set_xlabel("time [s]") ax.plot(times[idx_start:idx_end], 0.5+temporal_envelope_mean[itemp][0][int(idx_sort[icomp]), idx_start:idx_end], color='red', linewidth=3.0) ax.set_xlim(times[idx_start], times[idx_end]) ax.set_ylim(0.0, 1.0) else: average_power = average_power_all[itemp][int(idx_sort[icomp])] extent = (times[idx_start], times[idx_end], freqs[0], freqs[-1]) p2.imshow(average_power, extent=extent, aspect="auto", origin="lower", picker=False, cmap='RdBu_r', vmin=vmin_cur, vmax=vmax_cur) # set some parameter p2.set_xlabel("time [s]") p2.set_ylabel("freq. [Hz]") # ---------------------------------------------- # plot temporal profile on the some spot # ---------------------------------------------- ax = plt.twinx() ax.set_xlim(times[idx_start], times[idx_end]) ax.set_ylim(ylim_temp) ax.set_ylabel("ampl. [a.u.]") ax.plot(times[idx_start:idx_end], temporal_envelope_mean[itemp][0][int(idx_sort[icomp]), idx_start:idx_end], color='black', linewidth=3.0) # ---------------------------------------------- # finally plot a color bar # ---------------------------------------------- if not bar_plot: # first normalize the color table norm = Normalize(vmin=np.round(vmin_cur, 2), vmax=np.round(vmax_cur, 2)) sm = plt.cm.ScalarMappable(cmap='RdBu_r', norm=norm) sm.set_array(np.linspace(vmin_cur, 1.0)) # estimate position of the color bar xpos = 0.405 + 0.5/(ntemp + 1.0) if n_keys > 1: cbaxes = fig.add_axes([xpos, 0.135, 0.2, 0.006]) else: cbaxes = fig.add_axes([xpos, 0.03, 0.2, 0.006]) ticks_fac = (vmax_cur - vmin_cur) * 0.3333 ticks = np.round([vmin_cur, vmin_cur + ticks_fac, vmax_cur - ticks_fac, vmax_cur], 2) # ticks = [-1.0, -0.5, 0.0, 0.5, 1.0] # now plot color bar cb = plt.colorbar(sm, ax=p2, cax=cbaxes, use_gridspec=False, orientation='horizontal', ticks=ticks, format='%1.2g') cb.ax.tick_params(labelsize=18) 
# ---------------------------------------------- # save image # ---------------------------------------------- if fnout: fnout_complete = '%s_%02d.png' % (fnout, iimg + 1) plt.savefig(fnout_complete, format='png', dpi=300) # close plot and set plotting back to screen plt.close('FourierICA plots') plt.ion() # remove temporary directory for # spatial profile plots if exists(temp_plot_dir): rmdir(temp_plot_dir) return mni_coords, classification, labels
bsd-3-clause
RTHMaK/RPGOne
scipy-2017-sklearn-master/notebooks/helpers.py
3
5032
import numpy as np
from collections import defaultdict
import os


# Can also use pandas!
def process_titanic_line(line):
    """Parse one raw line of titanic3.csv into a dict of typed fields.

    Every field in the file is double-quoted and some (e.g. names) contain
    commas, so the line is split on the closing-quote-plus-comma sequence
    '",' and any remaining quote characters are stripped afterwards.
    Blank ``age``/``fare`` fields are mapped to the sentinel value -1.
    """
    # Split line on '",' to get fields without comma confusion
    vals = line.strip().split('",')
    # replace spurious " characters
    vals = [v.replace('"', '') for v in vals]
    pclass = int(vals[0])
    survived = int(vals[1])
    name = str(vals[2])
    sex = str(vals[3])
    try:
        age = float(vals[4])
    except ValueError:
        # Blank age -> sentinel
        age = -1
    sibsp = float(vals[5])
    parch = int(vals[6])
    ticket = str(vals[7])
    try:
        fare = float(vals[8])
    except ValueError:
        # Blank fare -> sentinel
        fare = -1
    cabin = str(vals[9])
    embarked = str(vals[10])
    boat = str(vals[11])
    homedest = str(vals[12])
    line_dict = {'pclass': pclass, 'survived': survived, 'name': name,
                 'sex': sex, 'age': age, 'sibsp': sibsp, 'parch': parch,
                 'ticket': ticket, 'fare': fare, 'cabin': cabin,
                 'embarked': embarked, 'boat': boat, 'homedest': homedest}
    return line_dict


def load_titanic(test_size=.25, feature_skip_tuple=(), random_state=1999):
    """Load titanic3.csv and return a stratified train/test split.

    Parameters
    ----------
    test_size : float
        Fraction of samples put into the test split.
    feature_skip_tuple : tuple of str
        Feature names to drop from both the string and numeric feature sets.
    random_state : int
        Seed for the stratified shuffle split.

    Returns
    -------
    (keys, train_data, test_data, train_labels, test_labels)
        ``keys`` lists the numeric feature names followed by the string
        feature names; the string features are one-hot encoded with
        DictVectorizer and concatenated after the numeric columns.
    """
    # Imported lazily so process_titanic_line stays usable (and this module
    # importable) without scikit-learn installed.
    from sklearn.model_selection import StratifiedShuffleSplit
    from sklearn.feature_extraction import DictVectorizer

    with open(os.path.join('datasets', 'titanic', 'titanic3.csv')) as f:
        # Remove . from home.dest, split on quotes because some fields
        # have commas
        keys = f.readline().strip().replace('.', '').split('","')
        lines = f.readlines()

    string_keys = ['name', 'sex', 'ticket', 'cabin', 'embarked',
                   'boat', 'homedest']
    string_keys = [s for s in string_keys if s not in feature_skip_tuple]
    numeric_keys = ['pclass', 'age', 'sibsp', 'parch', 'fare']
    numeric_keys = [n for n in numeric_keys if n not in feature_skip_tuple]

    train_vectorizer_list = []
    test_vectorizer_list = []

    n_samples = len(lines)
    numeric_data = np.zeros((n_samples, len(numeric_keys)))
    numeric_labels = np.zeros((n_samples,), dtype=int)

    # Doing this twice is horribly inefficient but the file is small...
    # First pass: only the labels are needed to stratify the split.
    for n, l in enumerate(lines):
        numeric_labels[n] = process_titanic_line(l)["survived"]

    # BUG FIX: the sklearn.model_selection API spells this parameter
    # n_splits (n_iter was the old, removed cross_validation spelling),
    # and the random_state argument was previously ignored in favor of a
    # hard-coded seed of 12.
    sss = StratifiedShuffleSplit(n_splits=1, test_size=test_size,
                                 random_state=random_state)
    # This is a weird way to get the indices but it works
    train_idx = None
    test_idx = None
    for train_idx, test_idx in sss.split(numeric_data, numeric_labels):
        pass

    # Second pass: route each row's string features to the train or test
    # vectorizer list and fill in the numeric matrix.
    for n, l in enumerate(lines):
        line_dict = process_titanic_line(l)
        strings = {k: line_dict[k] for k in string_keys}
        if n in train_idx:
            train_vectorizer_list.append(strings)
        else:
            test_vectorizer_list.append(strings)
        numeric_data[n] = np.asarray([line_dict[k] for k in numeric_keys])

    train_numeric = numeric_data[train_idx]
    test_numeric = numeric_data[test_idx]
    train_labels = numeric_labels[train_idx]
    test_labels = numeric_labels[test_idx]

    vec = DictVectorizer()
    # .toarray() due to returning a scipy sparse array
    train_categorical = vec.fit_transform(train_vectorizer_list).toarray()
    test_categorical = vec.transform(test_vectorizer_list).toarray()
    train_data = np.concatenate([train_numeric, train_categorical], axis=1)
    test_data = np.concatenate([test_numeric, test_categorical], axis=1)
    keys = numeric_keys + string_keys
    return keys, train_data, test_data, train_labels, test_labels


FIELDNAMES = ('polarity', 'id', 'date', 'query', 'author', 'text')


def read_sentiment_csv(csv_file, fieldnames=FIELDNAMES, max_count=None,
                       n_partitions=1, partition_id=0):
    """Read a sentiment140-style CSV and return (texts, targets).

    Polarity '4' rows become target 1, polarity '0' rows become target -1;
    other polarities are skipped.  When ``max_count`` is given, at most
    ``max_count / 2`` rows of each class are kept.  ``n_partitions`` /
    ``partition_id`` select every n-th row so the work can be sharded.
    """
    import csv  # put the import inside for use in IPython.parallel

    def file_opener(csv_file):
        try:
            open(csv_file, 'r', encoding="latin1").close()
            return open(csv_file, 'r', encoding="latin1")
        except TypeError:
            # Python 2 does not have encoding arg
            return open(csv_file, 'rb')

    texts = []
    targets = []
    with file_opener(csv_file) as f:
        reader = csv.DictReader(f, fieldnames=fieldnames,
                                delimiter=',', quotechar='"')
        pos_count, neg_count = 0, 0
        for i, d in enumerate(reader):
            if i % n_partitions != partition_id:
                # Skip entry if not in the requested partition
                continue
            if d['polarity'] == '4':
                if max_count and pos_count >= max_count / 2:
                    continue
                pos_count += 1
                texts.append(d['text'])
                targets.append(1)
            elif d['polarity'] == '0':
                if max_count and neg_count >= max_count / 2:
                    continue
                neg_count += 1
                texts.append(d['text'])
                targets.append(-1)
    return texts, targets
apache-2.0
spennihana/h2o-3
h2o-py/tests/testdir_jira/pyunit_pubdev_5265.py
2
2111
import numpy as numpy
from pandas import DataFrame
import h2o
import warnings
from h2o.estimators import H2OGeneralizedLinearEstimator
from tests import pyunit_utils


def pubdev_5265():
    """Regression test for PUBDEV-5265.

    Trains a multinomial GLM with mean imputation, then predicts on a
    frame containing a categorical level ('4') never seen in training.
    Verifies that a warning about the unseen level is raised and that the
    unseen value is imputed with the training mode, yielding the expected
    per-class prediction counts.
    """
    training_data = {
        'response': ['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B', 'B',
                     'C', 'C', 'C', 'C', 'C', 'C', 'C'],
        'explanatory': ['nan', 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3]
    }
    test_data = {
        'response': ['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B', 'B',
                     'C', 'C', 'C', 'C', 'C', 'C', 'C'],
        'explanatory': ['nan', 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4]
    }
    training_data = h2o.H2OFrame(training_data)
    training_data['explanatory'] = training_data['explanatory'].asfactor()
    test_data = h2o.H2OFrame(test_data)
    test_data['explanatory'] = test_data['explanatory'].asfactor()

    glm_estimator = H2OGeneralizedLinearEstimator(family="multinomial",
                                                  missing_values_handling="MeanImputation",
                                                  seed=1234, Lambda=0)
    glm_estimator.train(x=["explanatory"], y="response", training_frame=training_data)

    # Training on the given dataset should not fail if there is a missing
    # categorical variable (present in training dataset)
    with warnings.catch_warnings(record=True) as w:
        grouped_occurances = glm_estimator.predict(test_data=test_data).group_by((0)).count().get_frame() \
            .as_data_frame()
        assert "Test/Validation dataset column 'explanatory' has levels not trained on: [4]" in str(w[-1].message)

    # The very first value corresponding to 'A' in the explanatory variable
    # column should be replaced by the mode value, which is 3.
    # As a result, 8 occurances of type C should be predicted.
    # BUG FIX: the original computed this comparison but never asserted it,
    # so the regression check silently did nothing; also
    # DataFrame.as_matrix() has been removed from pandas -> use .values.
    assert grouped_occurances.values.tolist() == [['A', 4], ['B', 6], ['C', 8]]


if __name__ == "__main__":
    pyunit_utils.standalone_test(pubdev_5265)
else:
    pubdev_5265()
apache-2.0
iiSeymour/pandashells
pandashells/test/p_df_test.py
1
5654
#! /usr/bin/env python
"""Unit and integration tests for the p.df pandashells command."""
import os
import shutil
import subprocess
import tempfile

from mock import patch, MagicMock
from unittest import TestCase

import pandas as pd

try:
    from StringIO import StringIO
except ImportError:  # pragma nocover
    from io import StringIO

from pandashells.bin.p_df import (
    needs_plots,
    get_modules_and_shortcuts,
    framify,
    process_command,
)


class NeedsPlots(TestCase):
    """Tests for the needs_plots() command-list inspector."""

    def test_doesnt_need_plots(self):
        command_list = ['df.reset_index()', 'df.head()']
        self.assertFalse(needs_plots(command_list))

    def test_needs_plots(self):
        command_list = ['set_xlim([1, 2])']
        self.assertTrue(needs_plots(command_list))


class GetModulesAndShortcutsTests(TestCase):
    """Tests for inferring which module imports a command list needs."""

    def test_no_extra_needed(self):
        # Plain dataframe manipulation only needs pandas + dateutil
        command_list = ['df.reset_index()', 'df.head()']
        self.assertEqual(
            set(get_modules_and_shortcuts(command_list)),
            {
                ('pandas', 'pd'),
                ('dateutil', 'dateutil'),
            }
        )

    def test_get_extra_import_all_needed(self):
        # Using pl/sns/scp/np shortcuts should pull in all extra modules
        command_list = [
            'pl.plot(df.x)',
            'sns.distplot(df.x)',
            'scp.stats.norm(1, 1)',
            'np.random.randn(1)'
        ]
        self.assertEqual(
            set(get_modules_and_shortcuts(command_list)),
            {
                ('dateutil', 'dateutil'),
                ('pandas', 'pd'),
                ('scipy', 'scp'),
                ('pylab', 'pl'),
                ('seaborn', 'sns'),
                ('numpy', 'np'),
            },
        )


class FramifyTests(TestCase):
    """Tests that framify() coerces command results into DataFrames."""

    def test_dataframe_to_dataframe(self):
        cmd = ''
        df = pd.DataFrame([{'a': 1}])
        out = framify(cmd, df)
        self.assertTrue(isinstance(out, pd.DataFrame))

    def test_series_to_dataframe(self):
        cmd = ''
        df = pd.Series({'a': 1})
        out = framify(cmd, df)
        self.assertTrue(isinstance(out, pd.DataFrame))

    def test_list_to_dataframe(self):
        cmd = ''
        df = [1, 2, 3]
        out = framify(cmd, df)
        self.assertTrue(isinstance(out, pd.DataFrame))

    @patch('pandashells.bin.p_df.sys')
    def test_number_to_dataframe(self, sys_mock):
        # A bare scalar cannot be framified: expect an error message + exit
        cmd = ''
        df = 7
        sys_mock.stderr = MagicMock(write=MagicMock())
        sys_mock.exit = MagicMock()
        framify(cmd, df)
        self.assertTrue(sys_mock.stderr.write.called)
        self.assertTrue(sys_mock.exit.called)


class ProcessCommandTests(TestCase):
    """Tests for process_command() applied to an in-memory frame."""

    def setUp(self):
        self.df = pd.DataFrame([
            {'a': 1, 'b': 10},
            {'a': 2, 'b': 20},
            {'a': 3, 'b': 30},
            {'a': 4, 'b': 40},
        ])

    def test_col_assignement(self):
        args = MagicMock()
        cmd = 'df["c"] = 2 * df["a"]'
        df = process_command(args, cmd, self.df)
        self.assertEqual(df.c.iloc[0], 2)

    @patch('pandashells.bin.p_df.sys')
    @patch('pandashells.bin.p_df.exec_plot_command')
    def test_plot_needed(self, exec_plot_mock, sys_mock):
        # Plot commands should be routed to exec_plot_command and then exit
        args = MagicMock()
        sys_mock.exit = MagicMock()
        cmd = 'df.plot(x="a", y="b")'
        process_command(args, cmd, self.df)
        self.assertTrue(exec_plot_mock.called)
        self.assertTrue(sys_mock.exit.called)

    def test_regular_command(self):
        args = MagicMock()
        cmd = 'df.a.value_counts()'
        df = process_command(args, cmd, self.df)
        self.assertEqual(set(df.index), {1, 2, 3, 4})
        self.assertEqual(set(df[0]), {1})


class IntegrationTests(TestCase):
    """End-to-end tests that shell out to the installed p.df command."""

    def setUp(self):
        self.df = pd.DataFrame([
            {'a': 1, 'b': 10},
            {'a': 2, 'b': 20},
            {'a': 3, 'b': 30},
            {'a': 4, 'b': 40},
        ])

    def get_command_result(self, cmd, as_table=False):
        """Pipe self.df (csv or table format) through *cmd*, return stdout."""
        p = subprocess.Popen(
            ['bash', '-c', cmd],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        if as_table:
            stdout, stderr = p.communicate(
                self.df.to_string(index=False).encode('utf-8'))
        else:
            stdout, stderr = p.communicate(
                self.df.to_csv(index=False).encode('utf-8'))
        return stdout.decode('utf-8').strip()

    def test_no_command(self):
        cmd = 'p.df'
        df = pd.read_csv(StringIO(self.get_command_result(cmd)))
        self.assertEqual(list(df.a), [1, 2, 3, 4])

    def test_names(self):
        cmd = 'p.df --names x y'
        df = pd.read_csv(StringIO(self.get_command_result(cmd)))
        self.assertEqual(list(df.columns), ['x', 'y'])

    def test_multiple_commands(self):
        cmd = """p.df 'df["y"] = -df.y' 'df["z"] = df["y"]' --names x y"""
        df = pd.read_csv(StringIO(self.get_command_result(cmd)))
        self.assertTrue(all(df.z < 0))

    def test_input_table(self):
        cmd = 'p.df -i table'
        df = pd.read_csv(StringIO(
            self.get_command_result(cmd, as_table=True)))
        self.assertEqual(list(df.columns), ['a', 'b'])

    def test_output_table(self):
        cmd = 'p.df -o table'
        df = pd.read_csv(
            StringIO(self.get_command_result(cmd)), delimiter=r'\s+')
        self.assertEqual(list(df.columns), ['a', 'b'])

    def test_plotting(self):
        dir_name = tempfile.mkdtemp()
        file_name = os.path.join(dir_name, 'deleteme.png')
        cmd = """p.df 'df.plot(x="a", y="b")' --savefig {}""".format(file_name)
        self.get_command_result(cmd)
        file_existed = os.path.isfile(file_name)
        # BUG FIX: clean up with shutil.rmtree instead of shelling out to
        # `rm -rf` via os.system, which is POSIX-only and ignores failures.
        shutil.rmtree(dir_name, ignore_errors=True)
        self.assertTrue(file_existed)
bsd-2-clause
vondrejc/FFTHomPy
examples/lowRankTensorApproximations/plots.py
1
28051
import pickle import matplotlib as mpl import matplotlib.pyplot as plt import os from examples.lowRankTensorApproximations.fig_pars import set_labels, set_pars os.nice(19) def save_experiment_settings(kind_list, Ns, kinds, sol_rank_range_set, material_list, data_folder='data_for_plot'): if not os.path.exists('{}'.format(data_folder)): os.makedirs('{}/'.format(data_folder)) for dim in [2, 3]: for material in material_list: if not os.path.exists('{}/dim_{}/mat_{}/'.format(data_folder, dim, material)): os.makedirs('{}/dim_{}/mat_{}/'.format(data_folder, dim, material)) pickle.dump(kind_list, open("{}/kind_list.p".format(data_folder), "wb")) pickle.dump(Ns, open("{}/Ns.p".format(data_folder), "wb")) pickle.dump(kinds, open("{}/kinds.p".format(data_folder), "wb")) pickle.dump(sol_rank_range_set, open("{}/sol_rank_range_set.p".format(data_folder), "wb")) pickle.dump(material_list, open("{}/material_list.p".format(data_folder), "wb")) return def load_experiment_settings(data_folder='data_for_plot'): material_list = pickle.load(open("{}/material_list.p".format(data_folder), "rb")) sol_rank_range_set = pickle.load(open("{}/sol_rank_range_set.p".format(data_folder), "rb")) kinds = pickle.load(open("{}/kinds.p".format(data_folder), "rb")) Ns = pickle.load(open("{}/Ns.p".format(data_folder), "rb")) kind_list = pickle.load(open("{}/kind_list.p".format(data_folder), "rb")) solver = 'mr' return material_list, sol_rank_range_set, kinds, Ns, kind_list, solver def plot_error(): data_folder = "data_for_plot/error" material_list, sol_rank_range_set, kinds, Ns, kind_list, solver = load_experiment_settings( data_folder=data_folder) ylimit = [10**-11, 10**0] xlabel = 'rank of solution' ylabel = 'relative error' for dim in [2]: N = max(Ns['{}'.format(dim)]) xlimend = max(sol_rank_range_set['{}'.format(dim)]) if not os.path.exists('figures'): os.makedirs('figures') ##### BEGIN: figure 1 resiguum(solution rank) ########### for material in material_list: parf = set_pars(mpl) lines, labels = 
set_labels() src = 'figures/' # source folder\ plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi']) plt.ylabel('relative error') plt.xlabel('rank of solution') sol_rank_range = sol_rank_range_set['{}'.format(dim)] i = 0 for kind in kinds['{}'.format(dim)]: sols_Ga = pickle.load(open("{}/dim_{}/mat_{}/sols_Ga_{}.p".format(data_folder, dim, material, N), "rb")) sols_GaNi = pickle.load( open("{}/dim_{}/mat_{}/sols_GaNi_{}.p".format(data_folder, dim, material, N), "rb")) sols_Ga_Spar = pickle.load( open("{}/dim_{}/mat_{}/sols_Ga_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N, solver), "rb")) sols_GaNi_Spar = pickle.load( open("{}/dim_{}/mat_{}/sols_GaNi_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N, solver), "rb")) plt.semilogy(sol_rank_range, [abs((sols_Ga_Spar[i]-sols_Ga[1])/sols_Ga[1]) for i in range(len(sols_Ga_Spar))], lines['Ga_{}'.format(kind_list[kind])][i], label=labels['Ga{}'.format(kind_list[kind])], markevery=1) plt.semilogy(sol_rank_range, [abs((sols_GaNi_Spar[i]-sols_GaNi[1])/sols_GaNi[1]) for i in range(len(sols_GaNi_Spar))], lines['GaNi_{}'.format(kind_list[kind])][i], label=labels['GaNi{}'.format(kind_list[kind])], markevery=1, markersize=7, markeredgewidth=1, markerfacecolor='None') i = i + 1 ax = plt.gca() plt.xlabel(xlabel) ax.set_xlim([0, xlimend]) ax.set_ylim(ylimit) plt.xticks(sol_rank_range) plt.ylabel(ylabel) plt.legend(loc='best') fname = src + 'Error_dim{}_mat{}_{}_N{}{}'.format(dim, material, solver, N, '.pdf') print(('create figure: {}'.format(fname))) plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight') print('END plot errors 2D') ##### END: figure 1 resiguum(solution rank) ########### for dim in [3]: N = max(Ns['{}'.format(dim)]) xlimend = max(sol_rank_range_set['{}'.format(dim)]) if not os.path.exists('figures'): os.makedirs('figures') ##### BEGIN: figure 1 resiguum(solution rank) ########### for material in material_list: parf = set_pars(mpl) lines, labels = 
set_labels() src = 'figures/' # source folder\ plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi']) sol_rank_range = sol_rank_range_set['{}'.format(dim)] i = 0 for kind in kinds['{}'.format(dim)]: sols_Ga = pickle.load( open("data_for_plot/error/dim_{}/mat_{}/sols_Ga_{}.p".format(dim, material, N), "rb")) sols_GaNi = pickle.load( open("{}/dim_{}/mat_{}/sols_GaNi_{}.p".format(data_folder, dim, material, N), "rb")) sols_Ga_Spar = pickle.load( open("{}/dim_{}/mat_{}/sols_Ga_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N, solver), "rb")) sols_GaNi_Spar = pickle.load( open("{}/dim_{}/mat_{}/sols_GaNi_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N, solver), "rb")) plt.semilogy(sol_rank_range, [abs((sols_Ga_Spar[i]-sols_Ga[1])/sols_Ga[1]) for i in range(len(sols_Ga_Spar))], lines['Ga_{}'.format(kind_list[kind])][i], label=labels['Ga{}'.format(kind_list[kind])], markevery=1) plt.semilogy(sol_rank_range, [abs((sols_GaNi_Spar[i]-sols_GaNi[1])/sols_GaNi[1]) for i in range(len(sols_Ga_Spar))], lines['GaNi_{}'.format(kind_list[kind])][i], label=labels['GaNi{}'.format(kind_list[kind])], markevery=1, markersize=7, markeredgewidth=1, markerfacecolor='None') ax = plt.gca() plt.xlabel(xlabel) ax.set_xlim([0, xlimend]) plt.xticks(sol_rank_range) ax.set_ylim(ylimit) plt.ylabel(ylabel) plt.legend(loc='best') fname=src+'Error_dim{}_mat{}_{}_N{}{}'.format(dim, material, solver, N, '.pdf') print(('create figure: {}'.format(fname))) plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight') print('END plot errors 3D') ##### END: figure 1 resiguum(solution rank) ########### def plot_memory(): material_list, sol_rank_range_set, kinds, Ns, kind_list, solver = load_experiment_settings() xlabel = 'rank of solution' ylabel = 'memory efficiency' if not os.path.exists('figures'): os.makedirs('figures') for dim in [2]: sol_rank_range = sol_rank_range_set['{}'.format(dim)] N = max(Ns['{}'.format(dim)]) xlimend = 
max(sol_rank_range_set['{}'.format(dim)]) ##### BEGIN: figure 2 Memory efficiency ########### for material in material_list: parf = set_pars(mpl) lines, labels = set_labels() src = 'figures/' # source folder\ plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi']) mem_GaNi = pickle.load(open("data_for_plot/dim_{}/mat_{}/mem_GaNi_{}.p".format(dim, material, N, ), "rb")) plt.semilogy(sol_rank_range, mem_GaNi, lines['full'], label=labels['GaNi{}'.format(('full'))], markevery=1, markersize=7, markeredgewidth=1, markerfacecolor='None') for N in Ns['{}'.format(dim)]: i = 0 for kind in kinds['{}'.format(dim)]: mem_GaNi_Spar = pickle.load(open( "data_for_plot/dim_{}/mat_{}/mem_GaNi_Spar_{}_{}_{}.p".format(dim, material, kind, N, solver), "rb")) plt.semilogy(sol_rank_range, mem_GaNi_Spar, lines['mem_{}'.format(kind_list[kind])][i], label='{}{}'.format(labels['GaNi{}N'.format(kind_list[kind])], N), markevery=1, markersize=7, markeredgewidth=1, markerfacecolor='None') i = i + 1 ax = plt.gca() plt.xticks(sol_rank_range) plt.xlabel(xlabel) ax.set_xlim([0, xlimend]) plt.ylabel(ylabel) plt.legend(loc='best') fname=src+'Memory_dim{}_mat{}{}'.format(dim, material, '.pdf') print(('create figure: {}'.format(fname))) plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight') print('END plot memory') ##### END: figure 2 memory ########### for dim in [3]: sol_rank_range = sol_rank_range_set['{}'.format(dim)] N = max(Ns['{}'.format(dim)]) xlimend = max(sol_rank_range_set['{}'.format(dim)]) ##### BEGIN: figure 2 Memory efficiency ########### for material in material_list: plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi']) mem_GaNi=pickle.load(open("data_for_plot/dim_{}/mat_{}/mem_GaNi_{}.p".format(dim, material, N), "rb")) plt.semilogy(sol_rank_range, mem_GaNi, lines['full'], label=labels['GaNi{}'.format(('full'))], markevery=1, markersize=7, markeredgewidth=1, markerfacecolor='None') for kind in kinds['{}'.format(dim)]: i = 0 for N in 
[max(Ns['{}'.format(dim)]), min(Ns['{}'.format(dim)])]: mem_GaNi_Spar = pickle.load(open( "data_for_plot/dim_{}/mat_{}/mem_GaNi_Spar_{}_{}_{}.p".format(dim, material, kind, N, solver), "rb")) plt.semilogy(sol_rank_range, mem_GaNi_Spar, lines['mem_{}'.format(kind_list[kind])][i], label='{}{}'.format(labels['GaNi{}N'.format(kind_list[kind])], N), markevery=1, markersize=7, markeredgewidth=1, markerfacecolor='None') i = i + 1 ax = plt.gca() plt.xticks(sol_rank_range) plt.xlabel(xlabel) ax.set_xlim([0, xlimend]) plt.ylabel(ylabel) plt.legend(loc='best') fname = src + 'Memory_dim{}_mat{}{}'.format(dim, material, '.pdf') print(('create figure: {}'.format(fname))) plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight') print('END plot memory') ##### END: figure 2 memory ########### def plot_residuals(): data_folder = "data_for_plot/residua" material_list, sol_rank_range_set, kinds, Ns, kind_list, solver=load_experiment_settings( data_folder=data_folder) xlabel = 'iteration' ylabel = 'norm of residuum' iter_rank_range_set = [1, 5, 10, 15, 20, 30, 40, 50] if not os.path.exists('figures'): os.makedirs('figures') for dim in [2]: xlimit = [0, 30] ylimit = [10**-7, 10**-1] for N in Ns['{}'.format(dim)]: ##### BEGIN: figure 5.1 Residuum for GA solution ########### for material in material_list: for kind in kinds['{}'.format(dim)]: # plt.figure(1).clear() parf = set_pars(mpl) lines, labels = set_labels() src = 'figures/' # source folder\ sol_rank_range = sol_rank_range_set['{}'.format(dim)] plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi']) res_Ga_Spar = pickle.load(open( "{}/dim_{}/mat_{}/res_Ga_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N, solver), "rb")) for sol_rank in range(0, len(sol_rank_range)): plt.semilogy(list(range(len(res_Ga_Spar[sol_rank]))), res_Ga_Spar[sol_rank], lines['Ga'][sol_rank], label='{} {}'.format(labels['Garank'], sol_rank_range[sol_rank]), markevery=2) ax = plt.gca() 
plt.xticks(iter_rank_range_set) plt.xlabel(xlabel) ax.set_xlim(xlimit) plt.ylabel(ylabel) ax.set_ylim(ylimit) plt.legend(loc='upper right') fname = src + 'Residuum_dim{}_mat{}_kind_{}_Ga_{}_N{}{}'.format(dim, material, kind_list[kind], solver, N, '.pdf') print(('create figure: {}'.format(fname))) plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight') print('END Ga 2D residuum N={}'.format(N)) ##### END: figure 5.1 Residuum for Ga solution ########### ##### BEGIN: figure 5.2 Residuum for GaNi solution ########### for material in material_list: for kind in kinds['{}'.format(dim)]: parf = set_pars(mpl) lines, labels = set_labels() src = 'figures/' # source folder\ sol_rank_range = sol_rank_range_set['{}'.format(dim)] plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi']) res_GaNi_Spar = pickle.load( open("{}/dim_{}/mat_{}/res_GaNi_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N, solver), "rb")) plt.xticks(iter_rank_range_set) for sol_rank in range(0, len(sol_rank_range)): plt.semilogy(list(range(len(res_GaNi_Spar[sol_rank]))), res_GaNi_Spar[sol_rank], lines['GaNi'][sol_rank], label='{} {}'.format(labels['GaNirank'], sol_rank_range[sol_rank]), markevery=2, markersize=7, markeredgewidth=1, markerfacecolor='None') ax = plt.gca() plt.xticks(iter_rank_range_set) plt.xlabel(xlabel) ax.set_xlim(xlimit) plt.ylabel(ylabel) ax.set_ylim(ylimit) plt.legend(loc='upper right') fname = src + 'Residuum_dim{}_mat{}_kind_{}_GaNi_{}_N{}{}'.format(dim, material, kind_list[kind], solver, N, '.pdf') print(('create figure: {}'.format(fname))) plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight') print('END GaNi 2D residuum N={}'.format(N)) ##### END: figure 5.2 Residuum for GaNi solution ########### for dim in [3]: xlimit = [0, 30] ylimit = [10**-7, 10**-1] for N in Ns['{}'.format(dim)]: ##### 0 material ########### for material in material_list: for kind in kinds['{}'.format(dim)]: parf = set_pars(mpl) 
lines, labels = set_labels() src = 'figures/' # source folder\ sol_rank_range = sol_rank_range_set['{}'.format(dim)] plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi']) res_Ga_Spar = pickle.load( open( "{}/dim_{}/mat_{}/res_Ga_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N, solver), "rb")) for sol_rank in range(0, len( sol_rank_range)): plt.semilogy(list(range(len(res_Ga_Spar[sol_rank]))), res_Ga_Spar[sol_rank], lines['Ga'][sol_rank], label='{} {}'.format(labels['Garank'], sol_rank_range[sol_rank]), markevery=2) ax = plt.gca() plt.xticks(iter_rank_range_set) plt.xlabel(xlabel) ax.set_xlim(xlimit) plt.ylabel(ylabel) ax.set_ylim(ylimit) plt.legend(loc='best') fname = src + 'Residuum_dim{}_mat{}_kind_{}_Ga_{}_N{}{}'.format(dim, material, kind_list[kind], solver, N, '.pdf') print(('create figure: {}'.format(fname))) plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight') print('END Ga 3D residuum N={} mat {}'.format(N, material)) ##### END: figure 5.1 Residuum for Ga solution ########### ##### BEGIN: figure 5.2 Residuum for GaNi solution ########### for material in material_list: for kind in kinds['{}'.format(dim)]: parf = set_pars(mpl) lines, labels = set_labels() src = 'figures/' # source folder\ sol_rank_range = sol_rank_range_set['{}'.format(dim)] plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi']) # plt.hold(True) res_GaNi_Spar = pickle.load( open("{}/dim_{}/mat_{}/res_GaNi_Spar_{}_{}_{}.p".format(data_folder, dim, material, kind, N, solver), "rb")) for sol_rank in range(0, len(sol_rank_range)): # range(len(sol_rank_range)): plt.semilogy(list(range(len(res_GaNi_Spar[sol_rank]))), res_GaNi_Spar[sol_rank], lines['GaNi'][sol_rank], label='{} {}'.format(labels['GaNirank'], sol_rank_range[sol_rank]), markevery=2, markersize=7, markeredgewidth=1, markerfacecolor='None') ax = plt.gca() plt.xticks(iter_rank_range_set) plt.xlabel(xlabel) ax.set_xlim(xlimit) plt.ylabel(ylabel) ax.set_ylim(ylimit) 
lg=plt.legend(loc='upper right') fname=src+'Residuum_dim{}_mat{}_kind_{}_GaNi_{}_N{}{}'.format(dim, material, kind_list[kind], solver, N, '.pdf') print(('create figure: {}'.format(fname))) plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight') print('END GaNi 3D residuum N={} mat {}'.format(N, material)) ##### END: figure 5.2 Residuum for GaNi solution ########### def plot_time(): data_folder = "data_for_plot/time" kind_list = ['cano', 'tucker', 'tt'] kinds = {'2': 0, '3': 2, } for material in [0, 3]: for dim in [2, 3]: kind = kinds['{}'.format(dim)] xlabel = 'number of points - $ N $' ylabel = 'time cost [s]' if not os.path.exists('figures'): os.makedirs('figures') parf = set_pars(mpl) lines, labels=set_labels() src='figures/' plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi']) N_list = pickle.load( open("{}/dim_{}/mat_{}/N_list_{}.p".format(data_folder, dim, material, kind_list[kind]), "rb")) full_time_list = pickle.load( open("{}/dim_{}/mat_{}/full_time_list_{}.p".format(data_folder, dim, material, kind_list[kind]), "rb")) sparse_time_list = pickle.load( open("{}/dim_{}/mat_{}/sparse_time_list_{}.p".format(data_folder, dim, material, kind_list[kind]), "rb")) plt.plot(N_list, full_time_list, lines['Gafull'], label='full', markevery=1, markerfacecolor='None') plt.plot(N_list, sparse_time_list, lines['GaSparse'], label='low-rank', markevery=1) ax = plt.gca() plt.xlabel(xlabel) plt.ylabel(ylabel) xlimit = [0, N_list[-1] + N_list[-1]/20] ylimit = [0 - full_time_list[-1]*0.05, full_time_list[-1]*1.05] ax.set_xlim(xlimit) ax.set_ylim(ylimit) plt.legend(loc='upper left') fname=src+'time_efficiency_dim{}_mat{}_{}{}'.format(dim, material, kind_list[kind], '.pdf') print(('create figure: {}'.format(fname))) plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight') print('END Ga time efficiency') for material in [2, 4]: kind_list = ['cano', 'tucker', 'tt'] kinds = {'2': 0, '3': 2, } for dim in [2, 3]: 
kind = kinds['{}'.format(dim)] xlabel = 'number of points - $ N $' ylabel = 'time cost [s]' if not os.path.exists('figures'): os.makedirs('figures') parf = set_pars(mpl) lines, labels = set_labels() src = 'figures/' plt.figure(num=None, figsize=parf['figsize'], dpi=parf['dpi']) N_list = pickle.load( open("{}/dim_{}/mat_{}/N_list_{}.p".format(data_folder, dim, material, kind_list[kind]), "rb")) full_time_list = pickle.load( open("{}/dim_{}/mat_{}/full_time_list_{}.p".format(data_folder, dim, material, kind_list[kind]), "rb")) sparse_time_list_1 = pickle.load( open("{}/dim_{}/mat_{}/sparse_time_list_{}_1e-03.p".format(data_folder, dim, material, kind_list[kind]), "rb")) sparse_time_list_4 = pickle.load( open("{}/dim_{}/mat_{}/sparse_time_list_{}_1e-06.p".format(data_folder, dim, material, kind_list[kind]), "rb")) plt.plot(N_list, full_time_list, lines['Gafull'], label='full', markevery=1, markerfacecolor='None') plt.plot(N_list, sparse_time_list_1, lines['GaSparse'], label='low-rank, err $<$ 1e-3', markevery=1) plt.plot(N_list, sparse_time_list_4, lines['GaSparse_4'], label='low-rank, err $<$ 1e-6', markevery=1) ax = plt.gca() plt.xlabel(xlabel) plt.ylabel(ylabel) xlimit = [0, N_list[-1] + N_list[-1]/20] ylimit = [0 - full_time_list[-1]*0.05, full_time_list[-1]*1.05] ax.set_xlim(xlimit) ax.set_ylim(ylimit) lg=plt.legend(loc='upper left') fname=src+'time_efficiency_dim{}_mat{}_{}{}'.format(dim, material, kind_list[kind], '.pdf') print(('create figure: {}'.format(fname))) plt.savefig(fname, dpi=parf['dpi'], pad_inches=parf['pad_inches'], bbox_inches='tight') print('END Ga time efficiency') def display_data(): kind_list = ['cano', 'tucker', 'tt'] kinds = {'2': 0, '3': 2, } for material in [0, 3]: for dim in [2, 3]: kind = kinds['{}'.format(dim)] N_list = pickle.load( open("data_for_plot/time/dim_{}/mat_{}/N_list_{}.p".format(dim, material, kind_list[kind]), "rb")) rank_list = pickle.load( open("data_for_plot/time/dim_{}/mat_{}/rank_list_{}.p".format(dim, material, 
kind_list[kind]), "rb")) full_time_list = pickle.load( open("data_for_plot/time/dim_{}/mat_{}/full_time_list_{}.p".format(dim, material, kind_list[kind]), "rb")) sparse_time_list = pickle.load( open("data_for_plot/time/dim_{}/mat_{}/sparse_time_list_{}.p".format(dim, material, kind_list[kind]), "rb")) print("dim={}, material={}, kind={} ".format(dim, material, kind_list[kind])) print("N list {} ".format(N_list)) print("rank list {} ".format(rank_list)) print("tensorsLowRank time list {} ".format(sparse_time_list)) print("full time list {} ".format(full_time_list)) print() for material in [2, 4]: for dim in [2, 3]: kind = kinds['{}'.format(dim)] N_list = pickle.load( open("data_for_plot/time/dim_{}/mat_{}/N_list_{}.p".format(dim, material, kind_list[kind]), "rb")) rank_list_1 = pickle.load( open("data_for_plot/time/dim_{}/mat_{}/rank_list_{}_1e-03.p".format(dim, material, kind_list[kind]), "rb")) print("dim={}, material={}, kind={}, err_tol=1e-03 ".format(dim, material, kind_list[kind])) print("N list {} ".format(N_list)) print("rank list {} ".format(rank_list_1)) print() rank_list_2 = pickle.load( open("data_for_plot/time/dim_{}/mat_{}/rank_list_{}_1e-06.p".format(dim, material, kind_list[kind]), "rb")) print("dim={}, material={}, kind={}, err_tol=1e-06 ".format(dim, material, kind_list[kind])) print("N list {} ".format(N_list)) print("rank list {} ".format(rank_list_2)) print() for material in [0,3, 2, 4]: for dim in [2, 3]: kind = kinds['{}'.format(dim)] N_list = pickle.load( open("data_for_plot/time/dim_{}/mat_{}/N_list_{}.p".format(dim, material, kind_list[kind]), "rb")) rank_list = pickle.load( open("data_for_plot/time/dim_{}/mat_{}/full_solution_rank_list_{}.p".format(dim, material, kind_list[kind]), "rb")) print("dim={}, material={}, kind={} ".format(dim, material, kind_list[kind])) print("N list {} ".format(N_list)) print("full solution rank list {} ".format(rank_list)) print() if __name__ == '__main__': # data used in plot_time have to be genereted first 
by experiment_time_efficiency.py # plot_time() # data used in plot_error, plot_memory() and plot_residuals() have to be genereted first by diffusion_comparison.py # plot_error() # plot_memory() # plot_residuals() display_data()
mit
wathen/PhD
MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/SplitMatrix/ScottTest/InverseTest/Step.py
2
15238
#!/usr/bin/python # interpolate scalar gradient onto nedelec space import petsc4py import sys petsc4py.init(sys.argv) from petsc4py import PETSc from dolfin import * Print = PETSc.Sys.Print # from MatrixOperations import * import numpy as np import PETScIO as IO import common import scipy import scipy.io import time import BiLinear as forms import IterOperations as Iter import MatrixOperations as MO import CheckPetsc4py as CP import ExactSol import Solver as S import MHDmatrixPrecondSetup as PrecondSetup import NSprecondSetup import MHDprec as MHDpreconditioner import memory_profiler import gc import MHDmulti import MHDmatrixSetup as MHDsetup import mshr import Lshaped #@profile m = 9 errL2u =np.zeros((m-1,1)) errH1u =np.zeros((m-1,1)) errL2p =np.zeros((m-1,1)) errL2b =np.zeros((m-1,1)) errCurlb =np.zeros((m-1,1)) errL2r =np.zeros((m-1,1)) errH1r =np.zeros((m-1,1)) l2uorder = np.zeros((m-1,1)) H1uorder =np.zeros((m-1,1)) l2porder = np.zeros((m-1,1)) l2border = np.zeros((m-1,1)) Curlborder =np.zeros((m-1,1)) l2rorder = np.zeros((m-1,1)) H1rorder = np.zeros((m-1,1)) NN = np.zeros((m-1,1)) DoF = np.zeros((m-1,1)) Velocitydim = np.zeros((m-1,1)) Magneticdim = np.zeros((m-1,1)) Pressuredim = np.zeros((m-1,1)) Lagrangedim = np.zeros((m-1,1)) Wdim = np.zeros((m-1,1)) iterations = np.zeros((m-1,1)) SolTime = np.zeros((m-1,1)) udiv = np.zeros((m-1,1)) MU = np.zeros((m-1,1)) level = np.zeros((m-1,1)) NSave = np.zeros((m-1,1)) Mave = np.zeros((m-1,1)) TotalTime = np.zeros((m-1,1)) nn = 2 dim = 2 ShowResultPlots = 'yes' split = 'Linear' parameters['form_compiler']['representation'] = 'quadrature' parameters['form_compiler']['optimize'] = True parameters['form_compiler'].add('eliminate_zeros', True) parameters['form_compiler']['cpp_optimize'] = True parameters["num_threads"] = 8 MU[0]= 1e0 for xx in xrange(1,m): print xx level[xx-1] = xx+ 0 nn = 2**(level[xx-1]) # parameters['form_compiler'].add = True # parameters['form_compiler']['cpp_optimize_flags'] = '-foo' # Create mesh 
and define function space nn = int(nn) NN[xx-1] = nn/2 mesh, boundaries, domains = Lshaped.Domain(nn) # parameters["form_compiler"]["quadrature_degree"] = 6 # parameters = CP.ParameterSetup() # mesh = UnitSquareMesh(nn,nn) # domain = mshr.Rectangle(Point(-1., -1.), Point(1., 1.)) - mshr.Rectangle(Point(0., 0.), Point(1., 1.) ) # mesh = mshr.generate_mesh(domain, nn) # plot(mesh).write_png() order = 2 parameters['reorder_dofs_serial'] = False Velocity = VectorFunctionSpace(mesh, "CG", order) Pressure = FunctionSpace(mesh, "CG", order-1) Magnetic = FunctionSpace(mesh, "N1curl", order-1) Lagrange = FunctionSpace(mesh, "CG", order-1) W = MixedFunctionSpace([Velocity, Pressure, Magnetic,Lagrange]) # W = Velocity*Pressure*Magnetic*Lagrange Velocitydim[xx-1] = Velocity.dim() Pressuredim[xx-1] = Pressure.dim() Magneticdim[xx-1] = Magnetic.dim() Lagrangedim[xx-1] = Lagrange.dim() Wdim[xx-1] = W.dim() print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n" dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()] def boundary(x, on_boundary): return on_boundary # class Step2d(Expression): # def __init__(self, mesh): # self.mesh = mesh # def eval_cell(self, values, x, ufc_cell): # if x[0] >= 0.5 or x[1] >= 0.5: # values[0] = 1 # values[1] = 0 # else: # values[0] = 0 # values[1] = 0 # def value_shape(self): # return (2,) # class Step1d(Expression): # def __init__(self, mesh): # self.mesh = mesh # def eval_cell(self, values, x, ufc_cell): # if x[0] <= 0.5: # values[0] = 1 # else: # values[0] = 0 # u0 = Expression(('1.0','1.0')) # # u0 = Step2d(mesh) # # p0 = Expression(('1.0')) # p0 = Step1d(mesh) # # b0 = Expression(('1.0','1.0')) # b0 = Step2d(mesh) # r0 = Expression(('0s.0')) # # r0 = Step1d(mesh) u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(1,1, mesh) # u0 = Step2d(mesh) bcu = DirichletBC(Velocity,u0, 
boundary) bcb = DirichletBC(Magnetic,b0, boundary) bcr = DirichletBC(Lagrange,r0, boundary) # bc = [u0,p0,b0,r0] bcs = [bcu,bcb,bcr] FSpaces = [Velocity,Pressure,Magnetic,Lagrange] (u, b, p, r) = TrialFunctions(W) (v, c, q, s) = TestFunctions(W) kappa = 10.0 Mu_m =10.0 MU = 1.0/1 IterType = 'Full' Split = "No" Saddle = "No" Stokes = "No" SetupType = 'python-class' F_NS = -MU*Laplacian+Advection+gradPres-kappa*NS_Couple if kappa == 0: F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple else: F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple # F_NS = Expression(('0.0','0.0')) # F_M = Expression(('0.0','0.0')) params = [kappa,Mu_m,MU] MO.PrintStr("Seting up initial guess matricies",2,"=","\n\n","\n") BCtime = time.time() BC = MHDsetup.BoundaryIndices(mesh) MO.StrTimePrint("BC index function, time: ", time.time()-BCtime) Hiptmairtol = 1e-5 HiptmairMatrices = PrecondSetup.MagneticSetup(Magnetic, Lagrange, b0, r0, Hiptmairtol, params) MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n") u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,HiptmairMatrices,1e-10,Neumann=Expression(("0","0")),options ="New") b_t = TrialFunction(Velocity) c_t = TestFunction(Velocity) ones = Function(Pressure) ones.vector()[:]=(0*ones.vector().array()+1) # pConst = - assemble(p_k*dx)/assemble(ones*dx) p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx) x = Iter.u_prev(u_k,p_k,b_k,r_k) KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(Pressure, MU) kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k) #plot(b_k) ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,IterType,"CG",Saddle,Stokes) RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params,"CG",Saddle,Stokes) bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0")), boundary) bcb = DirichletBC(W.sub(2),Expression(("0.0","0.0")), boundary) bcr = DirichletBC(W.sub(3),Expression(("0.0")), boundary) bcs = [bcu,bcb,bcr] 
parameters['linear_algebra_backend'] = 'uBLAS' eps = 1.0 # error measure ||u-u_k|| tol = 1.0E-4 # tolerance iter = 0 # iteration counter maxiter = 20 # max no of iterations allowed SolutionTime = 0 outer = 0 # parameters['linear_algebra_backend'] = 'uBLAS' # FSpaces = [Velocity,Magnetic,Pressure,Lagrange] if IterType == "CD": MO.PrintStr("Setting up PETSc "+SetupType,2,"=","\n","\n") Alin = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "Linear",IterType) Fnlin,b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "NonLinear",IterType) A = Fnlin+Alin A,b = MHDsetup.SystemAssemble(FSpaces,A,b,SetupType,IterType) u = b.duplicate() u_is = PETSc.IS().createGeneral(range(Velocity.dim())) NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim())) M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim())) OuterTol = 1e-5 InnerTol = 1e-5 NSits =0 Mits =0 TotalStart =time.time() SolutionTime = 0 while eps > tol and iter < maxiter: iter += 1 MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n") AssembleTime = time.time() if IterType == "CD": MO.StrTimePrint("MHD CD RHS assemble, time: ", time.time()-AssembleTime) b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "CD",IterType) else: MO.PrintStr("Setting up PETSc "+SetupType,2,"=","\n","\n") if Split == "Yes": if iter == 1: Alin = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "Linear",IterType) Fnlin,b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "NonLinear",IterType) A = Fnlin+Alin A,b = MHDsetup.SystemAssemble(FSpaces,A,b,SetupType,IterType) u = b.duplicate() else: Fnline,b = MHDsetup.Assemble(W,ns,maxwell,CoupleTerm,Lns,Lmaxwell,RHSform,bcs+BC, "NonLinear",IterType) A = Fnlin+Alin A,b = MHDsetup.SystemAssemble(FSpaces,A,b,SetupType,IterType) else: AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs) # A = AA.sparray() # print A.getnnz() # 
A.eliminate_zeros() # print A.getnnz() # ssss A,b = CP.Assemble(AA,bb) # if iter == 1: MO.StrTimePrint("MHD total assemble, time: ", time.time()-AssembleTime) u = b.duplicate() kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k) print "Inititial guess norm: ", u.norm(PETSc.NormType.NORM_INFINITY) #A,Q if IterType == 'Full': n = FacetNormal(mesh) mat = as_matrix([[b_k[1]*b_k[1],-b_k[1]*b_k[0]],[-b_k[1]*b_k[0],b_k[0]*b_k[0]]]) a = params[2]*inner(grad(b_t), grad(c_t))*dx(W.mesh()) + inner((grad(b_t)*u_k),c_t)*dx(W.mesh()) +(1./2)*div(u_k)*inner(c_t,b_t)*dx(W.mesh()) - (1./2)*inner(u_k,n)*inner(c_t,b_t)*ds(W.mesh())+kappa/Mu_m*inner(mat*b_t,c_t)*dx(W.mesh()) ShiftedMass = assemble(a) bcu.apply(ShiftedMass) ShiftedMass = CP.Assemble(ShiftedMass) kspF = NSprecondSetup.LSCKSPnonlinear(ShiftedMass) else: F = A.getSubMatrix(u_is,u_is) kspF = NSprecondSetup.LSCKSPnonlinear(F) stime = time.time() u, mits,nsits = S.solve(A,b,u,params,W,'Directss',IterType,OuterTol,InnerTol,HiptmairMatrices,Hiptmairtol,KSPlinearfluids, Fp,kspF) Soltime = time.time()- stime MO.StrTimePrint("MHD solve, time: ", Soltime) Mits += mits NSits += nsits SolutionTime += Soltime u1, p1, b1, r1, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter) p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx) u_k.assign(u1) p_k.assign(p1) b_k.assign(b1) r_k.assign(r1) uOld= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0) x = IO.arrayToVec(uOld) # plot(u_k).write_png() XX= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0) SolTime[xx-1] = SolutionTime/iter NSave[xx-1] = (float(NSits)/iter) Mave[xx-1] = (float(Mits)/iter) iterations[xx-1] = iter TotalTime[xx-1] = time.time() - TotalStart dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(),Lagrange.dim()] ExactSolution = [u0,p0,b0,r0] # errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], 
errH1r[xx-1] = Iter.Errors(XX,mesh,FSpaces,ExactSolution,order,dim, "CG") if xx > 1: l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1])) H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1])) l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1])) l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1])) Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1])) l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1])) H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1])) import pandas as pd LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"] LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1) LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles) pd.set_option('precision',3) LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e") LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e") LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f") LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f") LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e") LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f") print LatexTable print "\n\n Magnetic convergence" MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"] MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1) MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles) pd.set_option('precision',3) MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e") MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e") MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f") MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f") print MagneticTable print "\n\n Lagrange convergence" LagrangeTitles = ["l","B DoF","R DoF","R-L2","L2-order","R-H1","H1-order"] LagrangeValues = 
np.concatenate((level,Lagrangedim,Lagrangedim,errL2r,l2rorder,errH1r,H1rorder),axis=1) LagrangeTable= pd.DataFrame(LagrangeValues, columns = LagrangeTitles) pd.set_option('precision',3) LagrangeTable = MO.PandasFormat(LagrangeTable,"R-L2","%2.4e") LagrangeTable = MO.PandasFormat(LagrangeTable,'R-H1',"%2.4e") LagrangeTable = MO.PandasFormat(LagrangeTable,"L2-order","%1.2f") LagrangeTable = MO.PandasFormat(LagrangeTable,'H1-order',"%1.2f") print LagrangeTable import pandas as pd print "\n\n Iteration table" if IterType == "Full": IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",] else: IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"] IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,Mave,NSave),axis=1) IterTable= pd.DataFrame(IterValues, columns = IterTitles) if IterType == "Full": IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f") IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f") else: IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f") IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f") print IterTable print " \n Outer Tol: ",OuterTol, "Inner Tol: ", InnerTol tableName = "2d_nu="+str(MU)+"_nu_m="+str(Mu_m)+"_kappa="+str(kappa)+"_l="+str(np.min(level))+"-"+str(np.max(level))+".tex" IterTable.to_latex(tableName) # # # if (ShowResultPlots == 'yes'): # plot(u_k) # plot(interpolate(u0,Velocity)) # # plot(p_k) # # plot(interpolate(p0,Pressure)) # # plot(b_k) # plot(interpolate(b0,Magnetic)) # # plot(r_k) # plot(interpolate(r0,Lagrange)) # # interactive() # interactive()
mit
silky/sms-tools
lectures/05-Sinusoidal-model/plots-code/sine-analysis-synthesis.py
22
1543
import numpy as np import matplotlib.pyplot as plt from scipy.signal import hamming, triang, blackmanharris import sys, os, functools, time from scipy.fftpack import fft, ifft, fftshift sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/')) import dftModel as DFT import utilFunctions as UF (fs, x) = UF.wavread('../../../sounds/oboe-A4.wav') M = 601 w = np.blackman(M) N = 1024 hN = N/2 Ns = 512 hNs = Ns/2 pin = 5000 t = -70 x1 = x[pin:pin+w.size] mX, pX = DFT.dftAnal(x1, w, N) ploc = UF.peakDetection(mX, t) iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc) freqs = iploc*fs/N Y = UF.genSpecSines(freqs, ipmag, ipphase, Ns, fs) mY = 20*np.log10(abs(Y[:hNs])) pY = np.unwrap(np.angle(Y[:hNs])) y= fftshift(ifft(Y))*sum(blackmanharris(Ns)) plt.figure(1, figsize=(9, 6)) plt.subplot(4,1,1) plt.plot(np.arange(-M/2,M/2), x1, 'b', lw=1.5) plt.axis([-M/2,M/2, min(x1), max(x1)]) plt.title("x (oboe-A4.wav), M = 601") plt.subplot(4,1,2) plt.plot(np.arange(mX.size), mX, 'r', lw=1.5) plt.plot(iploc, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5) plt.axis([0, hN,-90,max(mX)+2]) plt.title("mX + spectral peaks; Blackman, N = 1024") plt.subplot(4,1,3) plt.plot(np.arange(mY.size), mY, 'r', lw=1.5) plt.axis([0, hNs,-90,max(mY)+2]) plt.title("mY; Blackman-Harris; Ns = 512") plt.subplot(4,1,4) plt.plot(np.arange(Ns), y, 'b', lw=1.5) plt.axis([0, Ns,min(y),max(y)]) plt.title("y; Ns = 512") plt.tight_layout() plt.savefig('sine-analysis-synthesis.png') plt.show()
agpl-3.0
kendricktan/rarepepes
pix2pix/test.py
1
1791
import matplotlib as mpl mpl.use('Agg') import utils import os import time import argparse import torch import torch.utils.data import torchvision.datasets as dset import torchvision.transforms as transforms import matplotlib.pyplot as plt from tqdm import tqdm from options import TestOptions from loader import PepeLoader from models import Pix2PixModel # CUDA_VISIBLE_DEVICES os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152 os.environ["CUDA_VISIBLE_DEVICES"] = "0" # Parse options opt = TestOptions().parse() if __name__ == '__main__': # pix2pix model model = Pix2PixModel() model.initialize(opt) dataset = PepeLoader( opt.dataroot, transform=transforms.Compose( [transforms.Scale(opt.loadSize), transforms.RandomCrop(opt.fineSize), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ] ), train=False ) dataloader = torch.utils.data.DataLoader( dataset, batch_size=1, shuffle=True, pin_memory=True ) total_steps = 0 for idx, data in enumerate(tqdm(dataloader)): if idx > opt.how_many: break model.set_input({ 'A': data[0], 'B': data[1] }) model.test() visuals = model.get_current_visuals() utils.mkdir('results') f, (ax1, ax2, ax3) = plt.subplots( 3, 1, sharey='row' ) ax1.imshow(visuals['real_A']) ax1.set_title('real A') ax2.imshow(visuals['fake_B']) ax2.set_title('fake B') ax3.imshow(visuals['real_B']) ax3.set_title('real B') f.savefig('results/{}.png'.format(int(time.time())))
mit
HolgerPeters/scikit-learn
examples/cluster/plot_ward_structured_vs_unstructured.py
320
3369
""" =========================================================== Hierarchical clustering: structured vs unstructured ward =========================================================== Example builds a swiss roll dataset and runs hierarchical clustering on their position. For more information, see :ref:`hierarchical_clustering`. In a first step, the hierarchical clustering is performed without connectivity constraints on the structure and is solely based on distance, whereas in a second step the clustering is restricted to the k-Nearest Neighbors graph: it's a hierarchical clustering with structure prior. Some of the clusters learned without connectivity constraints do not respect the structure of the swiss roll and extend across different folds of the manifolds. On the opposite, when opposing connectivity constraints, the clusters form a nice parcellation of the swiss roll. """ # Authors : Vincent Michel, 2010 # Alexandre Gramfort, 2010 # Gael Varoquaux, 2010 # License: BSD 3 clause print(__doc__) import time as time import numpy as np import matplotlib.pyplot as plt import mpl_toolkits.mplot3d.axes3d as p3 from sklearn.cluster import AgglomerativeClustering from sklearn.datasets.samples_generator import make_swiss_roll ############################################################################### # Generate data (swiss roll dataset) n_samples = 1500 noise = 0.05 X, _ = make_swiss_roll(n_samples, noise) # Make it thinner X[:, 1] *= .5 ############################################################################### # Compute clustering print("Compute unstructured hierarchical clustering...") st = time.time() ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X) elapsed_time = time.time() - st label = ward.labels_ print("Elapsed time: %.2fs" % elapsed_time) print("Number of points: %i" % label.size) ############################################################################### # Plot result fig = plt.figure() ax = p3.Axes3D(fig) ax.view_init(7, -80) for l 
in np.unique(label): ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2], 'o', color=plt.cm.jet(np.float(l) / np.max(label + 1))) plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time) ############################################################################### # Define the structure A of the data. Here a 10 nearest neighbors from sklearn.neighbors import kneighbors_graph connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False) ############################################################################### # Compute clustering print("Compute structured hierarchical clustering...") st = time.time() ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity, linkage='ward').fit(X) elapsed_time = time.time() - st label = ward.labels_ print("Elapsed time: %.2fs" % elapsed_time) print("Number of points: %i" % label.size) ############################################################################### # Plot result fig = plt.figure() ax = p3.Axes3D(fig) ax.view_init(7, -80) for l in np.unique(label): ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2], 'o', color=plt.cm.jet(float(l) / np.max(label + 1))) plt.title('With connectivity constraints (time %.2fs)' % elapsed_time) plt.show()
bsd-3-clause
IBT-FMI/SAMRI
samri/report/registration.py
1
3792
import hashlib import multiprocessing as mp import nibabel as nib import pandas as pd from os import path from joblib import Parallel, delayed from nipype.interfaces import ants, fsl def measure_sim(image_path, reference, substitutions=False, mask='', metric='MI', radius_or_number_of_bins=8, sampling_strategy='None', sampling_percentage=0.3, ): """Return a similarity metric score for two 3d images Parameters ---------- image_path : str Path to moving image (moving and fixed image assignment is arbitrary for this function). reference : str Path to fixed image (moving and fixed image assignment is arbitrary for this function). substitutions : dict, optional Dictionary with keys which include 'subject', 'session', and 'acquisition', which will be applied to format the image_path string. mask : str Path to mask which selects a subregionfor which to compute the similarity. metric : {'CC', 'MI', 'Mattes', 'MeanSquares', 'Demons', 'GC'} Similarity metric, as accepted by `nipype.interfaces.ants.registration.MeasureImageSimilarity` (which wraps the ANTs command `MeasureImageSimilarity`). 
""" if substitutions: image_path = image_path.format(**substitutions) image_path = path.abspath(path.expanduser(image_path)) #some BIDS identifier combinations may not exist: if not path.isfile(image_path): return {} file_data = {} file_data["path"] = image_path if substitutions: file_data["subject"] = substitutions["subject"] file_data["session"] = substitutions["session"] file_data["acquisition"] = substitutions["acquisition"] img = nib.load(image_path) if img.header['dim'][0] > 3: image_name = path.basename(file_data["path"]) merged_image_name = "merged_"+image_name merged_image_path = path.join("/tmp",merged_image_name) if not path.isfile(merged_image_path): temporal_mean = fsl.MeanImage() temporal_mean.inputs.in_file = image_path temporal_mean.inputs.out_file = merged_image_path temporal_mean_res = temporal_mean.run() image_path = temporal_mean_res.outputs.out_file else: image_path = merged_image_path sim = ants.MeasureImageSimilarity() sim.inputs.dimension = 3 sim.inputs.metric = metric sim.inputs.fixed_image = reference sim.inputs.moving_image = image_path sim.inputs.metric_weight = 1.0 sim.inputs.radius_or_number_of_bins = radius_or_number_of_bins sim.inputs.sampling_strategy = sampling_strategy sim.inputs.sampling_percentage = sampling_percentage if mask: sim.inputs.fixed_image_mask = mask sim_res = sim.run() file_data["similarity"] = sim_res.outputs.similarity return file_data def iter_measure_sim(file_template, reference, substitutions, metric="MI", radius_or_number_of_bins = 8, sampling_strategy = "None", sampling_percentage=0.3, save_as="", mask="", ): """Create a `pandas.DataFrame` (optionally savable as `.csv`), containing the similarity scores and BIDS identifier fields for images from a BIDS directory. 
""" reference = path.abspath(path.expanduser(reference)) n_jobs = mp.cpu_count()-2 similarity_data = Parallel(n_jobs=n_jobs, verbose=0, backend="threading")(map(delayed(measure_sim), [file_template]*len(substitutions), [reference]*len(substitutions), substitutions, [mask] * len(substitutions), [metric]*len(substitutions), [radius_or_number_of_bins]*len(substitutions), [sampling_strategy]*len(substitutions), [sampling_percentage]*len(substitutions), )) df = pd.DataFrame.from_dict(similarity_data) df.dropna(axis=0, how='any', inplace=True) #some rows will be emtpy if save_as: save_as = path.abspath(path.expanduser(save_as)) if save_as.lower().endswith('.csv'): df.to_csv(save_as) else: raise ValueError("Please specify an output path ending in any one of "+",".join((".csv",))+".") return df
gpl-3.0
yask123/scikit-learn
sklearn/linear_model/setup.py
146
1713
import os from os.path import join import numpy from sklearn._build_utils import get_blas_info def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration config = Configuration('linear_model', parent_package, top_path) cblas_libs, blas_info = get_blas_info() if os.name == 'posix': cblas_libs.append('m') config.add_extension('cd_fast', sources=['cd_fast.c'], libraries=cblas_libs, include_dirs=[join('..', 'src', 'cblas'), numpy.get_include(), blas_info.pop('include_dirs', [])], extra_compile_args=blas_info.pop('extra_compile_args', []), **blas_info) config.add_extension('sgd_fast', sources=['sgd_fast.c'], include_dirs=[join('..', 'src', 'cblas'), numpy.get_include(), blas_info.pop('include_dirs', [])], libraries=cblas_libs, extra_compile_args=blas_info.pop('extra_compile_args', []), **blas_info) config.add_extension('sag_fast', sources=['sag_fast.c'], include_dirs=numpy.get_include()) # add other directories config.add_subpackage('tests') return config if __name__ == '__main__': from numpy.distutils.core import setup setup(**configuration(top_path='').todict())
bsd-3-clause
mathialo/sigpy
sigpy/imaging.py
1
3927
def to_grayscale(img, mode="norm"): """ convert color image to grayscale Arguments: img -- image Keyword arguments: mode -- what algorithm to use to calculate gray value. Avilable options are: norm (standard), mean, max and min. """ from numpy import sqrt if len(img.shape) < 3: return img elif len(img.shape) > 3: raise TypeError("Dimension %d not supported!" % len(img.shape)) if mode == "norm": img_new = sqrt( img[:,:,0]*img[:,:,0] + img[:,:,1]*img[:,:,1] + img[:,:,2]*img[:,:,2] ) elif mode == "mean": img_new = (img[:,:,0] + img[:,:,1] + img[:,:,2]) / 3. elif mode == "max": raise Exception("Mode %s not yet implemented" % mode) elif mode == "min": raise Exception("Mode %s not yet implemented" % mode) else: raise TypeError("Mode %s not recognized!" % mode) return img_new def to_bitlevel(img, bitlevel=8, standardize=True): """ Converts from floating point numbers ranging from 0 to 1 to 8 bit ints ranging from 0 to 255 Arguments: img -- image Keyword arguments: standardize -- Use whole dynamic spectrum (ie, map max instead of 1 to 255) """ from numpy import uint8 if standardize: img = map_to_01(img) img *= 2**bitlevel - 1 img = img.astype(uint8) return img def map_to_01(img): from numpy import float64 minval, maxval = img.min(), img.max() img_new = img.astype(float64) img_new -= minval img_new /= (maxval-minval) return img_new def plot_dft2(image, filename=None, show_plot=True): from numpy import log, abs from numpy.fft import fft2, fftshift from matplotlib.pyplot import imshow, show f = fft2(image) fs = fftshift(f) logscale = 20*log(abs(fs)) imshow(logscale, cmap = "gray") if filename is not None: savefig(filename) if show_plot: show() def display(img, colormap="gray", filename=None, show=True, interpolation=None): """ Saves/shows the image without all the ugly axes and spacing matplotlib usually gives you. 
This is a slightly modified version of the funciton found on https://fengl.org/2014/07/09/matplotlib-savefig-without-borderframe/ Arguments: img -- image Keyword arguments: colormap -- colormap for plotting, same as cmap in matplotlib's imshow """ import numpy as np import matplotlib.pyplot as plt sizes = np.shape(img) height = float(sizes[0]) width = float(sizes[1]) fig = plt.figure() fig.set_size_inches(width/height, 1, forward=False) ax = plt.Axes(fig, [0., 0., 1., 1.]) ax.set_axis_off() fig.add_axes(ax) ax.imshow(img, cmap=colormap, interpolation=interpolation) if filename: plt.savefig(filename, dpi=height) if show: plt.show() plt.close() def glcm(img, gsnum=16, d=1, theta=0, use_degrees=False): import numpy as np if use_degrees: # Convert to radians theta = np.pi*theta/180 # Compute step directions dx = int(round(d*np.cos(theta))) dy = -int(round(d*np.sin(theta))) img_new = (img / (np.max(img)))*(gsnum-1) img_new = np.round(img_new).astype(np.uint8) result = np.zeros([gsnum, gsnum], dtype=np.float64) if dx < 0: rangex = range(-dx, img.shape[0]) else: rangex = range(img.shape[0] - dx) if dy < 0: rangey = range(-dy, img.shape[1]) else: rangey = range(img.shape[1] - dy) for i in rangex: for j in rangey: going_from = img_new[i, j] going_to = img_new[i+dx, j+dy] result[going_from, going_to] += 1 result /= np.sum(result) return result def image_window(img, center, size): return img[center[0]-int(size/2) : center[0]+int(size/2)+1 , center[1]-int(size/2) : center[1]+int(size/2)+1]
mit
jayflo/scikit-learn
benchmarks/bench_random_projections.py
397
8900
""" =========================== Random projection benchmark =========================== Benchmarks for random projections. """ from __future__ import division from __future__ import print_function import gc import sys import optparse from datetime import datetime import collections import numpy as np import scipy.sparse as sp from sklearn import clone from sklearn.externals.six.moves import xrange from sklearn.random_projection import (SparseRandomProjection, GaussianRandomProjection, johnson_lindenstrauss_min_dim) def type_auto_or_float(val): if val == "auto": return "auto" else: return float(val) def type_auto_or_int(val): if val == "auto": return "auto" else: return int(val) def compute_time(t_start, delta): mu_second = 0.0 + 10 ** 6 # number of microseconds in a second return delta.seconds + delta.microseconds / mu_second def bench_scikit_transformer(X, transfomer): gc.collect() clf = clone(transfomer) # start time t_start = datetime.now() clf.fit(X) delta = (datetime.now() - t_start) # stop time time_to_fit = compute_time(t_start, delta) # start time t_start = datetime.now() clf.transform(X) delta = (datetime.now() - t_start) # stop time time_to_transform = compute_time(t_start, delta) return time_to_fit, time_to_transform # Make some random data with uniformly located non zero entries with # Gaussian distributed values def make_sparse_random_data(n_samples, n_features, n_nonzeros, random_state=None): rng = np.random.RandomState(random_state) data_coo = sp.coo_matrix( (rng.randn(n_nonzeros), (rng.randint(n_samples, size=n_nonzeros), rng.randint(n_features, size=n_nonzeros))), shape=(n_samples, n_features)) return data_coo.toarray(), data_coo.tocsr() def print_row(clf_type, time_fit, time_transform): print("%s | %s | %s" % (clf_type.ljust(30), ("%.4fs" % time_fit).center(12), ("%.4fs" % time_transform).center(12))) if __name__ == "__main__": ########################################################################### # Option parser 
########################################################################### op = optparse.OptionParser() op.add_option("--n-times", dest="n_times", default=5, type=int, help="Benchmark results are average over n_times experiments") op.add_option("--n-features", dest="n_features", default=10 ** 4, type=int, help="Number of features in the benchmarks") op.add_option("--n-components", dest="n_components", default="auto", help="Size of the random subspace." " ('auto' or int > 0)") op.add_option("--ratio-nonzeros", dest="ratio_nonzeros", default=10 ** -3, type=float, help="Number of features in the benchmarks") op.add_option("--n-samples", dest="n_samples", default=500, type=int, help="Number of samples in the benchmarks") op.add_option("--random-seed", dest="random_seed", default=13, type=int, help="Seed used by the random number generators.") op.add_option("--density", dest="density", default=1 / 3, help="Density used by the sparse random projection." " ('auto' or float (0.0, 1.0]") op.add_option("--eps", dest="eps", default=0.5, type=float, help="See the documentation of the underlying transformers.") op.add_option("--transformers", dest="selected_transformers", default='GaussianRandomProjection,SparseRandomProjection', type=str, help="Comma-separated list of transformer to benchmark. " "Default: %default. 
Available: " "GaussianRandomProjection,SparseRandomProjection") op.add_option("--dense", dest="dense", default=False, action="store_true", help="Set input space as a dense matrix.") (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) opts.n_components = type_auto_or_int(opts.n_components) opts.density = type_auto_or_float(opts.density) selected_transformers = opts.selected_transformers.split(',') ########################################################################### # Generate dataset ########################################################################### n_nonzeros = int(opts.ratio_nonzeros * opts.n_features) print('Dataset statics') print("===========================") print('n_samples \t= %s' % opts.n_samples) print('n_features \t= %s' % opts.n_features) if opts.n_components == "auto": print('n_components \t= %s (auto)' % johnson_lindenstrauss_min_dim(n_samples=opts.n_samples, eps=opts.eps)) else: print('n_components \t= %s' % opts.n_components) print('n_elements \t= %s' % (opts.n_features * opts.n_samples)) print('n_nonzeros \t= %s per feature' % n_nonzeros) print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros) print('') ########################################################################### # Set transformer input ########################################################################### transformers = {} ########################################################################### # Set GaussianRandomProjection input gaussian_matrix_params = { "n_components": opts.n_components, "random_state": opts.random_seed } transformers["GaussianRandomProjection"] = \ GaussianRandomProjection(**gaussian_matrix_params) ########################################################################### # Set SparseRandomProjection input sparse_matrix_params = { "n_components": opts.n_components, "random_state": opts.random_seed, "density": opts.density, "eps": opts.eps, } transformers["SparseRandomProjection"] = \ 
SparseRandomProjection(**sparse_matrix_params) ########################################################################### # Perform benchmark ########################################################################### time_fit = collections.defaultdict(list) time_transform = collections.defaultdict(list) print('Benchmarks') print("===========================") print("Generate dataset benchmarks... ", end="") X_dense, X_sparse = make_sparse_random_data(opts.n_samples, opts.n_features, n_nonzeros, random_state=opts.random_seed) X = X_dense if opts.dense else X_sparse print("done") for name in selected_transformers: print("Perform benchmarks for %s..." % name) for iteration in xrange(opts.n_times): print("\titer %s..." % iteration, end="") time_to_fit, time_to_transform = bench_scikit_transformer(X_dense, transformers[name]) time_fit[name].append(time_to_fit) time_transform[name].append(time_to_transform) print("done") print("") ########################################################################### # Print results ########################################################################### print("Script arguments") print("===========================") arguments = vars(opts) print("%s \t | %s " % ("Arguments".ljust(16), "Value".center(12),)) print(25 * "-" + ("|" + "-" * 14) * 1) for key, value in arguments.items(): print("%s \t | %s " % (str(key).ljust(16), str(value).strip().center(12))) print("") print("Transformer performance:") print("===========================") print("Results are averaged over %s repetition(s)." % opts.n_times) print("") print("%s | %s | %s" % ("Transformer".ljust(30), "fit".center(12), "transform".center(12))) print(31 * "-" + ("|" + "-" * 14) * 2) for name in sorted(selected_transformers): print_row(name, np.mean(time_fit[name]), np.mean(time_transform[name])) print("") print("")
bsd-3-clause
bundgus/python-playground
matplotlib-playground/examples/pylab_examples/demo_text_rotation_mode.py
2
1427
from mpl_toolkits.axes_grid1.axes_grid import ImageGrid def test_rotation_mode(fig, mode, subplot_location): ha_list = "left center right".split() va_list = "top center baseline bottom".split() grid = ImageGrid(fig, subplot_location, nrows_ncols=(len(va_list), len(ha_list)), share_all=True, aspect=True, #label_mode='1', cbar_mode=None) for ha, ax in zip(ha_list, grid.axes_row[-1]): ax.axis["bottom"].label.set_text(ha) grid.axes_row[0][1].set_title(mode, size="large") for va, ax in zip(va_list, grid.axes_column[0]): ax.axis["left"].label.set_text(va) i = 0 for va in va_list: for ha in ha_list: ax = grid[i] for axis in ax.axis.values(): axis.toggle(ticks=False, ticklabels=False) ax.text(0.5, 0.5, "Tpg", size="large", rotation=40, bbox=dict(boxstyle="square,pad=0.", ec="none", fc="0.5", alpha=0.5), ha=ha, va=va, rotation_mode=mode) ax.axvline(0.5) ax.axhline(0.5) i += 1 if 1: import matplotlib.pyplot as plt fig = plt.figure(1, figsize=(5.5, 4)) fig.clf() test_rotation_mode(fig, "default", 121) test_rotation_mode(fig, "anchor", 122) plt.show()
mit
tequa/ammisoft
ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/mpl_toolkits/exceltools.py
10
3958
""" Some io tools for excel -- requires xlwt Example usage: import matplotlib.mlab as mlab import mpl_toolkits.exceltools as exceltools r = mlab.csv2rec('somefile.csv', checkrows=0) formatd = dict( weight = mlab.FormatFloat(2), change = mlab.FormatPercent(2), cost = mlab.FormatThousands(2), ) exceltools.rec2excel(r, 'test.xls', formatd=formatd) mlab.rec2csv(r, 'test.csv', formatd=formatd) """ from __future__ import (absolute_import, division, print_function, unicode_literals) import six import copy import numpy as np import xlwt as excel import matplotlib.cbook as cbook import matplotlib.mlab as mlab cbook.warn_deprecated("2.0", name="mpl_toolkits.exceltools", alternative="openpyxl", obj_type="module") def xlformat_factory(format): """ copy the format, perform any overrides, and attach an xlstyle instance copied format is returned """ #if we have created an excel format already using this format, #don't recreate it; mlab.FormatObj override has to make objs with #the same props hash to the same value key = hash(format) fmt_ = xlformat_factory.created_formats.get(key) if fmt_ is not None: return fmt_ format = copy.deepcopy(format) xlstyle = excel.XFStyle() if isinstance(format, mlab.FormatPercent): zeros = ''.join(['0']*format.precision) xlstyle.num_format_str = '0.%s%%;[RED]-0.%s%%'%(zeros, zeros) format.scale = 1. elif isinstance(format, mlab.FormatFloat): if format.precision>0: zeros = ''.join(['0']*format.precision) xlstyle.num_format_str = '#,##0.%s;[RED]-#,##0.%s'%(zeros, zeros) else: xlstyle.num_format_str = '#,##;[RED]-#,##' elif isinstance(format, mlab.FormatInt): xlstyle.num_format_str = '#,##;[RED]-#,##' else: xlstyle = None format.xlstyle = xlstyle xlformat_factory.created_formats[ key ] = format return format xlformat_factory.created_formats = {} def rec2excel(r, ws, formatd=None, rownum=0, colnum=0, nanstr='NaN', infstr='Inf'): """ save record array r to excel xlwt worksheet ws starting at rownum. 
if ws is string like, assume it is a filename and save to it start writing at rownum, colnum formatd is a dictionary mapping dtype name -> mlab.Format instances nanstr is the string that mpl will put into excel for np.nan value The next rownum after writing is returned """ autosave = False if cbook.is_string_like(ws): filename = ws wb = excel.Workbook() ws = wb.add_sheet('worksheet') autosave = True if formatd is None: formatd = dict() formats = [] font = excel.Font() font.bold = True stylehdr = excel.XFStyle() stylehdr.font = font for i, name in enumerate(r.dtype.names): dt = r.dtype[name] format = formatd.get(name) if format is None: format = mlab.defaultformatd.get(dt.type, mlab.FormatObj()) format = xlformat_factory(format) ws.write(rownum, colnum+i, name, stylehdr) formats.append(format) rownum+=1 ind = np.arange(len(r.dtype.names)) for row in r: for i in ind: val = row[i] format = formats[i] val = format.toval(val) if mlab.safe_isnan(val): ws.write(rownum, colnum+i, nanstr) elif mlab.safe_isinf(val): sgn = np.sign(val) if sgn>0: s = infstr else: s = '-%s'%infstr ws.write(rownum, colnum+i, s) elif format.xlstyle is None: ws.write(rownum, colnum+i, val) else: ws.write(rownum, colnum+i, val, format.xlstyle) rownum += 1 if autosave: wb.save(filename) return rownum
bsd-3-clause
NicovincX2/Python-3.5
Statistiques/Estimation (statistique)/Régression/Gradient boosting/gradient_boosting_regression.py
1
2287
# -*- coding: utf-8 -*- import os # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import ensemble from sklearn import datasets from sklearn.utils import shuffle from sklearn.metrics import mean_squared_error ############################################################################### # Load data boston = datasets.load_boston() X, y = shuffle(boston.data, boston.target, random_state=13) X = X.astype(np.float32) offset = int(X.shape[0] * 0.9) X_train, y_train = X[:offset], y[:offset] X_test, y_test = X[offset:], y[offset:] ############################################################################### # Fit regression model params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1, 'learning_rate': 0.01, 'loss': 'ls'} clf = ensemble.GradientBoostingRegressor(**params) clf.fit(X_train, y_train) mse = mean_squared_error(y_test, clf.predict(X_test)) print("MSE: %.4f" % mse) ############################################################################### # Plot training deviance # compute test set deviance test_score = np.zeros((params['n_estimators'],), dtype=np.float64) for i, y_pred in enumerate(clf.staged_predict(X_test)): test_score[i] = clf.loss_(y_test, y_pred) plt.figure(figsize=(12, 6)) plt.subplot(1, 2, 1) plt.title('Deviance') plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-', label='Training Set Deviance') plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-', label='Test Set Deviance') plt.legend(loc='upper right') plt.xlabel('Boosting Iterations') plt.ylabel('Deviance') ############################################################################### # Plot feature importance feature_importance = clf.feature_importances_ # make importances relative to max importance feature_importance = 100.0 * (feature_importance / feature_importance.max()) sorted_idx = np.argsort(feature_importance) pos = 
np.arange(sorted_idx.shape[0]) + .5 plt.subplot(1, 2, 2) plt.barh(pos, feature_importance[sorted_idx], align='center') plt.yticks(pos, boston.feature_names[sorted_idx]) plt.xlabel('Relative Importance') plt.title('Variable Importance') plt.show() os.system("pause")
gpl-3.0
hrjn/scikit-learn
examples/mixture/plot_concentration_prior.py
16
5657
""" ======================================================================== Concentration Prior Type Analysis of Variation Bayesian Gaussian Mixture ======================================================================== This example plots the ellipsoids obtained from a toy dataset (mixture of three Gaussians) fitted by the ``BayesianGaussianMixture`` class models with a Dirichlet distribution prior (``weight_concentration_prior_type='dirichlet_distribution'``) and a Dirichlet process prior (``weight_concentration_prior_type='dirichlet_process'``). On each figure, we plot the results for three different values of the weight concentration prior. The ``BayesianGaussianMixture`` class can adapt its number of mixture componentsautomatically. The parameter ``weight_concentration_prior`` has a direct link with the resulting number of components with non-zero weights. Specifying a low value for the concentration prior will make the model put most of the weight on few components set the remaining components weights very close to zero. High values of the concentration prior will allow a larger number of components to be active in the mixture. The Dirichlet process prior allows to define an infinite number of components and automatically selects the correct number of components: it activates a component only if it is necessary. On the contrary the classical finite mixture model with a Dirichlet distribution prior will favor more uniformly weighted components and therefore tends to divide natural clusters into unnecessary sub-components. 
""" # Author: Thierry Guillemot <thierry.guillemot.work@gmail.com> # License: BSD 3 clause import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from sklearn.mixture import BayesianGaussianMixture print(__doc__) def plot_ellipses(ax, weights, means, covars): for n in range(means.shape[0]): eig_vals, eig_vecs = np.linalg.eigh(covars[n]) unit_eig_vec = eig_vecs[0] / np.linalg.norm(eig_vecs[0]) angle = np.arctan2(unit_eig_vec[1], unit_eig_vec[0]) # Ellipse needs degrees angle = 180 * angle / np.pi # eigenvector normalization eig_vals = 2 * np.sqrt(2) * np.sqrt(eig_vals) ell = mpl.patches.Ellipse(means[n], eig_vals[0], eig_vals[1], 180 + angle) ell.set_clip_box(ax.bbox) ell.set_alpha(weights[n]) ell.set_facecolor('#56B4E9') ax.add_artist(ell) def plot_results(ax1, ax2, estimator, X, y, title, plot_title=False): ax1.set_title(title) ax1.scatter(X[:, 0], X[:, 1], s=5, marker='o', color=colors[y], alpha=0.8) ax1.set_xlim(-2., 2.) ax1.set_ylim(-3., 3.) 
ax1.set_xticks(()) ax1.set_yticks(()) plot_ellipses(ax1, estimator.weights_, estimator.means_, estimator.covariances_) ax2.get_xaxis().set_tick_params(direction='out') ax2.yaxis.grid(True, alpha=0.7) for k, w in enumerate(estimator.weights_): ax2.bar(k, w, width=0.9, color='#56B4E9', zorder=3, align='center') ax2.text(k, w + 0.007, "%.1f%%" % (w * 100.), horizontalalignment='center') ax2.set_xlim(-.6, 2 * n_components - .4) ax2.set_ylim(0., 1.1) ax2.tick_params(axis='y', which='both', left='off', right='off', labelleft='off') ax2.tick_params(axis='x', which='both', top='off') if plot_title: ax1.set_ylabel('Estimated Mixtures') ax2.set_ylabel('Weight of each component') # Parameters of the dataset random_state, n_components, n_features = 2, 3, 2 colors = np.array(['#0072B2', '#F0E442', '#D55E00']) covars = np.array([[[.7, .0], [.0, .1]], [[.5, .0], [.0, .1]], [[.5, .0], [.0, .1]]]) samples = np.array([200, 500, 200]) means = np.array([[.0, -.70], [.0, .0], [.0, .70]]) # mean_precision_prior= 0.8 to minimize the influence of the prior estimators = [ ("Finite mixture with a Dirichlet distribution\nprior and " r"$\gamma_0=$", BayesianGaussianMixture( weight_concentration_prior_type="dirichlet_distribution", n_components=2 * n_components, reg_covar=0, init_params='random', max_iter=1500, mean_precision_prior=.8, random_state=random_state), [0.001, 1, 1000]), ("Infinite mixture with a Dirichlet process\n prior and" r"$\gamma_0=$", BayesianGaussianMixture( weight_concentration_prior_type="dirichlet_process", n_components=2 * n_components, reg_covar=0, init_params='random', max_iter=1500, mean_precision_prior=.8, random_state=random_state), [1, 1000, 100000])] # Generate data rng = np.random.RandomState(random_state) X = np.vstack([ rng.multivariate_normal(means[j], covars[j], samples[j]) for j in range(n_components)]) y = np.concatenate([j * np.ones(samples[j], dtype=int) for j in range(n_components)]) # Plot results in two different figures for (title, estimator, 
concentrations_prior) in estimators: plt.figure(figsize=(4.7 * 3, 8)) plt.subplots_adjust(bottom=.04, top=0.90, hspace=.05, wspace=.05, left=.03, right=.99) gs = gridspec.GridSpec(3, len(concentrations_prior)) for k, concentration in enumerate(concentrations_prior): estimator.weight_concentration_prior = concentration estimator.fit(X) plot_results(plt.subplot(gs[0:2, k]), plt.subplot(gs[2, k]), estimator, X, y, r"%s$%.1e$" % (title, concentration), plot_title=k == 0) plt.show()
bsd-3-clause
JohnGriffiths/nipype
nipype/algorithms/modelgen.py
7
34756
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The modelgen module provides classes for specifying designs for individual subject analysis of task-based fMRI experiments. In particular it also includes algorithms for generating regressors for sparse and sparse-clustered acquisition experiments. These functions include: * SpecifyModel: allows specification of sparse and non-sparse models Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) >>> os.chdir(datadir) """ from copy import deepcopy import os from nibabel import load import numpy as np from scipy.special import gammaln from nipype.interfaces.base import (BaseInterface, TraitedSpec, InputMultiPath, traits, File, Bunch, BaseInterfaceInputSpec, isdefined) from nipype.utils.filemanip import filename_to_list from .. 
import config, logging from nipype.external import six iflogger = logging.getLogger('interface') def gcd(a, b): """Returns the greatest common divisor of two integers uses Euclid's algorithm >>> gcd(4, 5) 1 >>> gcd(4, 8) 4 >>> gcd(22, 55) 11 """ while b > 0: a, b = b, a % b return a def spm_hrf(RT, P=None, fMRI_T=16): """ python implementation of spm_hrf see spm_hrf for implementation details % RT - scan repeat time % p - parameters of the response function (two gamma % functions) % defaults (seconds) % p(0) - delay of response (relative to onset) 6 % p(1) - delay of undershoot (relative to onset) 16 % p(2) - dispersion of response 1 % p(3) - dispersion of undershoot 1 % p(4) - ratio of response to undershoot 6 % p(5) - onset (seconds) 0 % p(6) - length of kernel (seconds) 32 % % hrf - hemodynamic response function % p - parameters of the response function the following code using scipy.stats.distributions.gamma doesn't return the same result as the spm_Gpdf function hrf = gamma.pdf(u, p[0]/p[2], scale=dt/p[2]) - gamma.pdf(u, p[1]/p[3], scale=dt/p[3])/p[4] >>> print spm_hrf(2) [ 0.00000000e+00 8.65660810e-02 3.74888236e-01 3.84923382e-01 2.16117316e-01 7.68695653e-02 1.62017720e-03 -3.06078117e-02 -3.73060781e-02 -3.08373716e-02 -2.05161334e-02 -1.16441637e-02 -5.82063147e-03 -2.61854250e-03 -1.07732374e-03 -4.10443522e-04 -1.46257507e-04] """ p = np.array([6, 16, 1, 1, 6, 0, 32], dtype=float) if P is not None: p[0:len(P)] = P _spm_Gpdf = lambda x, h, l: np.exp(h * np.log(l) + (h - 1) * np.log(x) - (l * x) - gammaln(h)) # modelled hemodynamic response function - {mixture of Gammas} dt = RT / float(fMRI_T) u = np.arange(0, int(p[6] / dt + 1)) - p[5] / dt hrf = _spm_Gpdf(u, p[0] / p[2], dt / p[2]) - _spm_Gpdf(u, p[1] / p[3], dt / p[3]) / p[4] idx = np.arange(0, int((p[6] / RT) + 1)) * fMRI_T hrf = hrf[idx] hrf = hrf / np.sum(hrf) return hrf def orth(x_in, y_in): """Orthoganlize y_in with respect to x_in >>> orth_expected = np.array([1.7142857142857144, 
0.42857142857142883, \ -0.85714285714285676]) >>> err = np.abs(np.array(orth([1, 2, 3],[4, 5, 6]) - orth_expected)) >>> all(err < np.finfo(float).eps) True """ x = np.array(x_in)[:, None] y = np.array(y_in)[:, None] y = y - np.dot(x, np.dot(np.linalg.inv(np.dot(x.T, x)), np.dot(x.T, y))) if np.linalg.norm(y, 1) > np.exp(-32): y = y[:, 0].tolist() else: y = y_in return y def scale_timings(timelist, input_units, output_units, time_repetition): """Scales timings given input and output units (scans/secs) Parameters ---------- timelist: list of times to scale input_units: 'secs' or 'scans' output_units: Ibid. time_repetition: float in seconds """ if input_units==output_units: _scalefactor = 1. if (input_units == 'scans') and (output_units == 'secs'): _scalefactor = time_repetition if (input_units == 'secs') and (output_units == 'scans'): _scalefactor = 1./time_repetition timelist = [np.max([0., _scalefactor * t]) for t in timelist] return timelist def gen_info(run_event_files): """Generate subject_info structure from a list of event files """ info = [] for i, event_files in enumerate(run_event_files): runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[]) for event_file in event_files: _, name = os.path.split(event_file) if '.run' in name: name, _ = name.split('.run%03d' % (i+1)) elif '.txt' in name: name, _ = name.split('.txt') runinfo.conditions.append(name) event_info = np.atleast_2d(np.loadtxt(event_file)) runinfo.onsets.append(event_info[:, 0].tolist()) if event_info.shape[1] > 1: runinfo.durations.append(event_info[:, 1].tolist()) else: runinfo.durations.append([0]) if event_info.shape[1] > 2: runinfo.amplitudes.append(event_info[:, 2].tolist()) else: delattr(runinfo, 'amplitudes') info.append(runinfo) return info class SpecifyModelInputSpec(BaseInterfaceInputSpec): subject_info = InputMultiPath(Bunch, mandatory=True, xor=['subject_info', 'event_files'], desc=("Bunch or List(Bunch) subject specific condition information. 
" "see :ref:`SpecifyModel` or SpecifyModel.__doc__ for details")) event_files = InputMultiPath(traits.List(File(exists=True)), mandatory=True, xor=['subject_info', 'event_files'], desc=('list of event description files 1, 2 or 3 column format ' 'corresponding to onsets, durations and amplitudes')) realignment_parameters = InputMultiPath(File(exists=True), desc="Realignment parameters returned by motion correction algorithm", copyfile=False) outlier_files = InputMultiPath(File(exists=True), desc="Files containing scan outlier indices that should be tossed", copyfile=False) functional_runs = InputMultiPath(traits.Either(traits.List(File(exists=True)), File(exists=True)), mandatory=True, desc=("Data files for model. List of 4D files or list of list of 3D " "files per session"), copyfile=False) input_units = traits.Enum('secs', 'scans', mandatory=True, desc=("Units of event onsets and durations (secs or scans). Output " "units are always in secs")) high_pass_filter_cutoff = traits.Float(mandatory=True, desc="High-pass filter cutoff in secs") time_repetition = traits.Float(mandatory=True, desc=("Time between the start of one volume to the start of " "the next image volume.")) # Not implemented yet #polynomial_order = traits.Range(0, low=0, # desc ="Number of polynomial functions to model high pass filter.") class SpecifyModelOutputSpec(TraitedSpec): session_info = traits.Any(desc="session info for level1designs") class SpecifyModel(BaseInterface): """Makes a model specification compatible with spm/fsl designers. The subject_info field should contain paradigm information in the form of a Bunch or a list of Bunch. The Bunch should contain the following information:: [Mandatory] - conditions : list of names - onsets : lists of onsets corresponding to each condition - durations : lists of durations corresponding to each condition. Should be left to a single 0 if all events are being modelled as impulses. 
[Optional] - regressor_names : list of str list of names corresponding to each column. Should be None if automatically assigned. - regressors : list of lists values for each regressor - must correspond to the number of volumes in the functional run - amplitudes : lists of amplitudes for each event. This will be ignored by SPM's Level1Design. The following two (tmod, pmod) will be ignored by any Level1Design class other than SPM: - tmod : lists of conditions that should be temporally modulated. Should default to None if not being used. - pmod : list of Bunch corresponding to conditions - name : name of parametric modulator - param : values of the modulator - poly : degree of modulation Alternatively, you can provide information through event files. The event files have to be in 1, 2 or 3 column format with the columns corresponding to Onsets, Durations and Amplitudes and they have to have the name event_name.runXXX... e.g.: Words.run001.txt. The event_name part will be used to create the condition names. Examples -------- >>> from nipype.interfaces.base import Bunch >>> s = SpecifyModel() >>> s.inputs.input_units = 'secs' >>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii'] >>> s.inputs.time_repetition = 6 >>> s.inputs.high_pass_filter_cutoff = 128. 
>>> info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]],\ durations=[[1]]), \ Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], \ durations=[[1]])] >>> s.inputs.subject_info = info Using pmod: >>> info = [Bunch(conditions=['cond1', 'cond2'], \ onsets=[[2, 50],[100, 180]], durations=[[0],[0]], \ pmod=[Bunch(name=['amp'], poly=[2], param=[[1, 2]]),\ None]), \ Bunch(conditions=['cond1', 'cond2'], \ onsets=[[20, 120],[80, 160]], durations=[[0],[0]], \ pmod=[Bunch(name=['amp'], poly=[2], param=[[1, 2]]), \ None])] >>> s.inputs.subject_info = info """ input_spec = SpecifyModelInputSpec output_spec = SpecifyModelOutputSpec def _generate_standard_design(self, infolist, functional_runs=None, realignment_parameters=None, outliers=None): """ Generates a standard design matrix paradigm given information about each run """ sessinfo = [] output_units = 'secs' if 'output_units' in self.inputs.traits(): output_units = self.inputs.output_units for i, info in enumerate(infolist): sessinfo.insert(i, dict(cond=[])) if isdefined(self.inputs.high_pass_filter_cutoff): sessinfo[i]['hpf'] = \ np.float(self.inputs.high_pass_filter_cutoff) if hasattr(info, 'conditions') and info.conditions is not None: for cid, cond in enumerate(info.conditions): sessinfo[i]['cond'].insert(cid, dict()) sessinfo[i]['cond'][cid]['name'] = info.conditions[cid] scaled_onset = scale_timings(info.onsets[cid], self.inputs.input_units, output_units, self.inputs.time_repetition) sessinfo[i]['cond'][cid]['onset'] = scaled_onset scaled_duration = scale_timings(info.durations[cid], self.inputs.input_units, output_units, self.inputs.time_repetition) sessinfo[i]['cond'][cid]['duration'] = scaled_duration if hasattr(info, 'amplitudes') and info.amplitudes: sessinfo[i]['cond'][cid]['amplitudes'] = \ info.amplitudes[cid] if hasattr(info, 'tmod') and info.tmod and \ len(info.tmod) > cid: sessinfo[i]['cond'][cid]['tmod'] = info.tmod[cid] if hasattr(info, 'pmod') and info.pmod and \ len(info.pmod) > cid: if 
info.pmod[cid]: sessinfo[i]['cond'][cid]['pmod'] = [] for j, name in enumerate(info.pmod[cid].name): sessinfo[i]['cond'][cid]['pmod'].insert(j, {}) sessinfo[i]['cond'][cid]['pmod'][j]['name'] = \ name sessinfo[i]['cond'][cid]['pmod'][j]['poly'] = \ info.pmod[cid].poly[j] sessinfo[i]['cond'][cid]['pmod'][j]['param'] = \ info.pmod[cid].param[j] sessinfo[i]['regress']= [] if hasattr(info, 'regressors') and info.regressors is not None: for j, r in enumerate(info.regressors): sessinfo[i]['regress'].insert(j, dict(name='', val=[])) if hasattr(info, 'regressor_names') and \ info.regressor_names is not None: sessinfo[i]['regress'][j]['name'] = \ info.regressor_names[j] else: sessinfo[i]['regress'][j]['name'] = 'UR%d' % (j+1) sessinfo[i]['regress'][j]['val'] = info.regressors[j] sessinfo[i]['scans'] = functional_runs[i] if realignment_parameters is not None: for i, rp in enumerate(realignment_parameters): mc = realignment_parameters[i] for col in range(mc.shape[1]): colidx = len(sessinfo[i]['regress']) sessinfo[i]['regress'].insert(colidx, dict(name='', val=[])) sessinfo[i]['regress'][colidx]['name'] = 'Realign%d' % (col + 1) sessinfo[i]['regress'][colidx]['val'] = mc[:, col].tolist() if outliers is not None: for i, out in enumerate(outliers): numscans = 0 for f in filename_to_list(sessinfo[i]['scans']): shape = load(f).get_shape() if len(shape) == 3 or shape[3] == 1: iflogger.warning(("You are using 3D instead of 4D " "files. 
Are you sure this was " "intended?")) numscans += 1 else: numscans += shape[3] for j, scanno in enumerate(out): colidx = len(sessinfo[i]['regress']) sessinfo[i]['regress'].insert(colidx, dict(name='', val=[])) sessinfo[i]['regress'][colidx]['name'] = 'Outlier%d'%(j+1) sessinfo[i]['regress'][colidx]['val'] = \ np.zeros((1, numscans))[0].tolist() sessinfo[i]['regress'][colidx]['val'][int(scanno)] = 1 return sessinfo def _generate_design(self, infolist=None): """Generate design specification for a typical fmri paradigm """ realignment_parameters = [] if isdefined(self.inputs.realignment_parameters): for parfile in self.inputs.realignment_parameters: realignment_parameters.append(np.loadtxt(parfile)) outliers = [] if isdefined(self.inputs.outlier_files): for filename in self.inputs.outlier_files: try: outindices = np.loadtxt(filename, dtype=int) except IOError: outliers.append([]) else: if outindices.size == 1: outliers.append([outindices.tolist()]) else: outliers.append(outindices.tolist()) if infolist is None: if isdefined(self.inputs.subject_info): infolist = self.inputs.subject_info else: infolist = gen_info(self.inputs.event_files) self._sessinfo = self._generate_standard_design(infolist, functional_runs=self.inputs.functional_runs, realignment_parameters=realignment_parameters, outliers=outliers) def _run_interface(self, runtime): """ """ self._sessioninfo = None self._generate_design() return runtime def _list_outputs(self): outputs = self._outputs().get() if not hasattr(self, '_sessinfo'): self._generate_design() outputs['session_info'] = self._sessinfo return outputs class SpecifySPMModelInputSpec(SpecifyModelInputSpec): concatenate_runs = traits.Bool(False, usedefault=True, desc="Concatenate all runs to look like a single session.") output_units = traits.Enum('secs', 'scans', usedefault=True, desc="Units of design event onsets and durations (secs or scans)") class SpecifySPMModel(SpecifyModel): """Adds SPM specific options to SpecifyModel adds: - 
concatenate_runs - output_units Examples -------- >>> from nipype.interfaces.base import Bunch >>> s = SpecifySPMModel() >>> s.inputs.input_units = 'secs' >>> s.inputs.output_units = 'scans' >>> s.inputs.high_pass_filter_cutoff = 128. >>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii'] >>> s.inputs.time_repetition = 6 >>> s.inputs.concatenate_runs = True >>> info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], \ durations=[[1]]), \ Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], \ durations=[[1]])] >>> s.inputs.subject_info = info """ input_spec = SpecifySPMModelInputSpec def _concatenate_info(self, infolist): nscans = [] for i, f in enumerate(self.inputs.functional_runs): if isinstance(f, list): numscans = len(f) elif isinstance(f, six.string_types): img = load(f) numscans = img.get_shape()[3] else: raise Exception('Functional input not specified correctly') nscans.insert(i, numscans) # now combine all fields into 1 # names, onsets, durations, amplitudes, pmod, tmod, regressor_names, # regressors infoout = infolist[0] for i, info in enumerate(infolist[1:]): #info.[conditions, tmod] remain the same if info.onsets: for j, val in enumerate(info.onsets): if self.inputs.input_units == 'secs': onsets = np.array(info.onsets[j]) +\ self.inputs.time_repetition * \ sum(nscans[0:(i + 1)]) infoout.onsets[j].extend(onsets.tolist()) else: onsets = np.array(info.onsets[j]) + \ sum(nscans[0:(i + 1)]) infoout.onsets[j].extend(onsets.tolist()) for j, val in enumerate(info.durations): if len(val) > 1: infoout.durations[j].extend(info.durations[j]) if hasattr(info, 'amplitudes') and info.amplitudes: for j, val in enumerate(info.amplitudes): infoout.amplitudes[j].extend(info.amplitudes[j]) if hasattr(info, 'pmod') and info.pmod: for j, val in enumerate(info.pmod): if val: for key, data in enumerate(val.param): infoout.pmod[j].param[key].extend(data) if hasattr(info, 'regressors') and info.regressors: #assumes same ordering of regressors across 
different #runs and the same names for the regressors for j, v in enumerate(info.regressors): infoout.regressors[j].extend(info.regressors[j]) #insert session regressors if not hasattr(infoout, 'regressors') or not infoout.regressors: infoout.regressors = [] onelist = np.zeros((1, sum(nscans))) onelist[0, sum(nscans[0:i]):sum(nscans[0:(i + 1)])] = 1 infoout.regressors.insert(len(infoout.regressors), onelist.tolist()[0]) return [infoout], nscans def _generate_design(self, infolist=None): if not isdefined(self.inputs.concatenate_runs) or \ not self.inputs.concatenate_runs: super(SpecifySPMModel, self)._generate_design(infolist=infolist) return if isdefined(self.inputs.subject_info): infolist = self.inputs.subject_info else: infolist = gen_info(self.inputs.event_files) concatlist, nscans = self._concatenate_info(infolist) functional_runs = [filename_to_list(self.inputs.functional_runs)] realignment_parameters = [] if isdefined(self.inputs.realignment_parameters): realignment_parameters = [] for parfile in self.inputs.realignment_parameters: mc = np.loadtxt(parfile) if not realignment_parameters: realignment_parameters.insert(0, mc) else: realignment_parameters[0] = \ np.concatenate((realignment_parameters[0], mc)) outliers = [] if isdefined(self.inputs.outlier_files): outliers = [[]] for i, filename in enumerate(self.inputs.outlier_files): try: out = np.loadtxt(filename, dtype=int) except IOError: out = np.array([]) if out.size > 0: if out.size == 1: outliers[0].extend([(np.array(out) + sum(nscans[0:i])).tolist()]) else: outliers[0].extend((np.array(out) + sum(nscans[0:i])).tolist()) self._sessinfo = self._generate_standard_design(concatlist, functional_runs=functional_runs, realignment_parameters=realignment_parameters, outliers=outliers) class SpecifySparseModelInputSpec(SpecifyModelInputSpec): time_acquisition = traits.Float(0, mandatory=True, desc="Time in seconds to acquire a single image volume") volumes_in_cluster=traits.Range(1, usedefault=True, desc="Number 
of scan volumes in a cluster") model_hrf = traits.Bool(desc="model sparse events with hrf") stimuli_as_impulses = traits.Bool(True, desc="Treat each stimulus to be impulse like.", usedefault=True) use_temporal_deriv = traits.Bool(requires=['model_hrf'], desc="Create a temporal derivative in addition to regular regressor") scale_regressors = traits.Bool(True, desc="Scale regressors by the peak", usedefault=True) scan_onset = traits.Float(0.0, desc="Start of scanning relative to onset of run in secs", usedefault=True) save_plot = traits.Bool(desc=('save plot of sparse design calculation ' '(Requires matplotlib)')) class SpecifySparseModelOutputSpec(SpecifyModelOutputSpec): sparse_png_file = File(desc='PNG file showing sparse design') sparse_svg_file = File(desc='SVG file showing sparse design') class SpecifySparseModel(SpecifyModel): """ Specify a sparse model that is compatible with spm/fsl designers References ---------- .. [1] Perrachione TK and Ghosh SS (2013) Optimized design and analysis of sparse-sampling fMRI experiments. Front. Neurosci. 7:55 http://journal.frontiersin.org/Journal/10.3389/fnins.2013.00055/abstract Examples -------- >>> from nipype.interfaces.base import Bunch >>> s = SpecifySparseModel() >>> s.inputs.input_units = 'secs' >>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii'] >>> s.inputs.time_repetition = 6 >>> s.inputs.time_acquisition = 2 >>> s.inputs.high_pass_filter_cutoff = 128. 
>>> s.inputs.model_hrf = True >>> info = [Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], \ durations=[[1]]), \ Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], \ durations=[[1]])] >>> s.inputs.subject_info = info """ input_spec = SpecifySparseModelInputSpec output_spec = SpecifySparseModelOutputSpec def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): """Generates a regressor for a sparse/clustered-sparse acquisition """ bplot = False if isdefined(self.inputs.save_plot) and self.inputs.save_plot: bplot=True import matplotlib matplotlib.use(config.get("execution", "matplotlib_backend")) import matplotlib.pyplot as plt TR = np.round(self.inputs.time_repetition * 1000) # in ms if self.inputs.time_acquisition: TA = np.round(self.inputs.time_acquisition * 1000) # in ms else: TA = TR # in ms nvol = self.inputs.volumes_in_cluster SCANONSET = np.round(self.inputs.scan_onset * 1000) total_time = TR * (nscans - nvol) / nvol + TA * nvol + SCANONSET SILENCE = TR - TA * nvol dt = TA / 10.0 durations = np.round(np.array(i_durations) * 1000) if len(durations) == 1: durations = durations*np.ones((len(i_onsets))) onsets = np.round(np.array(i_onsets) * 1000) dttemp = gcd(TA, gcd(SILENCE, TR)) if dt < dttemp: if dttemp % dt != 0: dt = float(gcd(dttemp, dt)) if dt < 1: raise Exception("Time multiple less than 1 ms") iflogger.info("Setting dt = %d ms\n" % dt) npts = int(np.ceil(total_time / dt)) times = np.arange(0, total_time, dt) * 1e-3 timeline = np.zeros((npts)) timeline2 = np.zeros((npts)) if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: hrf = spm_hrf(dt * 1e-3) reg_scale = 1.0 if self.inputs.scale_regressors: boxcar = np.zeros((50.0 * 1e3 / dt)) if self.inputs.stimuli_as_impulses: boxcar[1.0 * 1e3 / dt] = 1.0 reg_scale = float(TA / dt) else: boxcar[(1.0 * 1e3 / dt):(2.0 * 1e3 / dt)] = 1.0 if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: response = np.convolve(boxcar, hrf) reg_scale = 1.0 / response.max() 
iflogger.info('response sum: %.4f max: %.4f' % (response.sum(), response.max())) iflogger.info('reg_scale: %.4f' % reg_scale) for i, t in enumerate(onsets): idx = int(np.round(t / dt)) if i_amplitudes: if len(i_amplitudes) > 1: timeline2[idx] = i_amplitudes[i] else: timeline2[idx] = i_amplitudes[0] else: timeline2[idx] = 1 if bplot: plt.subplot(4, 1, 1) plt.plot(times, timeline2) if not self.inputs.stimuli_as_impulses: if durations[i] == 0: durations[i] = TA * nvol stimdur = np.ones((int(durations[i] / dt))) timeline2 = np.convolve(timeline2, stimdur)[0:len(timeline2)] timeline += timeline2 timeline2[:] = 0 if bplot: plt.subplot(4, 1, 2) plt.plot(times, timeline) if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: timeline = np.convolve(timeline, hrf)[0:len(timeline)] if isdefined(self.inputs.use_temporal_deriv) and \ self.inputs.use_temporal_deriv: #create temporal deriv timederiv = np.concatenate(([0], np.diff(timeline))) if bplot: plt.subplot(4, 1, 3) plt.plot(times, timeline) if isdefined(self.inputs.use_temporal_deriv) and \ self.inputs.use_temporal_deriv: plt.plot(times, timederiv) # sample timeline timeline2 = np.zeros((npts)) reg = [] regderiv = [] for i, trial in enumerate(np.arange(nscans)/nvol): scanstart = int((SCANONSET + trial * TR + (i % nvol) * TA) / dt) scanidx = scanstart+np.arange(int(TA/dt)) timeline2[scanidx] = np.max(timeline) reg.insert(i, np.mean(timeline[scanidx]) * reg_scale) if isdefined(self.inputs.use_temporal_deriv) and \ self.inputs.use_temporal_deriv: regderiv.insert(i, np.mean(timederiv[scanidx]) * reg_scale) if isdefined(self.inputs.use_temporal_deriv) and \ self.inputs.use_temporal_deriv: iflogger.info('orthoganlizing derivative w.r.t. 
main regressor') regderiv = orth(reg, regderiv) if bplot: plt.subplot(4, 1, 3) plt.plot(times, timeline2) plt.subplot(4, 1, 4) plt.bar(np.arange(len(reg)), reg, width=0.5) plt.savefig('sparse.png') plt.savefig('sparse.svg') if regderiv: return [reg, regderiv] else: return reg def _cond_to_regress(self, info, nscans): """Converts condition information to full regressors """ reg = [] regnames = [] for i, cond in enumerate(info.conditions): if hasattr(info, 'amplitudes') and info.amplitudes: amplitudes = info.amplitudes[i] else: amplitudes = None regnames.insert(len(regnames), cond) scaled_onsets = scale_timings(info.onsets[i], self.inputs.input_units, 'secs', self.inputs.time_repetition) scaled_durations = scale_timings(info.durations[i], self.inputs.input_units, 'secs', self.inputs.time_repetition) regressor = self._gen_regress(scaled_onsets, scaled_durations, amplitudes, nscans) if isdefined(self.inputs.use_temporal_deriv) and \ self.inputs.use_temporal_deriv: reg.insert(len(reg), regressor[0]) regnames.insert(len(regnames), cond + '_D') reg.insert(len(reg), regressor[1]) else: reg.insert(len(reg), regressor) # need to deal with temporal and parametric modulators # for sparse-clustered acquisitions enter T1-effect regressors nvol = self.inputs.volumes_in_cluster if nvol > 1: for i in range(nvol-1): treg = np.zeros((nscans/nvol, nvol)) treg[:, i] = 1 reg.insert(len(reg), treg.ravel().tolist()) regnames.insert(len(regnames), 'T1effect_%d' % i) return reg, regnames def _generate_clustered_design(self, infolist): """Generates condition information for sparse-clustered designs. 
""" infoout = deepcopy(infolist) for i, info in enumerate(infolist): infoout[i].conditions = None infoout[i].onsets = None infoout[i].durations = None if info.conditions: img = load(self.inputs.functional_runs[i]) nscans = img.get_shape()[3] reg, regnames = self._cond_to_regress(info, nscans) if hasattr(infoout[i], 'regressors') and infoout[i].regressors: if not infoout[i].regressor_names: infoout[i].regressor_names = \ ['R%d'%j for j in range(len(infoout[i].regressors))] else: infoout[i].regressors = [] infoout[i].regressor_names = [] for j, r in enumerate(reg): regidx = len(infoout[i].regressors) infoout[i].regressor_names.insert(regidx, regnames[j]) infoout[i].regressors.insert(regidx, r) return infoout def _generate_design(self, infolist=None): if isdefined(self.inputs.subject_info): infolist = self.inputs.subject_info else: infolist = gen_info(self.inputs.event_files) sparselist = self._generate_clustered_design(infolist) super(SpecifySparseModel, self)._generate_design(infolist = sparselist) def _list_outputs(self): outputs = self._outputs().get() if not hasattr(self, '_sessinfo'): self._generate_design() outputs['session_info'] = self._sessinfo if isdefined(self.inputs.save_plot) and self.inputs.save_plot: outputs['sparse_png_file'] = os.path.join(os.getcwd(), 'sparse.png') outputs['sparse_svg_file'] = os.path.join(os.getcwd(), 'sparse.svg') return outputs
bsd-3-clause
ricket1978/ggplot
ggplot/components/loess.py
13
1602
from __future__ import (absolute_import, division, print_function, unicode_literals) """ loess(formula, data, weights, subset, na.action, model = FALSE, span = 0.75, enp.target, degree = 2, parametric = FALSE, drop.square = FALSE, normalize = TRUE, family = c("gaussian", "symmetric"), method = c("loess", "model.frame"), control = loess.control(...), ...) a formula specifying the numeric response and one to four numeric predictors (best specified via an interaction, but can also be specified additively). Will be coerced to a formula if necessary. """ import pylab as pl import pandas as pd import numpy as np def loess( x, h, xp, yp ): "loess func" """args: x => location h => bandwidth (not sure how to choose this automatically) xp => vector yp => vector example: X = np.arange(1, 501) y = np.random.random_integers(low=75, high=130, size=len(X)) data = np.array(zip(X,y)) s1, s2 = [], [] for k in data[:,0]: s1.append( loess( k, 5, data[:,0], data[:,1] ) ) s2.append( loess( k, 100, data[:,0], data[:,1] ) ) pl.plot( data[:,0], data[:,1], 'o', color="white", markersize=1, linewidth=3 ) pl.plot( data[:,0], np.array(s1), 'k-', data[:,0], np.array(s2), 'k--' ) pl.show() """ w = np.exp( -0.5*( ((x-xp)/h)**2 )/np.sqrt(2*np.pi*h**2) ) b = sum(w*xp)*sum(w*yp) - sum(w)*sum(w*xp*yp) b /= sum(w*xp)**2 - sum(w)*sum(w*xp**2) a = ( sum(w*yp) - b*sum(w*xp) )/sum(w) return a + b*x
bsd-2-clause
yavalvas/yav_com
build/matplotlib/lib/mpl_examples/images_contours_and_fields/pcolormesh_levels.py
6
1509
""" Shows how to combine Normalization and Colormap instances to draw "levels" in pcolor, pcolormesh and imshow type plots in a similar way to the levels keyword argument to contour/contourf. """ import matplotlib.pyplot as plt from matplotlib.colors import BoundaryNorm from matplotlib.ticker import MaxNLocator import numpy as np # make these smaller to increase the resolution dx, dy = 0.05, 0.05 # generate 2 2d grids for the x & y bounds y, x = np.mgrid[slice(1, 5 + dy, dy), slice(1, 5 + dx, dx)] z = np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x) # x and y are bounds, so z should be the value *inside* those bounds. # Therefore, remove the last value from the z array. z = z[:-1, :-1] levels = MaxNLocator(nbins=15).tick_values(z.min(), z.max()) # pick the desired colormap, sensible levels, and define a normalization # instance which takes data values and translates those into levels. cmap = plt.get_cmap('PiYG') norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True) plt.subplot(2, 1, 1) im = plt.pcolormesh(x, y, z, cmap=cmap, norm=norm) plt.colorbar() # set the limits of the plot to the limits of the data plt.axis([x.min(), x.max(), y.min(), y.max()]) plt.title('pcolormesh with levels') plt.subplot(2, 1, 2) # contours are *point* based plots, so convert our bound into point # centers plt.contourf(x[:-1, :-1] + dx / 2., y[:-1, :-1] + dy / 2., z, levels=levels, cmap=cmap) plt.colorbar() plt.title('contourf with levels') plt.show()
mit
stuart-knock/bokeh
examples/glyphs/colors.py
25
8920
from __future__ import print_function from math import pi import pandas as pd from bokeh.models import Plot, ColumnDataSource, FactorRange, CategoricalAxis, TapTool, HoverTool, OpenURL from bokeh.models.glyphs import Rect from bokeh.document import Document from bokeh.embed import file_html from bokeh.resources import INLINE from bokeh.browserlib import view css3_colors = pd.DataFrame([ ("Pink", "#FFC0CB", "Pink"), ("LightPink", "#FFB6C1", "Pink"), ("HotPink", "#FF69B4", "Pink"), ("DeepPink", "#FF1493", "Pink"), ("PaleVioletRed", "#DB7093", "Pink"), ("MediumVioletRed", "#C71585", "Pink"), ("LightSalmon", "#FFA07A", "Red"), ("Salmon", "#FA8072", "Red"), ("DarkSalmon", "#E9967A", "Red"), ("LightCoral", "#F08080", "Red"), ("IndianRed", "#CD5C5C", "Red"), ("Crimson", "#DC143C", "Red"), ("FireBrick", "#B22222", "Red"), ("DarkRed", "#8B0000", "Red"), ("Red", "#FF0000", "Red"), ("OrangeRed", "#FF4500", "Orange"), ("Tomato", "#FF6347", "Orange"), ("Coral", "#FF7F50", "Orange"), ("DarkOrange", "#FF8C00", "Orange"), ("Orange", "#FFA500", "Orange"), ("Yellow", "#FFFF00", "Yellow"), ("LightYellow", "#FFFFE0", "Yellow"), ("LemonChiffon", "#FFFACD", "Yellow"), ("LightGoldenrodYellow", "#FAFAD2", "Yellow"), ("PapayaWhip", "#FFEFD5", "Yellow"), ("Moccasin", "#FFE4B5", "Yellow"), ("PeachPuff", "#FFDAB9", "Yellow"), ("PaleGoldenrod", "#EEE8AA", "Yellow"), ("Khaki", "#F0E68C", "Yellow"), ("DarkKhaki", "#BDB76B", "Yellow"), ("Gold", "#FFD700", "Yellow"), ("Cornsilk", "#FFF8DC", "Brown"), ("BlanchedAlmond", "#FFEBCD", "Brown"), ("Bisque", "#FFE4C4", "Brown"), ("NavajoWhite", "#FFDEAD", "Brown"), ("Wheat", "#F5DEB3", "Brown"), ("BurlyWood", "#DEB887", "Brown"), ("Tan", "#D2B48C", "Brown"), ("RosyBrown", "#BC8F8F", "Brown"), ("SandyBrown", "#F4A460", "Brown"), ("Goldenrod", "#DAA520", "Brown"), ("DarkGoldenrod", "#B8860B", "Brown"), ("Peru", "#CD853F", "Brown"), ("Chocolate", "#D2691E", "Brown"), ("SaddleBrown", "#8B4513", "Brown"), ("Sienna", "#A0522D", "Brown"), ("Brown", "#A52A2A", 
"Brown"), ("Maroon", "#800000", "Brown"), ("DarkOliveGreen", "#556B2F", "Green"), ("Olive", "#808000", "Green"), ("OliveDrab", "#6B8E23", "Green"), ("YellowGreen", "#9ACD32", "Green"), ("LimeGreen", "#32CD32", "Green"), ("Lime", "#00FF00", "Green"), ("LawnGreen", "#7CFC00", "Green"), ("Chartreuse", "#7FFF00", "Green"), ("GreenYellow", "#ADFF2F", "Green"), ("SpringGreen", "#00FF7F", "Green"), ("MediumSpringGreen", "#00FA9A", "Green"), ("LightGreen", "#90EE90", "Green"), ("PaleGreen", "#98FB98", "Green"), ("DarkSeaGreen", "#8FBC8F", "Green"), ("MediumSeaGreen", "#3CB371", "Green"), ("SeaGreen", "#2E8B57", "Green"), ("ForestGreen", "#228B22", "Green"), ("Green", "#008000", "Green"), ("DarkGreen", "#006400", "Green"), ("MediumAquamarine", "#66CDAA", "Cyan"), ("Aqua", "#00FFFF", "Cyan"), ("Cyan", "#00FFFF", "Cyan"), ("LightCyan", "#E0FFFF", "Cyan"), ("PaleTurquoise", "#AFEEEE", "Cyan"), ("Aquamarine", "#7FFFD4", "Cyan"), ("Turquoise", "#40E0D0", "Cyan"), ("MediumTurquoise", "#48D1CC", "Cyan"), ("DarkTurquoise", "#00CED1", "Cyan"), ("LightSeaGreen", "#20B2AA", "Cyan"), ("CadetBlue", "#5F9EA0", "Cyan"), ("DarkCyan", "#008B8B", "Cyan"), ("Teal", "#008080", "Cyan"), ("LightSteelBlue", "#B0C4DE", "Blue"), ("PowderBlue", "#B0E0E6", "Blue"), ("LightBlue", "#ADD8E6", "Blue"), ("SkyBlue", "#87CEEB", "Blue"), ("LightSkyBlue", "#87CEFA", "Blue"), ("DeepSkyBlue", "#00BFFF", "Blue"), ("DodgerBlue", "#1E90FF", "Blue"), ("CornflowerBlue", "#6495ED", "Blue"), ("SteelBlue", "#4682B4", "Blue"), ("RoyalBlue", "#4169E1", "Blue"), ("Blue", "#0000FF", "Blue"), ("MediumBlue", "#0000CD", "Blue"), ("DarkBlue", "#00008B", "Blue"), ("Navy", "#000080", "Blue"), ("MidnightBlue", "#191970", "Blue"), ("Lavender", "#E6E6FA", "Purple"), ("Thistle", "#D8BFD8", "Purple"), ("Plum", "#DDA0DD", "Purple"), ("Violet", "#EE82EE", "Purple"), ("Orchid", "#DA70D6", "Purple"), ("Fuchsia", "#FF00FF", "Purple"), ("Magenta", "#FF00FF", "Purple"), ("MediumOrchid", "#BA55D3", "Purple"), ("MediumPurple", "#9370DB", 
"Purple"), ("BlueViolet", "#8A2BE2", "Purple"), ("DarkViolet", "#9400D3", "Purple"), ("DarkOrchid", "#9932CC", "Purple"), ("DarkMagenta", "#8B008B", "Purple"), ("Purple", "#800080", "Purple"), ("Indigo", "#4B0082", "Purple"), ("DarkSlateBlue", "#483D8B", "Purple"), ("SlateBlue", "#6A5ACD", "Purple"), ("MediumSlateBlue", "#7B68EE", "Purple"), ("White", "#FFFFFF", "White"), ("Snow", "#FFFAFA", "White"), ("Honeydew", "#F0FFF0", "White"), ("MintCream", "#F5FFFA", "White"), ("Azure", "#F0FFFF", "White"), ("AliceBlue", "#F0F8FF", "White"), ("GhostWhite", "#F8F8FF", "White"), ("WhiteSmoke", "#F5F5F5", "White"), ("Seashell", "#FFF5EE", "White"), ("Beige", "#F5F5DC", "White"), ("OldLace", "#FDF5E6", "White"), ("FloralWhite", "#FFFAF0", "White"), ("Ivory", "#FFFFF0", "White"), ("AntiqueWhite", "#FAEBD7", "White"), ("Linen", "#FAF0E6", "White"), ("LavenderBlush", "#FFF0F5", "White"), ("MistyRose", "#FFE4E1", "White"), ("Gainsboro", "#DCDCDC", "Gray/Black"), ("LightGray", "#D3D3D3", "Gray/Black"), ("Silver", "#C0C0C0", "Gray/Black"), ("DarkGray", "#A9A9A9", "Gray/Black"), ("Gray", "#808080", "Gray/Black"), ("DimGray", "#696969", "Gray/Black"), ("LightSlateGray", "#778899", "Gray/Black"), ("SlateGray", "#708090", "Gray/Black"), ("DarkSlateGray", "#2F4F4F", "Gray/Black"), ("Black", "#000000", "Gray/Black"), ], columns=["Name", "Color", "Group"]) source = ColumnDataSource(dict( names = list(css3_colors.Name), groups = list(css3_colors.Group), colors = list(css3_colors.Color), )) xdr = FactorRange(factors=list(css3_colors.Group.unique())) ydr = FactorRange(factors=list(reversed(css3_colors.Name))) plot = Plot(title="CSS3 Color Names", x_range=xdr, y_range=ydr, plot_width=600, plot_height=2000) rect = Rect(x="groups", y="names", width=1, height=1, fill_color="colors", line_color=None) rect_renderer = plot.add_glyph(source, rect) xaxis_above = CategoricalAxis(major_label_orientation=pi/4) plot.add_layout(xaxis_above, 'above') xaxis_below = 
CategoricalAxis(major_label_orientation=pi/4) plot.add_layout(xaxis_below, 'below') plot.add_layout(CategoricalAxis(), 'left') url = "http://www.colors.commutercreative.com/@names/" tooltips = """Click the color to go to:<br /><a href="{url}">{url}</a>""".format(url=url) tap = TapTool(plot=plot, renderers=[rect_renderer], action=OpenURL(url=url)) hover = HoverTool(plot=plot, renderers=[rect_renderer], tooltips=tooltips) plot.tools.extend([tap, hover]) doc = Document() doc.add(plot) if __name__ == "__main__": filename = "colors.html" with open(filename, "w") as f: f.write(file_html(doc, INLINE, "CSS3 Color Names")) print("Wrote %s" % filename) view(filename)
bsd-3-clause
herilalaina/scikit-learn
sklearn/utils/estimator_checks.py
1
75664
from __future__ import print_function import types import warnings import sys import traceback import pickle from copy import deepcopy import numpy as np from scipy import sparse from scipy.stats import rankdata import struct from sklearn.externals.six.moves import zip from sklearn.externals.joblib import hash, Memory from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regex from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_allclose_dense_sparse from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import META_ESTIMATORS from sklearn.utils.testing import set_random_state from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import SkipTest from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_dict_equal from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.base import (clone, TransformerMixin, ClusterMixin, BaseEstimator, is_classifier, is_regressor) from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score from sklearn.random_projection import BaseRandomProjection from sklearn.feature_selection import SelectKBest from sklearn.svm.base import BaseLibSVM from sklearn.linear_model.stochastic_gradient import BaseSGD from sklearn.pipeline import make_pipeline from sklearn.exceptions import ConvergenceWarning from sklearn.exceptions import DataConversionWarning from sklearn.exceptions import SkipTestWarning from sklearn.model_selection import train_test_split from 
sklearn.metrics.pairwise import (rbf_kernel, linear_kernel, pairwise_distances) from sklearn.utils import shuffle from sklearn.utils.fixes import signature from sklearn.utils.validation import has_fit_parameter, _num_samples from sklearn.preprocessing import StandardScaler from sklearn.datasets import load_iris, load_boston, make_blobs BOSTON = None CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD'] MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet', 'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess', 'GaussianProcessRegressor', 'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso', 'LassoLars', 'LinearRegression', 'MultiTaskElasticNet', 'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV', 'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression', 'RANSACRegressor', 'RadiusNeighborsRegressor', 'RandomForestRegressor', 'Ridge', 'RidgeCV'] def _yield_non_meta_checks(name, estimator): yield check_estimators_dtypes yield check_fit_score_takes_y yield check_dtype_object yield check_sample_weights_pandas_series yield check_sample_weights_list yield check_estimators_fit_returns_self yield check_complex_data # Check that all estimator yield informative messages when # trained on empty datasets yield check_estimators_empty_data_messages if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']: # SpectralEmbedding is non-deterministic, # see issue #4236 # cross-decomposition's "transform" returns X and Y yield check_pipeline_consistency if name not in ['Imputer']: # Test that all estimators check their input for NaN's and infs yield check_estimators_nan_inf if name not in ['GaussianProcess']: # FIXME! # in particular GaussianProcess! yield check_estimators_overwrite_params if hasattr(estimator, 'sparsify'): yield check_sparsify_coefficients yield check_estimator_sparse_data # Test that estimators can be pickled, and once pickled # give the same answer as before. 
yield check_estimators_pickle def _yield_classifier_checks(name, classifier): # test classifiers can handle non-array data yield check_classifier_data_not_an_array # test classifiers trained on a single label always return this label yield check_classifiers_one_label yield check_classifiers_classes yield check_estimators_partial_fit_n_features # basic consistency testing yield check_classifiers_train yield check_classifiers_regression_target if (name not in ["MultinomialNB", "ComplementNB", "LabelPropagation", "LabelSpreading"] and # TODO some complication with -1 label name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]): # We don't raise a warning in these classifiers, as # the column y interface is used by the forests. yield check_supervised_y_2d yield check_supervised_y_no_nan # test if NotFittedError is raised yield check_estimators_unfitted if 'class_weight' in classifier.get_params().keys(): yield check_class_weight_classifiers yield check_non_transformer_estimators_n_iter # test if predict_proba is a monotonic transformation of decision_function yield check_decision_proba_consistency @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_supervised_y_no_nan(name, estimator_orig): # Checks that the Estimator targets are not NaN. estimator = clone(estimator_orig) rng = np.random.RandomState(888) X = rng.randn(10, 5) y = np.ones(10) * np.inf y = multioutput_estimator_convert_y_2d(estimator, y) errmsg = "Input contains NaN, infinity or a value too large for " \ "dtype('float64')." 
try: estimator.fit(X, y) except ValueError as e: if str(e) != errmsg: raise ValueError("Estimator {0} raised error as expected, but " "does not match expected error message" .format(name)) else: raise ValueError("Estimator {0} should have raised error on fitting " "array y with NaN value.".format(name)) def _yield_regressor_checks(name, regressor): # TODO: test with intercept # TODO: test with multiple responses # basic testing yield check_regressors_train yield check_regressor_data_not_an_array yield check_estimators_partial_fit_n_features yield check_regressors_no_decision_function yield check_supervised_y_2d yield check_supervised_y_no_nan if name != 'CCA': # check that the regressor handles int input yield check_regressors_int if name != "GaussianProcessRegressor": # Test if NotFittedError is raised yield check_estimators_unfitted yield check_non_transformer_estimators_n_iter def _yield_transformer_checks(name, transformer): # All transformers should either deal with sparse data or raise an # exception with type TypeError and an intelligible error message if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer', 'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']: yield check_transformer_data_not_an_array # these don't actually fit the data, so don't raise errors if name not in ['AdditiveChi2Sampler', 'Binarizer', 'FunctionTransformer', 'Normalizer']: # basic tests yield check_transformer_general yield check_transformers_unfitted # Dependent on external solvers and hence accessing the iter # param is non-trivial. external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding', 'RandomizedLasso', 'LogisticRegressionCV'] if name not in external_solver: yield check_transformer_n_iter def _yield_clustering_checks(name, clusterer): yield check_clusterer_compute_labels_predict if name not in ('WardAgglomeration', "FeatureAgglomeration"): # this is clustering on the features # let's not test that here. 
yield check_clustering yield check_estimators_partial_fit_n_features yield check_non_transformer_estimators_n_iter def _yield_all_checks(name, estimator): for check in _yield_non_meta_checks(name, estimator): yield check if is_classifier(estimator): for check in _yield_classifier_checks(name, estimator): yield check if is_regressor(estimator): for check in _yield_regressor_checks(name, estimator): yield check if isinstance(estimator, TransformerMixin): for check in _yield_transformer_checks(name, estimator): yield check if isinstance(estimator, ClusterMixin): for check in _yield_clustering_checks(name, estimator): yield check yield check_fit2d_predict1d if name != 'GaussianProcess': # FIXME # XXX GaussianProcess deprecated in 0.20 yield check_fit2d_1sample yield check_fit2d_1feature yield check_fit1d yield check_get_params_invariance yield check_dict_unchanged yield check_dont_overwrite_parameters def check_estimator(Estimator): """Check if estimator adheres to scikit-learn conventions. This estimator will run an extensive test-suite for input validation, shapes, etc. Additional tests for classifiers, regressors, clustering or transformers will be run if the Estimator class inherits from the corresponding mixin from sklearn.base. This test can be applied to classes or instances. Classes currently have some additional tests that related to construction, while passing instances allows the testing of multiple options. Parameters ---------- estimator : estimator object or class Estimator to check. Estimator is a class object or instance. 
""" if isinstance(Estimator, type): # got a class name = Estimator.__name__ check_parameters_default_constructible(name, Estimator) check_no_fit_attributes_set_in_init(name, Estimator) estimator = Estimator() else: # got an instance estimator = Estimator name = type(estimator).__name__ for check in _yield_all_checks(name, estimator): try: check(name, estimator) except SkipTest as message: # the only SkipTest thrown currently results from not # being able to import pandas. warnings.warn(message, SkipTestWarning) def _boston_subset(n_samples=200): global BOSTON if BOSTON is None: boston = load_boston() X, y = boston.data, boston.target X, y = shuffle(X, y, random_state=0) X, y = X[:n_samples], y[:n_samples] X = StandardScaler().fit_transform(X) BOSTON = X, y return BOSTON def set_checking_parameters(estimator): # set parameters to speed up some estimators and # avoid deprecated behaviour params = estimator.get_params() if ("n_iter" in params and estimator.__class__.__name__ != "TSNE" and not isinstance(estimator, BaseSGD)): estimator.set_params(n_iter=5) if "max_iter" in params: warnings.simplefilter("ignore", ConvergenceWarning) if estimator.max_iter is not None: estimator.set_params(max_iter=min(5, estimator.max_iter)) # LinearSVR, LinearSVC if estimator.__class__.__name__ in ['LinearSVR', 'LinearSVC']: estimator.set_params(max_iter=20) # NMF if estimator.__class__.__name__ == 'NMF': estimator.set_params(max_iter=100) # MLP if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']: estimator.set_params(max_iter=100) if "n_resampling" in params: # randomized lasso estimator.set_params(n_resampling=5) if "n_estimators" in params: # especially gradient boosting with default 100 estimator.set_params(n_estimators=min(5, estimator.n_estimators)) if "max_trials" in params: # RANSAC estimator.set_params(max_trials=10) if "n_init" in params: # K-Means estimator.set_params(n_init=2) if "decision_function_shape" in params: # SVC 
estimator.set_params(decision_function_shape='ovo') if estimator.__class__.__name__ == "SelectFdr": # be tolerant of noisy datasets (not actually speed) estimator.set_params(alpha=.5) if estimator.__class__.__name__ == "TheilSenRegressor": estimator.max_subpopulation = 100 if isinstance(estimator, BaseRandomProjection): # Due to the jl lemma and often very few samples, the number # of components of the random matrix projection will be probably # greater than the number of features. # So we impose a smaller number (avoid "auto" mode) estimator.set_params(n_components=2) if isinstance(estimator, SelectKBest): # SelectKBest has a default of k=10 # which is more feature than we have in most case. estimator.set_params(k=1) class NotAnArray(object): " An object that is convertable to an array" def __init__(self, data): self.data = data def __array__(self, dtype=None): return self.data def _is_32bit(): """Detect if process is 32bit Python.""" return struct.calcsize('P') * 8 == 32 def _is_pairwise(estimator): """Returns True if estimator has a _pairwise attribute set to True. Parameters ---------- estimator : object Estimator object to test. Returns ------- out : bool True if _pairwise is set to True and False otherwise. """ return bool(getattr(estimator, "_pairwise", False)) def _is_pairwise_metric(estimator): """Returns True if estimator accepts pairwise metric. Parameters ---------- estimator : object Estimator object to test. Returns ------- out : bool True if _pairwise is set to True and False otherwise. 
""" metric = getattr(estimator, "metric", None) return bool(metric == 'precomputed') def pairwise_estimator_convert_X(X, estimator, kernel=linear_kernel): if _is_pairwise_metric(estimator): return pairwise_distances(X, metric='euclidean') if _is_pairwise(estimator): return kernel(X, X) return X def check_estimator_sparse_data(name, estimator_orig): rng = np.random.RandomState(0) X = rng.rand(40, 10) X[X < .8] = 0 X = pairwise_estimator_convert_X(X, estimator_orig) X_csr = sparse.csr_matrix(X) y = (4 * rng.rand(40)).astype(np.int) # catch deprecation warnings with ignore_warnings(category=DeprecationWarning): estimator = clone(estimator_orig) y = multioutput_estimator_convert_y_2d(estimator, y) for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']: X = X_csr.asformat(sparse_format) # catch deprecation warnings with ignore_warnings(category=(DeprecationWarning, FutureWarning)): if name in ['Scaler', 'StandardScaler']: estimator = clone(estimator).set_params(with_mean=False) else: estimator = clone(estimator) # fit and predict try: with ignore_warnings(category=(DeprecationWarning, FutureWarning)): estimator.fit(X, y) if hasattr(estimator, "predict"): pred = estimator.predict(X) assert_equal(pred.shape, (X.shape[0],)) if hasattr(estimator, 'predict_proba'): probs = estimator.predict_proba(X) assert_equal(probs.shape, (X.shape[0], 4)) except (TypeError, ValueError) as e: if 'sparse' not in repr(e).lower(): print("Estimator %s doesn't seem to fail gracefully on " "sparse data: error message state explicitly that " "sparse input is not supported if this is not the case." % name) raise except Exception: print("Estimator %s doesn't seem to fail gracefully on " "sparse data: it should raise a TypeError if sparse input " "is explicitly not supported." 
% name) raise @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_sample_weights_pandas_series(name, estimator_orig): # check that estimators will accept a 'sample_weight' parameter of # type pandas.Series in the 'fit' function. estimator = clone(estimator_orig) if has_fit_parameter(estimator, "sample_weight"): try: import pandas as pd X = np.array([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]]) X = pd.DataFrame(pairwise_estimator_convert_X(X, estimator_orig)) y = pd.Series([1, 1, 1, 2, 2, 2]) weights = pd.Series([1] * 6) try: estimator.fit(X, y, sample_weight=weights) except ValueError: raise ValueError("Estimator {0} raises error if " "'sample_weight' parameter is of " "type pandas.Series".format(name)) except ImportError: raise SkipTest("pandas is not installed: not testing for " "input of type pandas.Series to class weight.") @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_sample_weights_list(name, estimator_orig): # check that estimators will accept a 'sample_weight' parameter of # type list in the 'fit' function. 
if has_fit_parameter(estimator_orig, "sample_weight"): estimator = clone(estimator_orig) rnd = np.random.RandomState(0) X = pairwise_estimator_convert_X(rnd.uniform(size=(10, 3)), estimator_orig) y = np.arange(10) % 3 y = multioutput_estimator_convert_y_2d(estimator, y) sample_weight = [3] * 10 # Test that estimators don't raise any exception estimator.fit(X, y, sample_weight=sample_weight) @ignore_warnings(category=(DeprecationWarning, FutureWarning, UserWarning)) def check_dtype_object(name, estimator_orig): # check that estimators treat dtype object as numeric if possible rng = np.random.RandomState(0) X = pairwise_estimator_convert_X(rng.rand(40, 10), estimator_orig) X = X.astype(object) y = (X[:, 0] * 4).astype(np.int) estimator = clone(estimator_orig) y = multioutput_estimator_convert_y_2d(estimator, y) estimator.fit(X, y) if hasattr(estimator, "predict"): estimator.predict(X) if hasattr(estimator, "transform"): estimator.transform(X) try: estimator.fit(X, y.astype(object)) except Exception as e: if "Unknown label type" not in str(e): raise X[0, 0] = {'foo': 'bar'} msg = "argument must be a string or a number" assert_raises_regex(TypeError, msg, estimator.fit, X, y) def check_complex_data(name, estimator_orig): # check that estimators raise an exception on providing complex data X = np.random.sample(10) + 1j * np.random.sample(10) X = X.reshape(-1, 1) y = np.random.sample(10) + 1j * np.random.sample(10) estimator = clone(estimator_orig) assert_raises_regex(ValueError, "Complex data not supported", estimator.fit, X, y) @ignore_warnings def check_dict_unchanged(name, estimator_orig): # this estimator raises # ValueError: Found array with 0 feature(s) (shape=(23, 0)) # while a minimum of 1 is required. 
# error if name in ['SpectralCoclustering']: return rnd = np.random.RandomState(0) if name in ['RANSACRegressor']: X = 3 * rnd.uniform(size=(20, 3)) else: X = 2 * rnd.uniform(size=(20, 3)) X = pairwise_estimator_convert_X(X, estimator_orig) y = X[:, 0].astype(np.int) estimator = clone(estimator_orig) y = multioutput_estimator_convert_y_2d(estimator, y) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 if hasattr(estimator, "n_best"): estimator.n_best = 1 set_random_state(estimator, 1) estimator.fit(X, y) for method in ["predict", "transform", "decision_function", "predict_proba"]: if hasattr(estimator, method): dict_before = estimator.__dict__.copy() getattr(estimator, method)(X) assert_dict_equal(estimator.__dict__, dict_before, 'Estimator changes __dict__ during %s' % method) def is_public_parameter(attr): return not (attr.startswith('_') or attr.endswith('_')) @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_dont_overwrite_parameters(name, estimator_orig): # check that fit method only changes or sets private attributes if hasattr(estimator_orig.__init__, "deprecated_original"): # to not check deprecated classes return estimator = clone(estimator_orig) rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(20, 3)) X = pairwise_estimator_convert_X(X, estimator_orig) y = X[:, 0].astype(np.int) y = multioutput_estimator_convert_y_2d(estimator, y) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) dict_before_fit = estimator.__dict__.copy() estimator.fit(X, y) dict_after_fit = estimator.__dict__ public_keys_after_fit = [key for key in dict_after_fit.keys() if is_public_parameter(key)] attrs_added_by_fit = [key for key in public_keys_after_fit if key not in dict_before_fit.keys()] # check that fit doesn't add any public attribute assert_true(not 
attrs_added_by_fit, ('Estimator adds public attribute(s) during' ' the fit method.' ' Estimators are only allowed to add private attributes' ' either started with _ or ended' ' with _ but %s added' % ', '.join(attrs_added_by_fit))) # check that fit doesn't change any public attribute attrs_changed_by_fit = [key for key in public_keys_after_fit if (dict_before_fit[key] is not dict_after_fit[key])] assert_true(not attrs_changed_by_fit, ('Estimator changes public attribute(s) during' ' the fit method. Estimators are only allowed' ' to change attributes started' ' or ended with _, but' ' %s changed' % ', '.join(attrs_changed_by_fit))) @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_fit2d_predict1d(name, estimator_orig): # check by fitting a 2d array and predicting with a 1d array rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(20, 3)) X = pairwise_estimator_convert_X(X, estimator_orig) y = X[:, 0].astype(np.int) estimator = clone(estimator_orig) y = multioutput_estimator_convert_y_2d(estimator, y) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) estimator.fit(X, y) for method in ["predict", "transform", "decision_function", "predict_proba"]: if hasattr(estimator, method): assert_raise_message(ValueError, "Reshape your data", getattr(estimator, method), X[0]) @ignore_warnings def check_fit2d_1sample(name, estimator_orig): # Check that fitting a 2d array with only one sample either works or # returns an informative message. The error message should either mention # the number of samples or the number of classes. 
rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(1, 10)) y = X[:, 0].astype(np.int) estimator = clone(estimator_orig) y = multioutput_estimator_convert_y_2d(estimator, y) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) msgs = ["1 sample", "n_samples = 1", "n_samples=1", "one sample", "1 class", "one class"] try: estimator.fit(X, y) except ValueError as e: if all(msg not in repr(e) for msg in msgs): raise e @ignore_warnings def check_fit2d_1feature(name, estimator_orig): # check fitting a 2d array with only 1 feature either works or returns # informative message rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(10, 1)) X = pairwise_estimator_convert_X(X, estimator_orig) y = X[:, 0].astype(np.int) estimator = clone(estimator_orig) y = multioutput_estimator_convert_y_2d(estimator, y) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 # ensure two labels in subsample for RandomizedLogisticRegression if name == 'RandomizedLogisticRegression': estimator.sample_fraction = 1 # ensure non skipped trials for RANSACRegressor if name == 'RANSACRegressor': estimator.residual_threshold = 0.5 y = multioutput_estimator_convert_y_2d(estimator, y) set_random_state(estimator, 1) msgs = ["1 feature(s)", "n_features = 1", "n_features=1"] try: estimator.fit(X, y) except ValueError as e: if all(msg not in repr(e) for msg in msgs): raise e @ignore_warnings def check_fit1d(name, estimator_orig): # check fitting 1d X array raises a ValueError rnd = np.random.RandomState(0) X = 3 * rnd.uniform(size=(20)) y = X.astype(np.int) estimator = clone(estimator_orig) y = multioutput_estimator_convert_y_2d(estimator, y) if hasattr(estimator, "n_components"): estimator.n_components = 1 if hasattr(estimator, "n_clusters"): estimator.n_clusters = 1 set_random_state(estimator, 1) assert_raises(ValueError, 
estimator.fit, X, y) @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_transformer_general(name, transformer): X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X = StandardScaler().fit_transform(X) X -= X.min() _check_transformer(name, transformer, X, y) _check_transformer(name, transformer, X.tolist(), y.tolist()) @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_transformer_data_not_an_array(name, transformer): X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X = StandardScaler().fit_transform(X) # We need to make sure that we have non negative data, for things # like NMF X -= X.min() - .1 this_X = NotAnArray(X) this_y = NotAnArray(np.asarray(y)) _check_transformer(name, transformer, this_X, this_y) @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_transformers_unfitted(name, transformer): X, y = _boston_subset() transformer = clone(transformer) with assert_raises((AttributeError, ValueError), msg="The unfitted " "transformer {} does not raise an error when " "transform is called. Perhaps use " "check_is_fitted in transform.".format(name)): transformer.transform(X) def _check_transformer(name, transformer_orig, X, y): if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit(): # Those transformers yield non-deterministic output when executed on # a 32bit Python. The same transformers are stable on 64bit Python. # FIXME: try to isolate a minimalistic reproduction case only depending # on numpy & scipy and/or maybe generate a test dataset that does not # cause such unstable behaviors. 
msg = name + ' is non deterministic on 32bit Python' raise SkipTest(msg) n_samples, n_features = np.asarray(X).shape transformer = clone(transformer_orig) set_random_state(transformer) # fit if name in CROSS_DECOMPOSITION: y_ = np.c_[y, y] y_[::2, 1] *= 2 else: y_ = y transformer.fit(X, y_) # fit_transform method should work on non fitted estimator transformer_clone = clone(transformer) X_pred = transformer_clone.fit_transform(X, y=y_) if isinstance(X_pred, tuple): for x_pred in X_pred: assert_equal(x_pred.shape[0], n_samples) else: # check for consistent n_samples assert_equal(X_pred.shape[0], n_samples) if hasattr(transformer, 'transform'): if name in CROSS_DECOMPOSITION: X_pred2 = transformer.transform(X, y_) X_pred3 = transformer.fit_transform(X, y=y_) else: X_pred2 = transformer.transform(X) X_pred3 = transformer.fit_transform(X, y=y_) if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple): for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3): assert_allclose_dense_sparse( x_pred, x_pred2, atol=1e-2, err_msg="fit_transform and transform outcomes " "not consistent in %s" % transformer) assert_allclose_dense_sparse( x_pred, x_pred3, atol=1e-2, err_msg="consecutive fit_transform outcomes " "not consistent in %s" % transformer) else: assert_allclose_dense_sparse( X_pred, X_pred2, err_msg="fit_transform and transform outcomes " "not consistent in %s" % transformer, atol=1e-2) assert_allclose_dense_sparse( X_pred, X_pred3, atol=1e-2, err_msg="consecutive fit_transform outcomes " "not consistent in %s" % transformer) assert_equal(_num_samples(X_pred2), n_samples) assert_equal(_num_samples(X_pred3), n_samples) # raises error on malformed input for transform if hasattr(X, 'T'): # If it's not an array, it does not have a 'T' property with assert_raises(ValueError, msg="The transformer {} does " "not raise an error when the number of " "features in transform is different from" " the number of features in " "fit.".format(name)): transformer.transform(X.T) 
@ignore_warnings
def check_pipeline_consistency(name, estimator_orig):
    """Check that make_pipeline(est) gives the same results as est alone."""
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only
        # depending on scipy and/or maybe generate a test dataset that does
        # not cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)

    # check that make_pipeline(est) gives same score as est
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X -= X.min()
    X = pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
    estimator = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator, y)
    set_random_state(estimator)
    pipeline = make_pipeline(estimator)
    estimator.fit(X, y)
    pipeline.fit(X, y)

    funcs = ["score", "fit_transform"]

    for func_name in funcs:
        func = getattr(estimator, func_name, None)
        if func is not None:
            func_pipeline = getattr(pipeline, func_name)
            result = func(X, y)
            result_pipe = func_pipeline(X, y)
            assert_allclose_dense_sparse(result, result_pipe)


@ignore_warnings
def check_fit_score_takes_y(name, estimator_orig):
    """Check that all estimators accept an optional y in fit and score.

    This is required so they can be used in pipelines.
    """
    rnd = np.random.RandomState(0)
    X = rnd.uniform(size=(10, 3))
    X = pairwise_estimator_convert_X(X, estimator_orig)
    y = np.arange(10) % 3
    estimator = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator, y)
    set_random_state(estimator)

    funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
    for func_name in funcs:
        func = getattr(estimator, func_name, None)
        if func is not None:
            func(X, y)
            args = [p.name for p in signature(func).parameters.values()]
            if args[0] == "self":
                # if_delegate_has_method makes methods into functions
                # with an explicit "self", so need to shift arguments
                args = args[1:]
            assert_true(args[1] in ["y", "Y"],
                        "Expected y or Y as second argument for method "
                        "%s of %s. Got arguments: %r."
                        % (func_name, type(estimator).__name__, args))


@ignore_warnings
def check_estimators_dtypes(name, estimator_orig):
    """Check that estimators fit/predict on float32/64 and int32/64 data."""
    rnd = np.random.RandomState(0)
    X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
    X_train_32 = pairwise_estimator_convert_X(X_train_32, estimator_orig)
    X_train_64 = X_train_32.astype(np.float64)
    X_train_int_64 = X_train_32.astype(np.int64)
    X_train_int_32 = X_train_32.astype(np.int32)
    y = X_train_int_64[:, 0]
    y = multioutput_estimator_convert_y_2d(estimator_orig, y)

    methods = ["predict", "transform", "decision_function", "predict_proba"]

    for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
        estimator = clone(estimator_orig)
        set_random_state(estimator, 1)
        estimator.fit(X_train, y)

        for method in methods:
            if hasattr(estimator, method):
                getattr(estimator, method)(X_train)


@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_empty_data_messages(name, estimator_orig):
    """Check that fitting on empty data raises an informative error."""
    e = clone(estimator_orig)
    set_random_state(e, 1)

    X_zero_samples = np.empty(0).reshape(0, 3)
    # The precise message can change depending on whether X or y is
    # validated first. Let us test the type of exception only:
    with assert_raises(ValueError, msg="The estimator {} does not"
                       " raise an error when an empty data is used "
                       "to train. Perhaps use "
                       "check_array in train.".format(name)):
        e.fit(X_zero_samples, [])

    X_zero_features = np.empty(0).reshape(3, 0)
    # the following y should be accepted by both classifiers and regressors
    # and ignored by unsupervised models
    y = multioutput_estimator_convert_y_2d(e, np.array([1, 0, 1]))
    # raw string: the pattern contains regex escapes such as \( and \d*
    msg = (r"0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
           r"is required.")
    assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)


@ignore_warnings(category=DeprecationWarning)
def check_estimators_nan_inf(name, estimator_orig):
    """Check that estimators reject NaN and inf values in X.

    fit, predict and transform must all raise a ValueError mentioning
    'inf' or 'NaN' when handed contaminated data.
    """
    rnd = np.random.RandomState(0)
    X_train_finite = pairwise_estimator_convert_X(rnd.uniform(size=(10, 3)),
                                                  estimator_orig)
    X_train_nan = rnd.uniform(size=(10, 3))
    X_train_nan[0, 0] = np.nan
    X_train_inf = rnd.uniform(size=(10, 3))
    X_train_inf[0, 0] = np.inf
    y = np.ones(10)
    y[:5] = 0
    y = multioutput_estimator_convert_y_2d(estimator_orig, y)
    error_string_fit = "Estimator doesn't check for NaN and inf in fit."
    error_string_predict = ("Estimator doesn't check for NaN and inf in"
                            " predict.")
    error_string_transform = ("Estimator doesn't check for NaN and inf in"
                              " transform.")
    for X_train in [X_train_nan, X_train_inf]:
        # catch deprecation warnings
        with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
            estimator = clone(estimator_orig)
            set_random_state(estimator, 1)
            # try to fit
            try:
                estimator.fit(X_train, y)
            except ValueError as e:
                if 'inf' not in repr(e) and 'NaN' not in repr(e):
                    print(error_string_fit, estimator, e)
                    traceback.print_exc(file=sys.stdout)
                    raise e
            except Exception as exc:
                print(error_string_fit, estimator, exc)
                traceback.print_exc(file=sys.stdout)
                raise exc
            else:
                raise AssertionError(error_string_fit, estimator)
            # actually fit
            estimator.fit(X_train_finite, y)

            # predict
            if hasattr(estimator, "predict"):
                try:
                    estimator.predict(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_predict, estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_predict, estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    # BUGFIX: re-raise like the fit branch does; previously
                    # an unexpected exception type was printed but swallowed
                    # and the check passed silently.
                    raise exc
                else:
                    raise AssertionError(error_string_predict, estimator)

            # transform
            if hasattr(estimator, "transform"):
                try:
                    estimator.transform(X_train)
                except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
                        print(error_string_transform, estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_transform, estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    # BUGFIX: re-raise (see predict branch above).
                    raise exc
                else:
                    raise AssertionError(error_string_transform, estimator)


@ignore_warnings
def check_estimators_pickle(name, estimator_orig):
    """Test that we can pickle all estimators"""
    check_methods = ["predict", "transform", "decision_function",
                     "predict_proba"]

    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)

    # some estimators can't do features less than 0
    X -= X.min()
    X = pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)

    estimator = clone(estimator_orig)

    # some estimators only take multioutputs
    y = multioutput_estimator_convert_y_2d(estimator, y)

    set_random_state(estimator)
    estimator.fit(X, y)

    result = dict()
    for method in check_methods:
        if hasattr(estimator, method):
            result[method] = getattr(estimator, method)(X)

    # pickle and unpickle!
    pickled_estimator = pickle.dumps(estimator)
    if estimator.__module__.startswith('sklearn.'):
        assert_true(b"version" in pickled_estimator)
    unpickled_estimator = pickle.loads(pickled_estimator)

    for method in result:
        unpickled_result = getattr(unpickled_estimator, method)(X)
        assert_allclose_dense_sparse(result[method], unpickled_result)


@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_partial_fit_n_features(name, estimator_orig):
    """Check that partial_fit rejects a change in the number of features."""
    if not hasattr(estimator_orig, 'partial_fit'):
        return
    estimator = clone(estimator_orig)
    X, y = make_blobs(n_samples=50, random_state=1)
    X -= X.min()

    try:
        if is_classifier(estimator):
            classes = np.unique(y)
            estimator.partial_fit(X, y, classes=classes)
        else:
            estimator.partial_fit(X, y)
    except NotImplementedError:
        return

    with assert_raises(ValueError,
                       msg="The estimator {} does not raise an"
                           " error when the number of features"
                           " changes between calls to "
                           "partial_fit.".format(name)):
        estimator.partial_fit(X[:, :-1], y)


@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_clustering(name, clusterer_orig):
    """Check the basic clusterer API: fit, fit_predict, labels_ values."""
    clusterer = clone(clusterer_orig)
    X, y = make_blobs(n_samples=50, random_state=1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    n_samples, n_features = X.shape
    # catch deprecation and neighbors warnings
    if hasattr(clusterer, "n_clusters"):
        clusterer.set_params(n_clusters=3)
    set_random_state(clusterer)
    if name == 'AffinityPropagation':
        clusterer.set_params(preference=-100)
        clusterer.set_params(max_iter=100)

    # fit
    clusterer.fit(X)
    # with lists
    clusterer.fit(X.tolist())

    pred = clusterer.labels_
    assert_equal(pred.shape, (n_samples,))
    assert_greater(adjusted_rand_score(pred, y), 0.4)
    # fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
        # there is no way to make Spectral clustering deterministic :(
        return
    set_random_state(clusterer)
    with warnings.catch_warnings(record=True):
        pred2 = clusterer.fit_predict(X)
    assert_array_equal(pred, pred2)

    # fit_predict(X) and labels_ should be of type int
    assert_in(pred.dtype, [np.dtype('int32'), np.dtype('int64')])
    assert_in(pred2.dtype, [np.dtype('int32'), np.dtype('int64')])

    # Add noise to X to test the possible values of the labels
    rng = np.random.RandomState(7)
    X_noise = np.concatenate([X, rng.uniform(low=-3, high=3, size=(5, 2))])
    labels = clusterer.fit_predict(X_noise)

    # There should be at least one sample in every cluster. Equivalently
    # labels_ should contain all the consecutive values between its
    # min and its max.
    labels_sorted = np.unique(labels)
    assert_array_equal(labels_sorted, np.arange(labels_sorted[0],
                                                labels_sorted[-1] + 1))

    # Labels are expected to start at 0 (no noise) or -1 (if noise)
    assert_true(labels_sorted[0] in [0, -1])
    # Labels should be less than n_clusters - 1
    if hasattr(clusterer, 'n_clusters'):
        n_clusters = getattr(clusterer, 'n_clusters')
        assert_greater_equal(n_clusters - 1, labels_sorted[-1])
    # else labels should be less than max(labels_) which is necessarily true


@ignore_warnings(category=DeprecationWarning)
def check_clusterer_compute_labels_predict(name, clusterer_orig):
    """Check that predict is invariant of compute_labels"""
    X, y = make_blobs(n_samples=20, random_state=0)
    clusterer = clone(clusterer_orig)

    if hasattr(clusterer, "compute_labels"):
        # MiniBatchKMeans
        if hasattr(clusterer, "random_state"):
            clusterer.set_params(random_state=0)

        X_pred1 = clusterer.fit(X).predict(X)
        clusterer.set_params(compute_labels=False)
        X_pred2 = clusterer.fit(X).predict(X)
        assert_array_equal(X_pred1, X_pred2)


@ignore_warnings(category=DeprecationWarning)
def check_classifiers_one_label(name, classifier_orig):
    """Check fit/predict behaviour when only one class is present in y."""
    error_string_fit = "Classifier can't train when only one class is present."
    error_string_predict = ("Classifier can't predict when only one class is "
                            "present.")
    rnd = np.random.RandomState(0)
    X_train = rnd.uniform(size=(10, 3))
    X_test = rnd.uniform(size=(10, 3))
    # a single-class target: every label is 1
    y = np.ones(10)
    # catch deprecation warnings
    with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
        classifier = clone(classifier_orig)
        # try to fit
        try:
            classifier.fit(X_train, y)
        except ValueError as e:
            # a ValueError mentioning "class" is the accepted way to refuse
            # a single-class problem; anything else is a failure
            if 'class' not in repr(e):
                print(error_string_fit, classifier, e)
                traceback.print_exc(file=sys.stdout)
                raise e
            else:
                return
        except Exception as exc:
            print(error_string_fit, classifier, exc)
            traceback.print_exc(file=sys.stdout)
            raise exc
        # predict: a classifier that accepted the single-class fit must
        # predict that same class everywhere
        try:
            assert_array_equal(classifier.predict(X_test), y)
        except Exception as exc:
            print(error_string_predict, classifier, exc)
            raise exc


@ignore_warnings  # Warnings are raised by decision function
def check_classifiers_train(name, classifier_orig):
    # Core classifier contract: fit/predict shapes, input validation, and
    # consistency between predict, decision_function and predict_proba,
    # checked on both a multiclass and a binary problem.
    X_m, y_m = make_blobs(n_samples=300, random_state=0)
    X_m, y_m = shuffle(X_m, y_m, random_state=7)
    X_m = StandardScaler().fit_transform(X_m)
    # generate binary problem from multi-class one
    y_b = y_m[y_m != 2]
    X_b = X_m[y_m != 2]
    for (X, y) in [(X_m, y_m), (X_b, y_b)]:
        classes = np.unique(y)
        n_classes = len(classes)
        n_samples, n_features = X.shape
        classifier = clone(classifier_orig)
        if name in ['BernoulliNB', 'MultinomialNB', 'ComplementNB']:
            # these naive Bayes variants require non-negative input
            X -= X.min()
        X = pairwise_estimator_convert_X(X, classifier_orig)
        set_random_state(classifier)
        # raises error on malformed input for fit
        with assert_raises(ValueError, msg="The classifer {} does not"
                           " raise an error when incorrect/malformed input "
                           "data for fit is passed. The number of training "
                           "examples is not the same as the number of labels."
                           " Perhaps use check_X_y in fit.".format(name)):
            classifier.fit(X, y[:-1])

        # fit
        classifier.fit(X, y)
        # with lists
        classifier.fit(X.tolist(), y.tolist())
        assert_true(hasattr(classifier, "classes_"))
        y_pred = classifier.predict(X)
        assert_equal(y_pred.shape, (n_samples,))
        # training set performance
        if name not in ['BernoulliNB', 'MultinomialNB', 'ComplementNB']:
            assert_greater(accuracy_score(y, y_pred), 0.83)

        # raises error on malformed input for predict
        if _is_pairwise(classifier):
            with assert_raises(ValueError, msg="The classifier {} does not"
                               " raise an error when shape of X"
                               "in predict is not equal to (n_test_samples,"
                               "n_training_samples)".format(name)):
                classifier.predict(X.reshape(-1, 1))
        else:
            with assert_raises(ValueError, msg="The classifier {} does not"
                               " raise an error when the number of features "
                               "in predict is different from the number of"
                               " features in fit.".format(name)):
                classifier.predict(X.T)
        if hasattr(classifier, "decision_function"):
            try:
                # decision_function agrees with predict
                decision = classifier.decision_function(X)
                if n_classes == 2:
                    # binary: 1-d scores, positive score means class 1
                    assert_equal(decision.shape, (n_samples,))
                    dec_pred = (decision.ravel() > 0).astype(np.int)
                    assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3 and
                        # 1on1 of LibSVM works differently
                        not isinstance(classifier, BaseLibSVM)):
                    assert_equal(decision.shape, (n_samples, n_classes))
                    assert_array_equal(np.argmax(decision, axis=1), y_pred)

                # raises error on malformed input for decision_function
                if _is_pairwise(classifier):
                    with assert_raises(ValueError, msg="The classifier {} does"
                                       " not raise an error when the "
                                       "shape of X in decision_function is "
                                       "not equal to (n_test_samples, "
                                       "n_training_samples) in fit."
                                       .format(name)):
                        classifier.decision_function(X.reshape(-1, 1))
                else:
                    with assert_raises(ValueError, msg="The classifier {} does"
                                       " not raise an error when the number "
                                       "of features in decision_function is "
                                       "different from the number of features"
                                       " in fit.".format(name)):
                        classifier.decision_function(X.T)
            except NotImplementedError:
                # decision_function is optional
                pass
        if hasattr(classifier, "predict_proba"):
            # predict_proba agrees with predict
            y_prob = classifier.predict_proba(X)
            assert_equal(y_prob.shape, (n_samples, n_classes))
            assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
            # check that probas for all classes sum to one
            assert_allclose(np.sum(y_prob, axis=1), np.ones(n_samples))
            # raises error on malformed input for predict_proba
            if _is_pairwise(classifier_orig):
                with assert_raises(ValueError, msg="The classifier {} does not"
                                   " raise an error when the shape of X"
                                   "in predict_proba is not equal to "
                                   "(n_test_samples, n_training_samples)."
                                   .format(name)):
                    classifier.predict_proba(X.reshape(-1, 1))
            else:
                with assert_raises(ValueError, msg="The classifier {} does not"
                                   " raise an error when the number of "
                                   "features in predict_proba is different "
                                   "from the number of features in fit."
                                   .format(name)):
                    classifier.predict_proba(X.T)
            if hasattr(classifier, "predict_log_proba"):
                # predict_log_proba is a transformation of predict_proba
                y_log_prob = classifier.predict_log_proba(X)
                assert_allclose(y_log_prob, np.log(y_prob), 8, atol=1e-9)
                assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))


@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_fit_returns_self(name, estimator_orig):
    """Check if self is returned when calling fit"""
    X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
    # some want non-negative input
    X -= X.min()
    X = pairwise_estimator_convert_X(X, estimator_orig)

    estimator = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator, y)

    set_random_state(estimator)
    assert_true(estimator.fit(X, y) is estimator)


@ignore_warnings
def check_estimators_unfitted(name, estimator_orig):
    """Check that predict raises an exception in an unfitted estimator.

    Unfitted estimators should raise either AttributeError or ValueError.
    The specific exception type NotFittedError inherits from both and can
    therefore be adequately raised for that purpose.
""" # Common test for Regressors as well as Classifiers X, y = _boston_subset() est = clone(estimator_orig) msg = "fit" if hasattr(est, 'predict'): assert_raise_message((AttributeError, ValueError), msg, est.predict, X) if hasattr(est, 'decision_function'): assert_raise_message((AttributeError, ValueError), msg, est.decision_function, X) if hasattr(est, 'predict_proba'): assert_raise_message((AttributeError, ValueError), msg, est.predict_proba, X) if hasattr(est, 'predict_log_proba'): assert_raise_message((AttributeError, ValueError), msg, est.predict_log_proba, X) @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_supervised_y_2d(name, estimator_orig): if "MultiTask" in name: # These only work on 2d, so this test makes no sense return rnd = np.random.RandomState(0) X = pairwise_estimator_convert_X(rnd.uniform(size=(10, 3)), estimator_orig) y = np.arange(10) % 3 estimator = clone(estimator_orig) set_random_state(estimator) # fit estimator.fit(X, y) y_pred = estimator.predict(X) set_random_state(estimator) # Check that when a 2D y is given, a DataConversionWarning is # raised with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always", DataConversionWarning) warnings.simplefilter("ignore", RuntimeWarning) estimator.fit(X, y[:, np.newaxis]) y_pred_2d = estimator.predict(X) msg = "expected 1 DataConversionWarning, got: %s" % ( ", ".join([str(w_x) for w_x in w])) if name not in MULTI_OUTPUT: # check that we warned if we don't support multi-output assert_greater(len(w), 0, msg) assert_true("DataConversionWarning('A column-vector y" " was passed when a 1d array was expected" in msg) assert_allclose(y_pred.ravel(), y_pred_2d.ravel()) @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_classifiers_classes(name, classifier_orig): X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1) X, y = shuffle(X, y, random_state=7) X = StandardScaler().fit_transform(X) # We need to make sure that we have non 
negative data, for things # like NMF X -= X.min() - .1 X = pairwise_estimator_convert_X(X, classifier_orig) y_names = np.array(["one", "two", "three"])[y] for y_names in [y_names, y_names.astype('O')]: if name in ["LabelPropagation", "LabelSpreading"]: # TODO some complication with -1 label y_ = y else: y_ = y_names classes = np.unique(y_) classifier = clone(classifier_orig) if name == 'BernoulliNB': X = X > X.mean() set_random_state(classifier) # fit classifier.fit(X, y_) y_pred = classifier.predict(X) # training set performance if name != "ComplementNB": # This is a pathological data set for ComplementNB. assert_array_equal(np.unique(y_), np.unique(y_pred)) if np.any(classifier.classes_ != classes): print("Unexpected classes_ attribute for %r: " "expected %s, got %s" % (classifier, classes, classifier.classes_)) @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_regressors_int(name, regressor_orig): X, _ = _boston_subset() X = pairwise_estimator_convert_X(X[:50], regressor_orig) rnd = np.random.RandomState(0) y = rnd.randint(3, size=X.shape[0]) y = multioutput_estimator_convert_y_2d(regressor_orig, y) rnd = np.random.RandomState(0) # separate estimators to control random seeds regressor_1 = clone(regressor_orig) regressor_2 = clone(regressor_orig) set_random_state(regressor_1) set_random_state(regressor_2) if name in CROSS_DECOMPOSITION: y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]) y_ = y_.T else: y_ = y # fit regressor_1.fit(X, y_) pred1 = regressor_1.predict(X) regressor_2.fit(X, y_.astype(np.float)) pred2 = regressor_2.predict(X) assert_allclose(pred1, pred2, atol=1e-2, err_msg=name) @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_regressors_train(name, regressor_orig): X, y = _boston_subset() X = pairwise_estimator_convert_X(X, regressor_orig) y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled y = y.ravel() regressor = clone(regressor_orig) y = 
multioutput_estimator_convert_y_2d(regressor, y) rnd = np.random.RandomState(0) if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'): # linear regressors need to set alpha, but not generalized CV ones regressor.alpha = 0.01 if name == 'PassiveAggressiveRegressor': regressor.C = 0.01 # raises error on malformed input for fit with assert_raises(ValueError, msg="The classifer {} does not" " raise an error when incorrect/malformed input " "data for fit is passed. The number of training " "examples is not the same as the number of " "labels. Perhaps use check_X_y in fit.".format(name)): regressor.fit(X, y[:-1]) # fit if name in CROSS_DECOMPOSITION: y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]) y_ = y_.T else: y_ = y set_random_state(regressor) regressor.fit(X, y_) regressor.fit(X.tolist(), y_.tolist()) y_pred = regressor.predict(X) assert_equal(y_pred.shape, y_.shape) # TODO: find out why PLS and CCA fail. RANSAC is random # and furthermore assumes the presence of outliers, hence # skipped if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'): assert_greater(regressor.score(X, y_), 0.5) @ignore_warnings def check_regressors_no_decision_function(name, regressor_orig): # checks whether regressors have decision_function or predict_proba rng = np.random.RandomState(0) X = rng.normal(size=(10, 4)) regressor = clone(regressor_orig) y = multioutput_estimator_convert_y_2d(regressor, X[:, 0]) if hasattr(regressor, "n_components"): # FIXME CCA, PLS is not robust to rank 1 effects regressor.n_components = 1 regressor.fit(X, y) funcs = ["decision_function", "predict_proba", "predict_log_proba"] for func_name in funcs: func = getattr(regressor, func_name, None) if func is None: # doesn't have function continue # has function. 
Should raise deprecation warning msg = func_name assert_warns_message(DeprecationWarning, msg, func, X) @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_class_weight_classifiers(name, classifier_orig): if name == "NuSVC": # the sparse version has a parameter that doesn't do anything raise SkipTest("Not testing NuSVC class weight as it is ignored.") if name.endswith("NB"): # NaiveBayes classifiers have a somewhat different interface. # FIXME SOON! raise SkipTest for n_centers in [2, 3]: # create a very noisy dataset X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0) # can't use gram_if_pairwise() here, setting up gram matrix manually if _is_pairwise(classifier_orig): X_test = rbf_kernel(X_test, X_train) X_train = rbf_kernel(X_train, X_train) n_centers = len(np.unique(y_train)) if n_centers == 2: class_weight = {0: 1000, 1: 0.0001} else: class_weight = {0: 1000, 1: 0.0001, 2: 0.0001} classifier = clone(classifier_orig).set_params( class_weight=class_weight) if hasattr(classifier, "n_iter"): classifier.set_params(n_iter=100) if hasattr(classifier, "max_iter"): classifier.set_params(max_iter=1000) if hasattr(classifier, "min_weight_fraction_leaf"): classifier.set_params(min_weight_fraction_leaf=0.01) set_random_state(classifier) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) # XXX: Generally can use 0.89 here. 
On Windows, LinearSVC gets # 0.88 (Issue #9111) assert_greater(np.mean(y_pred == 0), 0.87) @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_class_weight_balanced_classifiers(name, classifier_orig, X_train, y_train, X_test, y_test, weights): classifier = clone(classifier_orig) if hasattr(classifier, "n_iter"): classifier.set_params(n_iter=100) if hasattr(classifier, "max_iter"): classifier.set_params(max_iter=1000) set_random_state(classifier) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) classifier.set_params(class_weight='balanced') classifier.fit(X_train, y_train) y_pred_balanced = classifier.predict(X_test) assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'), f1_score(y_test, y_pred, average='weighted')) @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def check_class_weight_balanced_linear_classifier(name, Classifier): """Test class weights with non-contiguous class labels.""" # this is run on classes, not instances, though this should be changed X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0], [1.0, 0.0]]) y = np.array([1, 1, 1, -1, -1]) classifier = Classifier() if hasattr(classifier, "n_iter"): # This is a very small dataset, default n_iter are likely to prevent # convergence classifier.set_params(n_iter=1000) if hasattr(classifier, "max_iter"): classifier.set_params(max_iter=1000) set_random_state(classifier) # Let the model compute the class frequencies classifier.set_params(class_weight='balanced') coef_balanced = classifier.fit(X, y).coef_.copy() # Count each label occurrence to reweight manually n_samples = len(y) n_classes = float(len(np.unique(y))) class_weight = {1: n_samples / (np.sum(y == 1) * n_classes), -1: n_samples / (np.sum(y == -1) * n_classes)} classifier.set_params(class_weight=class_weight) coef_manual = classifier.fit(X, y).coef_.copy() assert_allclose(coef_balanced, coef_manual) @ignore_warnings(category=(DeprecationWarning, FutureWarning)) def 
    # Check that fit does not change or mutate the constructor parameters.
    X, y = make_blobs(random_state=0, n_samples=9)
    # some want non-negative input
    X -= X.min()
    X = pairwise_estimator_convert_X(X, estimator_orig, kernel=rbf_kernel)
    estimator = clone(estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator, y)

    set_random_state(estimator)

    # Make a physical copy of the original estimator parameters before
    # fitting.
    params = estimator.get_params()
    original_params = deepcopy(params)

    # Fit the model
    estimator.fit(X, y)

    # Compare the state of the model parameters with the original parameters
    new_params = estimator.get_params()
    for param_name, original_value in original_params.items():
        new_value = new_params[param_name]

        # We should never change or mutate the internal state of input
        # parameters by default. To check this we use the joblib.hash function
        # that introspects recursively any subobjects to compute a checksum.
        # The only exception to this rule of immutable constructor parameters
        # is possible RandomState instance but in this check we explicitly
        # fixed the random_state params recursively to be integer seeds.
        assert_equal(hash(new_value), hash(original_value),
                     "Estimator %s should not change or mutate "
                     " the parameter %s from %s to %s during fit."
                     % (name, param_name, original_value, new_value))


@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_no_fit_attributes_set_in_init(name, Estimator):
    """Check that Estimator.__init__ doesn't set trailing-_ attributes."""
    # this check works on classes, not instances
    estimator = Estimator()
    for attr in dir(estimator):
        if attr.endswith("_") and not attr.startswith("__"):
            # This check is for properties, they can be listed in dir
            # while at the same time have hasattr return False as long
            # as the property getter raises an AttributeError
            assert_false(
                hasattr(estimator, attr),
                "By convention, attributes ending with '_' are "
                'estimated from data in scikit-learn. Consequently they '
                'should not be initialized in the constructor of an '
                'estimator but in the fit method. Attribute {!r} '
                'was found in estimator {}'.format(attr, name))


@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_sparsify_coefficients(name, estimator_orig):
    # Check that sparsify() produces sparse coef_ without changing
    # predictions, and that the sparse state survives pickling.
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2],
                  [2, 1], [-1, -2], [2, 2], [-2, -2]])
    y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
    est = clone(estimator_orig)

    est.fit(X, y)
    pred_orig = est.predict(X)

    # test sparsify with dense inputs
    est.sparsify()
    assert_true(sparse.issparse(est.coef_))
    pred = est.predict(X)
    assert_array_equal(pred, pred_orig)

    # pickle and unpickle with sparse coef_
    est = pickle.loads(pickle.dumps(est))
    assert_true(sparse.issparse(est.coef_))
    pred = est.predict(X)
    assert_array_equal(pred, pred_orig)


@ignore_warnings(category=DeprecationWarning)
def check_classifier_data_not_an_array(name, estimator_orig):
    # Classifier variant of the "data not an array" check.
    X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
    X = pairwise_estimator_convert_X(X, estimator_orig)
    y = [1, 1, 1, 2, 2, 2]
    y = multioutput_estimator_convert_y_2d(estimator_orig, y)
    check_estimators_data_not_an_array(name, estimator_orig, X, y)


@ignore_warnings(category=DeprecationWarning)
def check_regressor_data_not_an_array(name, estimator_orig):
    # Regressor variant of the "data not an array" check.
    X, y = _boston_subset(n_samples=50)
    X = pairwise_estimator_convert_X(X, estimator_orig)
    y = multioutput_estimator_convert_y_2d(estimator_orig, y)
    check_estimators_data_not_an_array(name, estimator_orig, X, y)


@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_estimators_data_not_an_array(name, estimator_orig, X, y):
    # Fitting on NotAnArray wrappers must give the same predictions as
    # fitting on the underlying arrays.
    if name in CROSS_DECOMPOSITION:
        raise SkipTest("Skipping check_estimators_data_not_an_array "
                       "for cross decomposition module as estimators "
                       "are not deterministic.")
    # separate estimators to control random seeds
    estimator_1 = clone(estimator_orig)
    estimator_2 = clone(estimator_orig)
    set_random_state(estimator_1)
    set_random_state(estimator_2)

    y_ = NotAnArray(np.asarray(y))
    X_ = NotAnArray(np.asarray(X))

    # fit
    estimator_1.fit(X_, y_)
    pred1 = estimator_1.predict(X_)
    estimator_2.fit(X, y)
    pred2 = estimator_2.predict(X)
    assert_allclose(pred1, pred2, atol=1e-2, err_msg=name)


def check_parameters_default_constructible(name, Estimator):
    # Check that the estimator is default-constructible, clonable, and
    # that __init__ only stores its parameters without modification.
    # this check works on classes, not instances
    classifier = LinearDiscriminantAnalysis()
    # test default-constructibility
    # get rid of deprecation warnings
    with ignore_warnings(category=(DeprecationWarning, FutureWarning)):
        if name in META_ESTIMATORS:
            estimator = Estimator(classifier)
        else:
            estimator = Estimator()
        # test cloning
        clone(estimator)
        # test __repr__
        repr(estimator)
        # test that set_params returns self
        assert_true(estimator.set_params() is estimator)

        # test if init does nothing but set parameters
        # this is important for grid_search etc.
        # We get the default parameters from init and then
        # compare these against the actual values of the attributes.

        # this comes from getattr. Gets rid of deprecation decorator.
        init = getattr(estimator.__init__, 'deprecated_original',
                       estimator.__init__)

        try:
            def param_filter(p):
                """Identify hyper parameters of an estimator"""
                return (p.name != 'self' and
                        p.kind != p.VAR_KEYWORD and
                        p.kind != p.VAR_POSITIONAL)

            init_params = [p for p in signature(init).parameters.values()
                           if param_filter(p)]
        except (TypeError, ValueError):
            # init is not a python function.
            # true for mixins
            return
        params = estimator.get_params()
        if name in META_ESTIMATORS:
            # they can need a non-default argument
            init_params = init_params[1:]

        for init_param in init_params:
            assert_not_equal(init_param.default, init_param.empty,
                             "parameter %s for %s has no default value"
                             % (init_param.name, type(estimator).__name__))
            assert_in(type(init_param.default),
                      [str, int, float, bool, tuple, type(None),
                       np.float64, types.FunctionType, Memory])
            if init_param.name not in params.keys():
                # deprecated parameter, not in get_params
                assert_true(init_param.default is None)
                continue

            if (issubclass(Estimator, BaseSGD) and
                    init_param.name in ['tol', 'max_iter']):
                # To remove in 0.21, when they get their future default values
                continue

            param_value = params[init_param.name]
            if isinstance(param_value, np.ndarray):
                assert_array_equal(param_value, init_param.default)
            else:
                assert_equal(param_value, init_param.default, init_param.name)


def multioutput_estimator_convert_y_2d(estimator, y):
    # Estimators in mono_output_task_error raise ValueError if y is of 1-D
    # Convert into a 2-D y for those estimators.
    if "MultiTask" in estimator.__class__.__name__:
        return np.reshape(y, (-1, 1))
    return y


@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_non_transformer_estimators_n_iter(name, estimator_orig):
    # Test that estimators that are not transformers with a parameter
    # max_iter, return the attribute of n_iter_ at least 1.

    # These models are dependent on external solvers like
    # libsvm and accessing the iter parameter is non-trivial.
    not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',
                            'RidgeClassifier', 'SVC', 'RandomizedLasso',
                            'LogisticRegressionCV', 'LinearSVC',
                            'LogisticRegression']

    # Tested in test_transformer_n_iter
    not_run_check_n_iter += CROSS_DECOMPOSITION
    if name in not_run_check_n_iter:
        return

    # LassoLars stops early for the default alpha=1.0 the iris dataset.
    if name == 'LassoLars':
        estimator = clone(estimator_orig).set_params(alpha=0.)
    else:
        estimator = clone(estimator_orig)
    if hasattr(estimator, 'max_iter'):
        iris = load_iris()
        X, y_ = iris.data, iris.target
        y_ = multioutput_estimator_convert_y_2d(estimator, y_)

        set_random_state(estimator, 0)
        if name == 'AffinityPropagation':
            # AffinityPropagation is unsupervised: fit takes no y
            estimator.fit(X)
        else:
            estimator.fit(X, y_)

        # HuberRegressor depends on scipy.optimize.fmin_l_bfgs_b
        # which doesn't return a n_iter for old versions of SciPy.
        if not (name == 'HuberRegressor' and estimator.n_iter_ is None):
            assert_greater_equal(estimator.n_iter_, 1)


@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_transformer_n_iter(name, estimator_orig):
    # Test that transformers with a parameter max_iter, return the
    # attribute of n_iter_ at least 1.
    estimator = clone(estimator_orig)
    if hasattr(estimator, "max_iter"):
        if name in CROSS_DECOMPOSITION:
            # Check using default data
            X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
            y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]

        else:
            X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                               random_state=0, n_features=2, cluster_std=0.1)
            X -= X.min() - 0.1
        set_random_state(estimator, 0)
        estimator.fit(X, y_)

        # These return a n_iter per component.
        if name in CROSS_DECOMPOSITION:
            for iter_ in estimator.n_iter_:
                assert_greater_equal(iter_, 1)
        else:
            assert_greater_equal(estimator.n_iter_, 1)


@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_get_params_invariance(name, estimator_orig):
    # Checks if get_params(deep=False) is a subset of get_params(deep=True)
    class T(BaseEstimator):
        """Mock classifier
        """

        def __init__(self):
            pass

        def fit(self, X, y):
            return self

        def transform(self, X):
            return X

    e = clone(estimator_orig)

    shallow_params = e.get_params(deep=False)
    deep_params = e.get_params(deep=True)

    assert_true(all(item in deep_params.items() for item in
                    shallow_params.items()))


@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_classifiers_regression_target(name, estimator_orig):
    # Check if classifier throws an exception when fed regression targets

    boston = load_boston()
    X, y = boston.data, boston.target
    e = clone(estimator_orig)
    msg = 'Unknown label type: '
    assert_raises_regex(ValueError, msg, e.fit, X, y)


@ignore_warnings(category=(DeprecationWarning, FutureWarning))
def check_decision_proba_consistency(name, estimator_orig):
    # Check whether an estimator having both decision_function and
    # predict_proba methods has outputs with perfect rank correlation.

    centers = [(2, 2), (4, 4)]
    X, y = make_blobs(n_samples=100, random_state=0, n_features=4,
                      centers=centers, cluster_std=1.0, shuffle=True)
    X_test = np.random.randn(20, 2) + 4
    estimator = clone(estimator_orig)

    if (hasattr(estimator, "decision_function") and
            hasattr(estimator, "predict_proba")):

        estimator.fit(X, y)
        a = estimator.predict_proba(X_test)[:, 1]
        b = estimator.decision_function(X_test)
        # identical ranking implies the two scores are monotonically related
        assert_array_equal(rankdata(a), rankdata(b))
bsd-3-clause
architecture-building-systems/CityEnergyAnalyst
cea/technologies/solar/photovoltaic.py
1
39532
""" Photovoltaic """ import os import time from itertools import repeat from math import * from multiprocessing import Pool import numpy as np import pandas as pd from geopandas import GeoDataFrame as gdf from scipy import interpolate import cea.config import cea.inputlocator import cea.utilities.parallel from cea.analysis.costs.equations import calc_capex_annualized from cea.constants import HOURS_IN_YEAR from cea.technologies.solar import constants from cea.utilities import epwreader from cea.utilities import solar_equations from cea.utilities.standardize_coordinates import get_lat_lon_projected_shapefile __author__ = "Jimeno A. Fonseca" __copyright__ = "Copyright 2016, Architecture and Building Systems - ETH Zurich" __credits__ = ["Jimeno A. Fonseca, Shanshan Hsieh"] __license__ = "MIT" __version__ = "0.1" __maintainer__ = "Daren Thomas" __email__ = "cea@arch.ethz.ch" __status__ = "Production" def calc_PV(locator, config, latitude, longitude, weather_data, datetime_local, building_name): """ This function first determines the surface area with sufficient solar radiation, and then calculates the optimal tilt angles of panels at each surface location. The panels are categorized into groups by their surface azimuths, tilt angles, and global irradiation. In the last, electricity generation from PV panels of each group is calculated. 
:param locator: An InputLocator to locate input files :type locator: cea.inputlocator.InputLocator :param radiation_path: solar insulation data on all surfaces of each building (path :type radiation_path: String :param metadata_csv: data of sensor points measuring solar insulation of each building :type metadata_csv: .csv :param latitude: latitude of the case study location :type latitude: float :param longitude: longitude of the case study location :type longitude: float :param weather_path: path to the weather data file of the case study location :type weather_path: .epw :param building_name: list of building names in the case study :type building_name: Series :return: Building_PV.csv with PV generation potential of each building, Building_sensors.csv with sensor data of each PV panel. """ t0 = time.perf_counter() radiation_path = locator.get_radiation_building_sensors(building_name) metadata_csv_path = locator.get_radiation_metadata(building_name) # solar properties solar_properties = solar_equations.calc_sun_properties(latitude, longitude, weather_data, datetime_local, config) print('calculating solar properties done') # calculate properties of PV panel panel_properties_PV = calc_properties_PV_db(locator.get_database_conversion_systems(), config) print('gathering properties of PV panel') # select sensor point with sufficient solar radiation max_annual_radiation, annual_radiation_threshold, sensors_rad_clean, sensors_metadata_clean = \ solar_equations.filter_low_potential(radiation_path, metadata_csv_path, config) print('filtering low potential sensor points done') # set the maximum roof coverage if config.solar.custom_roof_coverage: max_roof_coverage = config.solar.max_roof_coverage else: max_roof_coverage = 1.0 if not sensors_metadata_clean.empty: if not config.solar.custom_tilt_angle: # calculate optimal angle and tilt for panels sensors_metadata_cat = solar_equations.optimal_angle_and_tilt(sensors_metadata_clean, latitude, solar_properties, 
max_annual_radiation, panel_properties_PV, max_roof_coverage) print('calculating optimal tilt angle and separation done') else: # calculate spacing required by user-supplied tilt angle for panels sensors_metadata_cat = solar_equations.calc_spacing_custom_angle(sensors_metadata_clean, solar_properties, max_annual_radiation, panel_properties_PV, config.solar.panel_tilt_angle, max_roof_coverage) print('calculating separation for custom tilt angle done') # group the sensors with the same tilt, surface azimuth, and total radiation sensor_groups = solar_equations.calc_groups(sensors_rad_clean, sensors_metadata_cat) print('generating groups of sensor points done') final = calc_pv_generation(sensor_groups, weather_data, datetime_local, solar_properties, latitude, panel_properties_PV) final.to_csv(locator.PV_results(building=building_name), index=True, float_format='%.2f') # print PV generation potential sensors_metadata_cat.to_csv(locator.PV_metadata_results(building=building_name), index=True, index_label='SURFACE', float_format='%.2f', na_rep='nan') # print selected metadata of the selected sensors print(building_name, 'done - time elapsed: %.2f seconds' % (time.perf_counter() - t0)) else: # This loop is activated when a building has not sufficient solar potential final = pd.DataFrame( {'Date': datetime_local, 'PV_walls_north_E_kWh': 0, 'PV_walls_north_m2': 0, 'PV_walls_south_E_kWh': 0, 'PV_walls_south_m2': 0, 'PV_walls_east_E_kWh': 0, 'PV_walls_east_m2': 0, 'PV_walls_west_E_kWh': 0, 'PV_walls_west_m2': 0, 'PV_roofs_top_E_kWh': 0, 'PV_roofs_top_m2': 0, 'E_PV_gen_kWh': 0, 'Area_PV_m2': 0, 'radiation_kWh': 0}, index=range(HOURS_IN_YEAR)) final.to_csv(locator.PV_results(building=building_name), index=False, float_format='%.2f', na_rep='nan') sensors_metadata_cat = pd.DataFrame( {'SURFACE': 0, 'AREA_m2': 0, 'BUILDING': 0, 'TYPE': 0, 'Xcoor': 0, 'Xdir': 0, 'Ycoor': 0, 'Ydir': 0, 'Zcoor': 0, 'Zdir': 0, 'orientation': 0, 'total_rad_Whm2': 0, 'tilt_deg': 0, 'B_deg': 0, 
'array_spacing_m': 0, 'surface_azimuth_deg': 0, 'area_installed_module_m2': 0, 'CATteta_z': 0, 'CATB': 0, 'CATGB': 0, 'type_orientation': 0}, index=range(2)) sensors_metadata_cat.to_csv(locator.PV_metadata_results(building=building_name), index=False, float_format='%.2f', na_rep='nan') # ========================= # PV electricity generation # ========================= def calc_pv_generation(sensor_groups, weather_data, date_local, solar_properties, latitude, panel_properties_PV): """ To calculate the electricity generated from PV panels. :param hourly_radiation: mean hourly radiation of sensors in each group [Wh/m2] :type hourly_radiation: dataframe :param number_groups: number of groups of sensor points :type number_groups: float :param number_points: number of sensor points in each group :type number_points: float :param prop_observers: mean values of sensor properties of each group of sensors :type prop_observers: dataframe :param weather_data: weather data read from the epw file :type weather_data: dataframe :param g: declination :type g: float :param Sz: zenith angle :type Sz: float :param Az: solar azimuth :param ha: hour angle :param latitude: latitude of the case study location :return: """ # local variables number_groups = sensor_groups['number_groups'] # number of groups of sensor points prop_observers = sensor_groups['prop_observers'] # mean values of sensor properties of each group of sensors hourly_radiation = sensor_groups['hourlydata_groups'] # mean hourly radiation of sensors in each group [Wh/m2] # convert degree to radians lat = radians(latitude) g_rad = np.radians(solar_properties.g) ha_rad = np.radians(solar_properties.ha) Sz_rad = np.radians(solar_properties.Sz) Az_rad = np.radians(solar_properties.Az) # empty list to store results list_groups_area = [0 for i in range(number_groups)] total_el_output_PV_kWh = [0 for i in range(number_groups)] total_radiation_kWh = [0 for i in range(number_groups)] potential = 
pd.DataFrame(index=range(HOURS_IN_YEAR)) panel_orientations = ['walls_south', 'walls_north', 'roofs_top', 'walls_east', 'walls_west'] for panel_orientation in panel_orientations: potential['PV_' + panel_orientation + '_E_kWh'] = 0 potential['PV_' + panel_orientation + '_m2'] = 0 eff_nom = panel_properties_PV['PV_n'] Bref = panel_properties_PV['PV_Bref'] misc_losses = panel_properties_PV['misc_losses'] # cabling, resistances etc.. for group in prop_observers.index.values: # calculate radiation types (direct/diffuse) in group radiation_Wperm2 = solar_equations.cal_radiation_type(group, hourly_radiation, weather_data) # read panel properties of each group teta_z_deg = prop_observers.loc[group, 'surface_azimuth_deg'] tot_module_area_m2 = prop_observers.loc[group, 'area_installed_module_m2'] tilt_angle_deg = prop_observers.loc[group, 'B_deg'] # tilt angle of panels # degree to radians tilt_rad = radians(tilt_angle_deg) # tilt angle teta_z_deg = radians(teta_z_deg) # surface azimuth # calculate effective indicent angles necessary teta_rad = np.vectorize(solar_equations.calc_angle_of_incidence)(g_rad, lat, ha_rad, tilt_rad, teta_z_deg) teta_ed_rad, teta_eg_rad = calc_diffuseground_comp(tilt_rad) absorbed_radiation_Wperm2 = np.vectorize(calc_absorbed_radiation_PV)(radiation_Wperm2.I_sol, radiation_Wperm2.I_direct, radiation_Wperm2.I_diffuse, tilt_rad, Sz_rad, teta_rad, teta_ed_rad, teta_eg_rad, panel_properties_PV) T_cell_C = np.vectorize(calc_cell_temperature)(absorbed_radiation_Wperm2, weather_data.drybulb_C, panel_properties_PV) el_output_PV_kW = np.vectorize(calc_PV_power)(absorbed_radiation_Wperm2, T_cell_C, eff_nom, tot_module_area_m2, Bref, misc_losses) # write results from each group panel_orientation = prop_observers.loc[group, 'type_orientation'] potential['PV_' + panel_orientation + '_E_kWh'] = potential[ 'PV_' + panel_orientation + '_E_kWh'] + el_output_PV_kW potential['PV_' + panel_orientation + '_m2'] = potential['PV_' + panel_orientation + '_m2'] + 
                                                       tot_module_area_m2

        # aggregate results from all modules
        list_groups_area[group] = tot_module_area_m2
        total_el_output_PV_kWh[group] = el_output_PV_kW
        total_radiation_kWh[group] = (radiation_Wperm2['I_sol'] * tot_module_area_m2 / 1000)  # kWh

    # check for missing groups and asign 0 as el_output_PV_kW
    # panel_orientations = ['walls_south', 'walls_north', 'roofs_top', 'walls_east', 'walls_west']
    # for panel_orientation in panel_orientations:
    #     if panel_orientation not in prop_observers['type_orientation'].values:
    #         potential['PV_' + panel_orientation + '_E_kWh'] = 0
    #         potential['PV_' + panel_orientation + '_m2'] = 0

    potential['E_PV_gen_kWh'] = sum(total_el_output_PV_kWh)
    potential['radiation_kWh'] = sum(total_radiation_kWh).values
    potential['Area_PV_m2'] = sum(list_groups_area)
    potential['Date'] = date_local
    potential = potential.set_index('Date')

    return potential


def calc_cell_temperature(absorbed_radiation_Wperm2, T_external_C, panel_properties_PV):
    """
    calculates cell temperatures based on the absorbed radiation

    :param absorbed_radiation_Wperm2: absorbed radiation on panel
    :type absorbed_radiation_Wperm2: np.array
    :param T_external_C: drybulb temperature from the weather file
    :type T_external_C: series
    :param panel_properties_PV: panel property from the supply system database
    :type panel_properties_PV: dataframe
    :return T_cell_C: cell temprature of PV panels
    :rtype T_cell_C: series
    """
    NOCT = panel_properties_PV['PV_noct']
    # temperature of cell
    T_cell_C = T_external_C + absorbed_radiation_Wperm2 * (NOCT - 20) / (
        800)  # assuming linear temperature rise vs radiation according to NOCT condition
    return T_cell_C


def calc_angle_of_incidence(g, lat, ha, tilt, teta_z):
    """
    To calculate angle of incidence from solar vector and surface normal vector.
    (Validated with Sandia pvlib.irrandiance.aoi)

    :param lat: latitude of the loacation of case study [radians]
    :param g: declination of the solar position [radians]
    :param ha: hour angle [radians]
    :param tilt: panel surface tilt angle [radians]
    :param teta_z: panel surface azimuth angle [radians]
    :type lat: float
    :type g: float
    :type ha: float
    :type tilt: float
    :type teta_z: float
    :return teta_B: angle of incidence [radians]
    :rtype teta_B: float

    .. [Sproul, A. B., 2017] Sproul, A.B. (2007). Derivation of the solar geometric relationships using
        vector analysis. Renewable Energy, 32(7), 1187-1205.
    """
    # surface normal vector
    n_E = sin(tilt) * sin(teta_z)
    n_N = sin(tilt) * cos(teta_z)
    n_Z = cos(tilt)
    # solar vector
    s_E = -cos(g) * sin(ha)
    s_N = sin(g) * cos(lat) - cos(g) * sin(lat) * cos(ha)
    s_Z = cos(g) * cos(lat) * cos(ha) + sin(g) * sin(lat)

    # angle of incidence = arccos of the dot product of the two unit vectors
    teta_B = acos(n_E * s_E + n_N * s_N + n_Z * s_Z)
    return teta_B


def calc_diffuseground_comp(tilt_radians):
    """
    To calculate reflected radiation and diffuse radiation.

    :param tilt_radians: surface tilt angle [rad]
    :type tilt_radians: float
    :return teta_ed: effective incidence angle from diffuse radiation [rad]
    :return teta_eg: effective incidence angle from ground-reflected radiation [rad]
    :rtype teta_ed: float
    :rtype teta_eg: float

    :References: Duffie, J. A. and Beckman, W. A. (2013) Radiation Transmission through Glazing: Absorbed Radiation,
        in Solar Engineering of Thermal Processes, Fourth Edition, John Wiley & Sons, Inc., Hoboken, NJ, USA.
        doi: 10.1002/9781118671603.ch5
    """
    tilt = degrees(tilt_radians)
    # empirical fits from Duffie & Beckman, eqs. (5.4.2) and (5.4.1)
    teta_ed = 59.68 - 0.1388 * tilt + 0.001497 * tilt ** 2  # [degrees] (5.4.2)
    teta_eG = 90 - 0.5788 * tilt + 0.002693 * tilt ** 2  # [degrees] (5.4.1)
    return radians(teta_ed), radians(teta_eG)


def calc_absorbed_radiation_PV(I_sol, I_direct, I_diffuse, tilt, Sz, teta, tetaed, tetaeg, panel_properties_PV):
    """

    :param I_sol: total solar radiation [Wh/m2]
    :param I_direct: direct solar radiation [Wh/m2]
    :param I_diffuse: diffuse solar radiation [Wh/m2]
    :param tilt: solar panel tilt angle [rad]
    :param Sz: solar zenith angle [rad]
    :param teta: angle of incidence [rad]
    :param tetaed: effective incidence angle from diffuse radiation [rad]
    :param tetaeg: effective incidence angle from ground-reflected radiation [rad]
    :type I_sol: float
    :type I_direct: float
    :type I_diffuse: float
    :type tilt: float
    :type Sz: float
    :type teta: float
    :type tetaed: float
    :type tetaeg: float
    :param panel_properties_PV: properties of the PV panel
    :type panel_properties_PV: dataframe
    :return:

    :References: Duffie, J. A. and Beckman, W. A. (2013) Radiation Transmission through Glazing: Absorbed Radiation,
        in Solar Engineering of Thermal Processes, Fourth Edition, John Wiley & Sons, Inc., Hoboken, NJ, USA.
        doi: 10.1002/9781118671603.ch5
    """
    # read variables
    n = constants.n  # refractive index of glass
    Pg = constants.Pg  # ground reflectance
    K = constants.K  # glazing extinction coefficient
    NOCT = panel_properties_PV['PV_noct']
    a0 = panel_properties_PV['PV_a0']
    a1 = panel_properties_PV['PV_a1']
    a2 = panel_properties_PV['PV_a2']
    a3 = panel_properties_PV['PV_a3']
    a4 = panel_properties_PV['PV_a4']
    L = panel_properties_PV['PV_th']

    # calcualte ratio of beam radiation on a tilted plane
    # to avoid inconvergence when I_sol = 0
    # clamp both angles into (0, 90) degrees before taking cosines below
    lim1 = radians(0)
    lim2 = radians(90)
    lim3 = radians(89.999)

    if teta < lim1:
        teta = min(lim3, abs(teta))
    if teta >= lim2:
        teta = lim3

    if Sz < lim1:
        Sz = min(lim3, abs(Sz))
    if Sz >= lim2:
        Sz = lim3

    # Rb: ratio of beam radiation of tilted surface to that on horizontal surface
    if Sz <= radians(85):  # Sz is Zenith angle   # TODO: FIND REFERENCE
        Rb = cos(teta) / cos(Sz)
    else:
        Rb = 0  # Assume there is no direct radiation when the sun is close to the horizon.

    # calculate air mass modifier
    m = 1 / cos(Sz)  # air mass
    M = a0 + a1 * m + a2 * m ** 2 + a3 * m ** 3 + a4 * m ** 4  # air mass modifier

    # incidence angle modifier for direct (beam) radiation
    teta_r = asin(sin(teta) / n)  # refraction angle in radians(aproximation accrding to Soteris A.) (5.1.4)
    Ta_n = exp(-K * L) * (1 - ((n - 1) / (n + 1)) ** 2)
    if teta < radians(90):  # 90 degrees in radians
        part1 = teta_r + teta
        part2 = teta_r - teta
        Ta_B = exp((-K * L) / cos(teta_r)) * (
                1 - 0.5 * ((sin(part2) ** 2) / (sin(part1) ** 2) + (tan(part2) ** 2) / (tan(part1) ** 2)))
        kteta_B = Ta_B / Ta_n
    else:
        kteta_B = 0

    # incidence angle modifier for diffuse radiation
    teta_r = asin(sin(tetaed) / n)  # refraction angle for diffuse radiation [rad]
    part1 = teta_r + tetaed
    part2 = teta_r - tetaed
    Ta_D = exp((-K * L) / cos(teta_r)) * (
            1 - 0.5 * ((sin(part2) ** 2) / (sin(part1) ** 2) + (tan(part2) ** 2) / (tan(part1) ** 2)))
    kteta_D = Ta_D / Ta_n

    # incidence angle modifier for ground-reflected radiation
    teta_r = asin(sin(tetaeg) / n)  # refraction angle for ground-reflected radiation [rad]
    part1 = teta_r + tetaeg
    part2 = teta_r - tetaeg
    Ta_eG = exp((-K * L) / cos(teta_r)) * (
            1 - 0.5 * ((sin(part2) ** 2) / (sin(part1) ** 2) + (tan(part2) ** 2) / (tan(part1) ** 2)))
    kteta_eG = Ta_eG / Ta_n

    # absorbed solar radiation: beam + sky-diffuse + ground-reflected components
    absorbed_radiation_Wperm2 = M * Ta_n * (
            kteta_B * I_direct * Rb + kteta_D * I_diffuse * (1 + cos(tilt)) / 2 + kteta_eG * I_sol * Pg * (
            1 - cos(tilt)) / 2)  # [W/m2] (5.12.1)
    if absorbed_radiation_Wperm2 < 0.0:  # when points are 0 and too much losses
        # print ('the absorbed radiation', absorbed_radiation_Wperm2 ,'is negative, please check calc_absorbed_radiation_PVT')
        absorbed_radiation_Wperm2 = 0.0

    return absorbed_radiation_Wperm2


def calc_PV_power(absorbed_radiation_Wperm2, T_cell_C, eff_nom, tot_module_area_m2, Bref_perC, misc_losses):
    """
    To calculate the power production of PV panels.

    :param absorbed_radiation_Wperm2: absorbed radiation [W/m2]
    :type absorbed_radiation_Wperm2: float
    :param T_cell_C: cell temperature [degree]
    :param eff_nom: nominal efficiency of PV module [-]
    :type eff_nom: float
    :param tot_module_area_m2: total PV module area [m2]
    :type tot_module_area_m2: float
    :param Bref_perC: cell maximum power temperature coefficient [degree C^(-1)]
    :type Bref_perC: float
    :param misc_losses: expected system loss [-]
    :type misc_losses: float
    :return el_output_PV_kW: Power production [kW]
    :rtype el_output_PV_kW: float

    ..[Osterwald, C. R., 1986] Osterwald, C. R. (1986). Translation of device performance measurements to reference
        conditions. Solar Cells, 18, 269-279.
    """
    T_standard_C = 25.0  # temperature at the standard testing condition
    el_output_PV_kW = eff_nom * tot_module_area_m2 * absorbed_radiation_Wperm2 * \
                      (1 - Bref_perC * (T_cell_C - T_standard_C)) * (1 - misc_losses) / 1000

    return el_output_PV_kW


# ============================
# Optimal angle and tilt
# ============================

def optimal_angle_and_tilt(sensors_metadata_clean, latitude, worst_sh, worst_Az, transmissivity,
                           Max_Isol, module_length):
    """
    This function first determines the optimal tilt angle, row spacing and surface azimuth of panels installed at
    each sensor point. Secondly, the installed PV module areas at each sensor point are calculated. Lastly, all the
    modules are categorized with its surface azimuth, tilt angle, and yearly radiation. The output will then be
    used to calculate the absorbed radiation.
:param sensors_metadata_clean: data of filtered sensor points measuring solar insulation of each building :type sensors_metadata_clean: dataframe :param latitude: latitude of the case study location :type latitude: float :param worst_sh: solar elevation at the worst hour [degree] :type worst_sh: float :param worst_Az: solar azimuth at the worst hour [degree] :type worst_Az: float :param transmissivity: transmissivity: clearness index [-] :type transmissivity: float :param module_length: length of the PV module [m] :type module_length: float :param Max_Isol: max radiation potential (equals to global horizontal radiation) [Wh/m2/year] :type Max_Isol: float :returns sensors_metadata_clean: data of filtered sensor points categorized with module tilt angle, array spacing, surface azimuth, installed PV module area of each sensor point and the categories :rtype sensors_metadata_clean: dataframe :Assumptions: 1) Tilt angle: If the sensor is on tilted roof, the panel will have the same tilt as the roof. If the sensor is on a wall, the tilt angle is 90 degree. Tilt angles for flat roof is determined using the method from Quinn et al. 2) Row spacing: Determine the row spacing by minimizing the shadow according to the solar elevation and azimuth at the worst hour of the year. The worst hour is a global variable defined by users. 3) Surface azimuth (orientation) of panels: If the sensor is on a tilted roof, the orientation of the panel is the same as the roof. Sensors on flat roofs are all south facing. """ # calculate panel tilt angle (B) for flat roofs (tilt < 5 degrees), slope roofs and walls. 
optimal_angle_flat = calc_optimal_angle(180, latitude, transmissivity) # assume surface azimuth = 180 (N,E), south facing sensors_metadata_clean['tilt'] = np.vectorize(acos)(sensors_metadata_clean['Zdir']) # surface tilt angle in rad sensors_metadata_clean['tilt'] = np.vectorize(degrees)( sensors_metadata_clean['tilt']) # surface tilt angle in degrees sensors_metadata_clean['B'] = np.where(sensors_metadata_clean['tilt'] >= 5, sensors_metadata_clean['tilt'], degrees(optimal_angle_flat)) # panel tilt angle in degrees # calculate spacing and surface azimuth of the panels for flat roofs optimal_spacing_flat = calc_optimal_spacing(worst_sh, worst_Az, optimal_angle_flat, module_length) sensors_metadata_clean['array_s'] = np.where(sensors_metadata_clean['tilt'] >= 5, 0, optimal_spacing_flat) sensors_metadata_clean['surface_azimuth'] = np.vectorize(calc_surface_azimuth)(sensors_metadata_clean['Xdir'], sensors_metadata_clean['Ydir'], sensors_metadata_clean[ 'B']) # degrees # calculate the surface area required to install one pv panel on flat roofs with defined tilt angle and array spacing surface_area_flat = module_length * ( sensors_metadata_clean.array_s / 2 + module_length * [cos(optimal_angle_flat)]) # calculate the pv module area within the area of each sensor point sensors_metadata_clean['area_module'] = np.where(sensors_metadata_clean['tilt'] >= 5, sensors_metadata_clean.AREA_m2, module_length ** 2 * ( sensors_metadata_clean.AREA_m2 / surface_area_flat)) # categorize the sensors by surface_azimuth, B, GB result = np.vectorize(solar_equations.calc_categoriesroof)(sensors_metadata_clean.surface_azimuth, sensors_metadata_clean.B, sensors_metadata_clean.total_rad_Whm2, Max_Isol) sensors_metadata_clean['CATteta_z'] = result[0] sensors_metadata_clean['CATB'] = result[1] sensors_metadata_clean['CATGB'] = result[2] return sensors_metadata_clean def calc_optimal_angle(teta_z, latitude, transmissivity): """ To calculate the optimal tilt angle of the solar panels. 
:param teta_z: surface azimuth, 0 degree south (east negative) or 0 degree north (east positive) :type teta_z: float :param latitude: latitude of the case study site :type latitude: float :param transmissivity: clearness index [-] :type transmissivity: float :return abs(b): optimal tilt angle [radians] :rtype abs(b): float ..[Quinn et al., 2013] S.W.Quinn, B.Lehman.A simple formula for estimating the optimum tilt angles of photovoltaic panels. 2013 IEEE 14th Work Control Model Electron, Jun, 2013, pp.1-8 """ if transmissivity <= 0.15: gKt = 0.977 elif 0.15 < transmissivity <= 0.7: gKt = 1.237 - 1.361 * transmissivity else: gKt = 0.273 Tad = 0.98 # transmittance-absorptance product of the diffuse radiation Tar = 0.97 # transmittance-absorptance product of the reflected radiation Pg = 0.2 # ground reflectance of 0.2 l = radians(latitude) a = radians(teta_z) b = atan((cos(a) * tan(l)) * (1 / (1 + ((Tad * gKt - Tar * Pg) / (2 * (1 - gKt)))))) # eq.(11) return abs(b) def calc_optimal_spacing(Sh, Az, tilt_angle, module_length): """ To calculate the optimal spacing between each panel to avoid shading. :param Sh: Solar elevation at the worst hour [degree] :type Sh: float :param Az: Solar Azimuth [degree] :type Az: float :param tilt_angle: optimal tilt angle for panels on flat surfaces [degree] :type tilt_angle: float :param module_length: [m] :type module_length: float :return D: optimal distance in [m] :rtype D: float """ h = module_length * sin(tilt_angle) D1 = h / tan(radians(Sh)) D = max(D1 * cos(radians(180 - Az)), D1 * cos(radians(Az - 180))) return D # def calc_categoriesroof(teta_z, B, GB, Max_Isol): # """ # To categorize solar panels by the surface azimuth, tilt angle and yearly radiation. 
#     :param teta_z: surface azimuth [degree], 0 degree north (east positive, west negative)
#     :type teta_z: float
#     :param B: solar panel tile angle [degree]
#     :type B: float
#     :param GB: yearly radiation of sensors [Wh/m2/year]
#     :type GB: float
#     :param Max_Isol: yearly global horizontal radiation [Wh/m2/year]
#     :type Max_Isol: float
#     :return CATteta_z: category of surface azimuth
#     :rtype CATteta_z: float
#     :return CATB: category of tilt angle
#     :rtype CATB: float
#     :return CATBG: category of yearly radiation
#     :rtype CATBG: float
#     """
#     if -122.5 < teta_z <= -67:
#         CATteta_z = 1
#     elif -67.0 < teta_z <= -22.5:
#         CATteta_z = 3
#     elif -22.5 < teta_z <= 22.5:
#         CATteta_z = 5
#     elif 22.5 < teta_z <= 67:
#         CATteta_z = 4
#     elif 67.0 <= teta_z <= 122.5:
#         CATteta_z = 2
#     else:
#         CATteta_z = 6
#     B = degrees(B)
#     if 0 < B <= 5:
#         CATB = 1  # flat roof
#     elif 5 < B <= 15:
#         CATB = 2  # tilted 5-15 degrees
#     elif 15 < B <= 25:
#         CATB = 3  # tilted 15-25 degrees
#     elif 25 < B <= 40:
#         CATB = 4  # tilted 25-40 degrees
#     elif 40 < B <= 60:
#         CATB = 5  # tilted 40-60 degrees
#     elif B > 60:
#         CATB = 6  # tilted >60 degrees
#     else:
#         CATB = None
#         print('B not in expected range')
#
#     GB_percent = GB / Max_Isol
#     if 0 < GB_percent <= 0.25:
#         CATGB = 1
#     elif 0.25 < GB_percent <= 0.50:
#         CATGB = 2
#     elif 0.50 < GB_percent <= 0.75:
#         CATGB = 3
#     elif 0.75 < GB_percent <= 0.90:
#         CATGB = 4
#     elif 0.90 < GB_percent:
#         CATGB = 5
#     else:
#         CATGB = None
#         print('GB not in expected range')
#
#     return CATteta_z, CATB, CATGB


def calc_surface_azimuth(xdir, ydir, B):
    """
    Calculate surface azimuth from the surface normal vector (x,y,z) and tilt angle (B).
    Following the geological sign convention, an azimuth of 0 and 360 degree represents north, 90 degree is east.

    :param xdir: surface normal vector x in (x,y,z) representing east-west direction
    :param ydir: surface normal vector y in (x,y,z) representing north-south direction
    :param B: surface tilt angle in degree
    :type xdir: float
    :type ydir: float
    :type B: float
    :returns surface azimuth: the azimuth of the surface of a solar panel in degree
    :rtype surface_azimuth: float
    """
    B = radians(B)
    # NOTE(review): asin raises ValueError when |xdir / sin(B)| > 1 and sin(B) is 0 for
    # a perfectly flat surface (B == 0) — presumably callers only pass tilted surfaces;
    # TODO confirm against the caller in optimal_angle_and_tilt.
    teta_z = degrees(asin(xdir / sin(B)))
    # set the surface azimuth with on the sing convention (E,N)=(+,+)
    if xdir < 0:
        if ydir < 0:
            surface_azimuth = 180 + teta_z  # (xdir,ydir) = (-,-)
        else:
            surface_azimuth = 360 + teta_z  # (xdir,ydir) = (-,+)
    elif ydir < 0:
        surface_azimuth = 180 + teta_z  # (xdir,ydir) = (+,-)
    else:
        surface_azimuth = teta_z  # (xdir,ydir) = (+,+)
    return surface_azimuth  # degree


# ============================
# properties of module
# ============================

# TODO: Delete when done


def calc_properties_PV_db(database_path, config):
    """
    To assign PV module properties according to panel types.

    :param type_PVpanel: type of PV panel used
    :type type_PVpanel: string
    :return: dict with Properties of the panel taken form the database
    """
    type_PVpanel = config.solar.type_PVpanel
    data = pd.read_excel(database_path, sheet_name="PV")
    # keep only the row matching the requested panel code and expose it as a plain dict
    panel_properties = data[data['code'] == type_PVpanel].reset_index().T.to_dict()[0]

    return panel_properties


# investment and maintenance costs
# FIXME: it looks like this function is never used!!! (REMOVE)
def calc_Cinv_pv(total_module_area_m2, locator, technology=0):
    """
    To calculate capital cost of PV modules, assuming 20 year system lifetime.

    :param P_peak: installed capacity of PV module [kW]
    :return InvCa: capital cost of the installed PV module [CHF/Y]
    """
    PV_cost_data = pd.read_excel(locator.get_database_conversion_systems(), sheet_name="PV")
    technology_code = list(set(PV_cost_data['code']))
    PV_cost_data = PV_cost_data[PV_cost_data['code'] == technology_code[technology]]
    nominal_efficiency = PV_cost_data[PV_cost_data['code'] == technology_code[technology]]['PV_n'].max()
    P_nominal_W = total_module_area_m2 * (constants.STC_RADIATION_Wperm2 * nominal_efficiency)

    # if the Q_design is below the lowest capacity available for the technology, then it is replaced by the least
    # capacity for the corresponding technology from the database
    if P_nominal_W < PV_cost_data['cap_min'].values[0]:
        P_nominal_W = PV_cost_data['cap_min'].values[0]
    PV_cost_data = PV_cost_data[
        (PV_cost_data['cap_min'] <= P_nominal_W) & (PV_cost_data['cap_max'] > P_nominal_W)]

    Inv_a = PV_cost_data.iloc[0]['a']
    Inv_b = PV_cost_data.iloc[0]['b']
    Inv_c = PV_cost_data.iloc[0]['c']
    Inv_d = PV_cost_data.iloc[0]['d']
    Inv_e = PV_cost_data.iloc[0]['e']
    Inv_IR = PV_cost_data.iloc[0]['IR_%']
    Inv_LT = PV_cost_data.iloc[0]['LT_yr']
    Inv_OM = PV_cost_data.iloc[0]['O&M_%'] / 100

    # investment-cost curve: a + b * P^c + (d + e * P) * ln(P)
    InvC = Inv_a + Inv_b * (P_nominal_W) ** Inv_c + (Inv_d + Inv_e * P_nominal_W) * log(P_nominal_W)

    Capex_a_PV_USD = calc_capex_annualized(InvC, Inv_IR, Inv_LT)
    Opex_fixed_PV_USD = InvC * Inv_OM
    Capex_PV_USD = InvC

    return Capex_a_PV_USD, Opex_fixed_PV_USD, Capex_PV_USD, P_nominal_W


# remuneration scheme
def calc_Crem_pv(E_nom):
    """
    Calculates KEV (Kostendeckende Einspeise - Verguetung) for solar PV and PVT.
Therefore, input the nominal capacity of EACH installation and get the according KEV as return in Rp/kWh :param E_nom: Nominal Capacity of solar panels (PV or PVT) [Wh] :type E_nom: float :return KEV_obtained_in_RpPerkWh: KEV remuneration [Rp/kWh] :rtype KEV_obtained_in_RpPerkWh: float """ # TODO: change input argument to area_installed and then calculate the nominal capacity within this function, see calc_Cinv_pv KEV_regime = [0, 0, 20.4, 20.4, 20.4, 20.4, 20.4, 20.4, 19.7, 19.3, 19, 18.9, 18.7, 18.6, 18.5, 18.1, 17.9, 17.8, 17.8, 17.7, 17.7, 17.7, 17.6, 17.6] P_installed_in_kW = [0, 9.99, 10, 12, 15, 20, 29, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500, 750, 1000, 1500, 2000, 1000000] KEV_interpolated_kW = interpolate.interp1d(P_installed_in_kW, KEV_regime, kind="linear") KEV_obtained_in_RpPerkWh = 0 if (E_nom / 1000) > P_installed_in_kW[-1]: number_of_installations = int(ceil(E_nom / P_installed_in_kW[-1])) E_nom_per_chiller = E_nom / number_of_installations for i in range(number_of_installations): KEV_obtained_in_RpPerkWh = KEV_obtained_in_RpPerkWh + KEV_interpolated_kW(E_nom_per_chiller / 1000.0) else: KEV_obtained_in_RpPerkWh = KEV_obtained_in_RpPerkWh + KEV_interpolated_kW(E_nom / 1000.0) return KEV_obtained_in_RpPerkWh def aggregate_results(locator, building_names): aggregated_hourly_results_df = pd.DataFrame() aggregated_annual_results = pd.DataFrame() for i, building in enumerate(building_names): hourly_results_per_building = pd.read_csv(locator.PV_results(building)).set_index('Date') if i == 0: aggregated_hourly_results_df = hourly_results_per_building else: aggregated_hourly_results_df = aggregated_hourly_results_df + hourly_results_per_building annual_energy_production = hourly_results_per_building.filter(like='_kWh').sum() panel_area_per_building = hourly_results_per_building.filter(like='_m2').iloc[0] building_annual_results = annual_energy_production.append(panel_area_per_building) aggregated_annual_results[building] = building_annual_results 
return aggregated_hourly_results_df, aggregated_annual_results def aggregate_results_func(args): return aggregate_results(args[0], args[1]) def write_aggregate_results(locator, building_names, num_process=1): aggregated_hourly_results_df = pd.DataFrame() aggregated_annual_results = pd.DataFrame() pool = Pool(processes=num_process) args = [(locator, x) for x in np.array_split(building_names, num_process) if x.size != 0] for i, x in enumerate(pool.map(aggregate_results_func, args)): hourly_results_df, annual_results = x if i == 0: aggregated_hourly_results_df = hourly_results_df aggregated_annual_results = annual_results else: aggregated_hourly_results_df = aggregated_hourly_results_df + hourly_results_df aggregated_annual_results = pd.concat([aggregated_annual_results, annual_results], axis=1, sort=False) # save hourly results aggregated_hourly_results_df.to_csv(locator.PV_totals(), index=True, float_format='%.2f', na_rep='nan') # save annual results aggregated_annual_results_df = pd.DataFrame(aggregated_annual_results).T aggregated_annual_results_df.to_csv(locator.PV_total_buildings(), index=True, index_label="Name", float_format='%.2f', na_rep='nan') def main(config): assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario locator = cea.inputlocator.InputLocator(scenario=config.scenario) print('Running photovoltaic with scenario = %s' % config.scenario) print('Running photovoltaic with annual-radiation-threshold-kWh/m2 = %s' % config.solar.annual_radiation_threshold) print('Running photovoltaic with panel-on-roof = %s' % config.solar.panel_on_roof) print('Running photovoltaic with panel-on-wall = %s' % config.solar.panel_on_wall) print('Running photovoltaic with solar-window-solstice = %s' % config.solar.solar_window_solstice) print('Running photovoltaic with type-pvpanel = %s' % config.solar.type_pvpanel) if config.solar.custom_tilt_angle: print('Running photovoltaic with custom-tilt-angle = %s and panel-tilt-angle = %s' % 
(config.solar.custom_tilt_angle, config.solar.panel_tilt_angle)) else: print('Running photovoltaic with custom-tilt-angle = %s' % config.solar.custom_tilt_angle) if config.solar.custom_roof_coverage: print('Running photovoltaic with custom-roof-coverage = %s and max-roof-coverage = %s' % (config.solar.custom_roof_coverage, config.solar.max_roof_coverage)) else: print('Running photovoltaic with custom-roof-coverage = %s' % config.solar.custom_roof_coverage) building_names = locator.get_zone_building_names() zone_geometry_df = gdf.from_file(locator.get_zone_geometry()) latitude, longitude = get_lat_lon_projected_shapefile(zone_geometry_df) # list_buildings_names =['B026', 'B036', 'B039', 'B043', 'B050'] for missing buildings weather_data = epwreader.epw_reader(locator.get_weather_file()) date_local = solar_equations.calc_datetime_local_from_weather_file(weather_data, latitude, longitude) num_process = config.get_number_of_processes() n = len(building_names) cea.utilities.parallel.vectorize(calc_PV, num_process)(repeat(locator, n), repeat(config, n), repeat(latitude, n), repeat(longitude, n), repeat(weather_data, n), repeat(date_local, n), building_names) # aggregate results from all buildings write_aggregate_results(locator, building_names, num_process) if __name__ == '__main__': main(cea.config.Configuration())
mit
mkukielka/oddt
oddt/scoring/functions/NNScore.py
1
5160
from __future__ import print_function import sys from os.path import dirname, isfile, join as path_join import numpy as np import warnings from joblib import Parallel, delayed from scipy.stats import pearsonr from sklearn.metrics import r2_score from oddt import random_seed from oddt.utils import method_caller from oddt.metrics import rmse, standard_deviation_error from oddt.scoring import scorer, ensemble_model from oddt.scoring.descriptors.binana import binana_descriptor from oddt.scoring.models.regressors import neuralnetwork # numpy after pickling gives Runtime Warnings warnings.simplefilter("ignore", RuntimeWarning) class nnscore(scorer): def __init__(self, protein=None, n_jobs=-1): """NNScore implementation [1]_. Based on Binana descriptors [2]_ and an ensemble of 20 best scored nerual networks with a hidden layer of 5 nodes. The NNScore predicts binding affinity (pKi/d). Parameters ---------- protein : oddt.toolkit.Molecule object Receptor for the scored ligands n_jobs: int (default=-1) Number of cores to use for scoring and training. By default (-1) all cores are allocated. References ---------- .. [1] Durrant JD, McCammon JA. NNScore 2.0: a neural-network receptor-ligand scoring function. J Chem Inf Model. 2011;51: 2897-2903. doi:10.1021/ci2003889 .. [2] Durrant JD, McCammon JA. BINANA: a novel algorithm for ligand-binding characterization. J Mol Graph Model. 2011;29: 888-893. 
doi:10.1016/j.jmgm.2011.01.004 """ self.protein = protein self.n_jobs = n_jobs model = None decsriptors = binana_descriptor(protein) super(nnscore, self).__init__(model, decsriptors, score_title='nnscore') def gen_training_data(self, pdbbind_dir, pdbbind_versions=(2007, 2012, 2013, 2014, 2015, 2016), home_dir=None, use_proteins=False): if home_dir is None: home_dir = dirname(__file__) + '/NNScore' filename = path_join(home_dir, 'nnscore_descs.csv') super(nnscore, self)._gen_pdbbind_desc( pdbbind_dir=pdbbind_dir, pdbbind_versions=pdbbind_versions, desc_path=filename, use_proteins=use_proteins ) def train(self, home_dir=None, sf_pickle=None, pdbbind_version=2016): if not home_dir: home_dir = dirname(__file__) + '/NNScore' desc_path = path_join(home_dir, 'nnscore_descs.csv') super(nnscore, self)._load_pdbbind_desc(desc_path, pdbbind_version=pdbbind_version) # number of network to sample; original implementation did 1000, but # 100 give results good enough. # TODO: allow user to specify number of nets? 
n = 1000 # make nets reproducible random_seed(1) seeds = np.random.randint(123456789, size=n) trained_nets = ( Parallel(n_jobs=self.n_jobs, verbose=10, pre_dispatch='all')( delayed(method_caller)( neuralnetwork((5,), random_state=seeds[i], activation='logistic', solver='lbfgs', max_iter=10000), 'fit', self.train_descs, self.train_target) for i in range(n))) # get 20 best trained_nets.sort(key=lambda n: n.score(self.test_descs, self.test_target.flatten())) self.model = ensemble_model(trained_nets[-20:]) sets = [ ('Test', self.model.predict(self.test_descs), self.test_target), ('Train', self.model.predict(self.train_descs), self.train_target)] for name, pred, target in sets: print('%s set:' % name, 'R2_score: %.4f' % r2_score(target, pred), 'Rp: %.4f' % pearsonr(target, pred)[0], 'RMSE: %.4f' % rmse(target, pred), 'SD: %.4f' % standard_deviation_error(target, pred), sep='\t', file=sys.stderr) if sf_pickle is None: return self.save('NNScore_pdbbind%i.pickle' % (pdbbind_version)) else: return self.save(sf_pickle) @classmethod def load(self, filename=None, pdbbind_version=2016): if filename is None: fname = 'NNScore_pdbbind%i.pickle' % (pdbbind_version) for f in [fname, path_join(dirname(__file__), fname)]: if isfile(f): filename = f break else: print('No pickle, training new scoring function.', file=sys.stderr) nn = nnscore() filename = nn.train(pdbbind_version=pdbbind_version) return scorer.load(filename)
bsd-3-clause
hlin117/statsmodels
statsmodels/stats/tests/test_weightstats.py
30
21864
'''tests for weightstats, compares with replication no failures but needs cleanup update 2012-09-09: added test after fixing bug in covariance TODOs: - I don't remember what all the commented out code is doing - should be refactored to use generator or inherited tests - still gaps in test coverage - value/diff in ttest_ind is tested in test_tost.py - what about pandas data structures? Author: Josef Perktold License: BSD (3-clause) ''' import numpy as np from scipy import stats from numpy.testing import assert_almost_equal, assert_equal, assert_allclose from statsmodels.stats.weightstats import \ DescrStatsW, CompareMeans, ttest_ind, ztest, zconfint #import statsmodels.stats.weightstats as smws class Holder(object): pass class TestWeightstats(object): def __init__(self): np.random.seed(9876789) n1, n2 = 20,20 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1) x2 = m2 + np.random.randn(n2) x1_2d = m1 + np.random.randn(n1, 3) x2_2d = m2 + np.random.randn(n2, 3) w1_ = 2. * np.ones(n1) w2_ = 2. * np.ones(n2) w1 = np.random.randint(1,4, n1) w2 = np.random.randint(1,4, n2) self.x1, self.x2 = x1, x2 self.w1, self.w2 = w1, w2 self.x1_2d, self.x2_2d = x1_2d, x2_2d def test_weightstats_1(self): x1, x2 = self.x1, self.x2 w1, w2 = self.w1, self.w2 w1_ = 2. * np.ones(len(x1)) w2_ = 2. 
* np.ones(len(x2)) d1 = DescrStatsW(x1) # print ttest_ind(x1, x2) # print ttest_ind(x1, x2, usevar='unequal') # #print ttest_ind(x1, x2, usevar='unequal') # print stats.ttest_ind(x1, x2) # print ttest_ind(x1, x2, usevar='unequal', alternative='larger') # print ttest_ind(x1, x2, usevar='unequal', alternative='smaller') # print ttest_ind(x1, x2, usevar='unequal', weights=(w1_, w2_)) # print stats.ttest_ind(np.r_[x1, x1], np.r_[x2,x2]) assert_almost_equal(ttest_ind(x1, x2, weights=(w1_, w2_))[:2], stats.ttest_ind(np.r_[x1, x1], np.r_[x2,x2])) def test_weightstats_2(self): x1, x2 = self.x1, self.x2 w1, w2 = self.w1, self.w2 d1 = DescrStatsW(x1) d1w = DescrStatsW(x1, weights=w1) d2w = DescrStatsW(x2, weights=w2) x1r = d1w.asrepeats() x2r = d2w.asrepeats() # print 'random weights' # print ttest_ind(x1, x2, weights=(w1, w2)) # print stats.ttest_ind(x1r, x2r) assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2], stats.ttest_ind(x1r, x2r), 14) #not the same as new version with random weights/replication # assert x1r.shape[0] == d1w.sum_weights # assert x2r.shape[0] == d2w.sum_weights assert_almost_equal(x2r.mean(0), d2w.mean, 14) assert_almost_equal(x2r.var(), d2w.var, 14) assert_almost_equal(x2r.std(), d2w.std, 14) #note: the following is for 1d assert_almost_equal(np.cov(x2r, bias=1), d2w.cov, 14) #assert_almost_equal(np.corrcoef(np.x2r), d2w.corrcoef, 19) #TODO: exception in corrcoef (scalar case) #one-sample tests # print d1.ttest_mean(3) # print stats.ttest_1samp(x1, 3) # print d1w.ttest_mean(3) # print stats.ttest_1samp(x1r, 3) assert_almost_equal(d1.ttest_mean(3)[:2], stats.ttest_1samp(x1, 3), 11) assert_almost_equal(d1w.ttest_mean(3)[:2], stats.ttest_1samp(x1r, 3), 11) def test_weightstats_3(self): x1_2d, x2_2d = self.x1_2d, self.x2_2d w1, w2 = self.w1, self.w2 d1w_2d = DescrStatsW(x1_2d, weights=w1) d2w_2d = DescrStatsW(x2_2d, weights=w2) x1r_2d = d1w_2d.asrepeats() x2r_2d = d2w_2d.asrepeats() assert_almost_equal(x2r_2d.mean(0), d2w_2d.mean, 14) 
assert_almost_equal(x2r_2d.var(0), d2w_2d.var, 14) assert_almost_equal(x2r_2d.std(0), d2w_2d.std, 14) assert_almost_equal(np.cov(x2r_2d.T, bias=1), d2w_2d.cov, 14) assert_almost_equal(np.corrcoef(x2r_2d.T), d2w_2d.corrcoef, 14) # print d1w_2d.ttest_mean(3) # #scipy.stats.ttest is also vectorized # print stats.ttest_1samp(x1r_2d, 3) t,p,d = d1w_2d.ttest_mean(3) assert_almost_equal([t, p], stats.ttest_1samp(x1r_2d, 3), 11) #print [stats.ttest_1samp(xi, 3) for xi in x1r_2d.T] cm = CompareMeans(d1w_2d, d2w_2d) ressm = cm.ttest_ind() resss = stats.ttest_ind(x1r_2d, x2r_2d) assert_almost_equal(ressm[:2], resss, 14) ## #doesn't work for 2d, levene doesn't use weights ## cm = CompareMeans(d1w_2d, d2w_2d) ## ressm = cm.test_equal_var() ## resss = stats.levene(x1r_2d, x2r_2d) ## assert_almost_equal(ressm[:2], resss, 14) def test_weightstats_ddof_tests(self): # explicit test that ttest and confint are independent of ddof # one sample case x1_2d = self.x1_2d w1 = self.w1 d1w_d0 = DescrStatsW(x1_2d, weights=w1, ddof=0) d1w_d1 = DescrStatsW(x1_2d, weights=w1, ddof=1) d1w_d2 = DescrStatsW(x1_2d, weights=w1, ddof=2) #check confint independent of user ddof res0 = d1w_d0.ttest_mean() res1 = d1w_d1.ttest_mean() res2 = d1w_d2.ttest_mean() # concatenate into one array with np.r_ assert_almost_equal(np.r_[res1], np.r_[res0], 14) assert_almost_equal(np.r_[res2], np.r_[res0], 14) res0 = d1w_d0.ttest_mean(0.5) res1 = d1w_d1.ttest_mean(0.5) res2 = d1w_d2.ttest_mean(0.5) assert_almost_equal(np.r_[res1], np.r_[res0], 14) assert_almost_equal(np.r_[res2], np.r_[res0], 14) #check confint independent of user ddof res0 = d1w_d0.tconfint_mean() res1 = d1w_d1.tconfint_mean() res2 = d1w_d2.tconfint_mean() assert_almost_equal(res1, res0, 14) assert_almost_equal(res2, res0, 14) class CheckWeightstats1dMixin(object): def test_basic(self): x1r = self.x1r d1w = self.d1w assert_almost_equal(x1r.mean(0), d1w.mean, 14) assert_almost_equal(x1r.var(0, ddof=d1w.ddof), d1w.var, 14) assert_almost_equal(x1r.std(0, 
ddof=d1w.ddof), d1w.std, 14) var1 = d1w.var_ddof(ddof=1) assert_almost_equal(x1r.var(0, ddof=1), var1, 14) std1 = d1w.std_ddof(ddof=1) assert_almost_equal(x1r.std(0, ddof=1), std1, 14) assert_almost_equal(np.cov(x1r.T, bias=1-d1w.ddof), d1w.cov, 14) # #assert_almost_equal(np.corrcoef(x1r.T), d1w.corrcoef, 14) def test_ttest(self): x1r = self.x1r d1w = self.d1w assert_almost_equal(d1w.ttest_mean(3)[:2], stats.ttest_1samp(x1r, 3), 11) # def # assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2], # stats.ttest_ind(x1r, x2r), 14) def test_ttest_2sample(self): x1, x2 = self.x1, self.x2 x1r, x2r = self.x1r, self.x2r w1, w2 = self.w1, self.w2 #Note: stats.ttest_ind handles 2d/nd arguments res_sp = stats.ttest_ind(x1r, x2r) assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2], res_sp, 14) #check correct ttest independent of user ddof cm = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0), DescrStatsW(x2, weights=w2, ddof=1)) assert_almost_equal(cm.ttest_ind()[:2], res_sp, 14) cm = CompareMeans(DescrStatsW(x1, weights=w1, ddof=1), DescrStatsW(x2, weights=w2, ddof=2)) assert_almost_equal(cm.ttest_ind()[:2], res_sp, 14) cm0 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0), DescrStatsW(x2, weights=w2, ddof=0)) cm1 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0), DescrStatsW(x2, weights=w2, ddof=1)) cm2 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=1), DescrStatsW(x2, weights=w2, ddof=2)) res0 = cm0.ttest_ind(usevar='unequal') res1 = cm1.ttest_ind(usevar='unequal') res2 = cm2.ttest_ind(usevar='unequal') assert_almost_equal(res1, res0, 14) assert_almost_equal(res2, res0, 14) #check confint independent of user ddof res0 = cm0.tconfint_diff(usevar='pooled') res1 = cm1.tconfint_diff(usevar='pooled') res2 = cm2.tconfint_diff(usevar='pooled') assert_almost_equal(res1, res0, 14) assert_almost_equal(res2, res0, 14) res0 = cm0.tconfint_diff(usevar='unequal') res1 = cm1.tconfint_diff(usevar='unequal') res2 = cm2.tconfint_diff(usevar='unequal') 
assert_almost_equal(res1, res0, 14) assert_almost_equal(res2, res0, 14) def test_confint_mean(self): #compare confint_mean with ttest d1w = self.d1w alpha = 0.05 low, upp = d1w.tconfint_mean() t, p, d = d1w.ttest_mean(low) assert_almost_equal(p, alpha * np.ones(p.shape), 8) t, p, d = d1w.ttest_mean(upp) assert_almost_equal(p, alpha * np.ones(p.shape), 8) t, p, d = d1w.ttest_mean(np.vstack((low, upp))) assert_almost_equal(p, alpha * np.ones(p.shape), 8) class CheckWeightstats2dMixin(CheckWeightstats1dMixin): def test_corr(self): x1r = self.x1r d1w = self.d1w assert_almost_equal(np.corrcoef(x1r.T), d1w.corrcoef, 14) class TestWeightstats1d_ddof(CheckWeightstats1dMixin): @classmethod def setup_class(self): np.random.seed(9876789) n1, n2 = 20,20 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1, 1) x2 = m2 + np.random.randn(n2, 1) w1 = np.random.randint(1,4, n1) w2 = np.random.randint(1,4, n2) self.x1, self.x2 = x1, x2 self.w1, self.w2 = w1, w2 self.d1w = DescrStatsW(x1, weights=w1, ddof=1) self.d2w = DescrStatsW(x2, weights=w2, ddof=1) self.x1r = self.d1w.asrepeats() self.x2r = self.d2w.asrepeats() class TestWeightstats2d(CheckWeightstats2dMixin): @classmethod def setup_class(self): np.random.seed(9876789) n1, n2 = 20,20 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1, 3) x2 = m2 + np.random.randn(n2, 3) w1_ = 2. * np.ones(n1) w2_ = 2. 
* np.ones(n2) w1 = np.random.randint(1,4, n1) w2 = np.random.randint(1,4, n2) self.x1, self.x2 = x1, x2 self.w1, self.w2 = w1, w2 self.d1w = DescrStatsW(x1, weights=w1) self.d2w = DescrStatsW(x2, weights=w2) self.x1r = self.d1w.asrepeats() self.x2r = self.d2w.asrepeats() class TestWeightstats2d_ddof(CheckWeightstats2dMixin): @classmethod def setup_class(self): np.random.seed(9876789) n1, n2 = 20,20 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1, 3) x2 = m2 + np.random.randn(n2, 3) w1 = np.random.randint(1,4, n1) w2 = np.random.randint(1,4, n2) self.x1, self.x2 = x1, x2 self.w1, self.w2 = w1, w2 self.d1w = DescrStatsW(x1, weights=w1, ddof=1) self.d2w = DescrStatsW(x2, weights=w2, ddof=1) self.x1r = self.d1w.asrepeats() self.x2r = self.d2w.asrepeats() class TestWeightstats2d_nobs(CheckWeightstats2dMixin): @classmethod def setup_class(self): np.random.seed(9876789) n1, n2 = 20,30 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1, 3) x2 = m2 + np.random.randn(n2, 3) w1 = np.random.randint(1,4, n1) w2 = np.random.randint(1,4, n2) self.x1, self.x2 = x1, x2 self.w1, self.w2 = w1, w2 self.d1w = DescrStatsW(x1, weights=w1, ddof=0) self.d2w = DescrStatsW(x2, weights=w2, ddof=1) self.x1r = self.d1w.asrepeats() self.x2r = self.d2w.asrepeats() def test_ttest_ind_with_uneq_var(): #from scipy # check vs. 
R a = (1, 2, 3) b = (1.1, 2.9, 4.2) pr = 0.53619490753126731 tr = -0.68649512735572582 t, p, df = ttest_ind(a, b, usevar='unequal') assert_almost_equal([t,p], [tr, pr], 13) a = (1, 2, 3, 4) pr = 0.84354139131608286 tr = -0.2108663315950719 t, p, df = ttest_ind(a, b, usevar='unequal') assert_almost_equal([t,p], [tr, pr], 13) def test_ztest_ztost(): # compare weightstats with separately tested proportion ztest ztost import statsmodels.stats.proportion as smprop x1 = [0, 1] w1 = [5, 15] res2 = smprop.proportions_ztest(15, 20., value=0.5) d1 = DescrStatsW(x1, w1) res1 = d1.ztest_mean(0.5) assert_allclose(res1, res2, rtol=0.03, atol=0.003) d2 = DescrStatsW(x1, np.array(w1)*21./20) res1 = d2.ztest_mean(0.5) assert_almost_equal(res1, res2, decimal=12) res1 = d2.ztost_mean(0.4, 0.6) res2 = smprop.proportions_ztost(15, 20., 0.4, 0.6) assert_almost_equal(res1[0], res2[0], decimal=12) x2 = [0, 1] w2 = [10, 10] #d2 = DescrStatsW(x1, np.array(w1)*21./20) d2 = DescrStatsW(x2, w2) res1 = ztest(d1.asrepeats(), d2.asrepeats()) res2 = smprop.proportions_chisquare(np.asarray([15, 10]), np.asarray([20., 20])) #TODO: check this is this difference expected?, see test_proportion assert_allclose(res1[1], res2[1], rtol=0.03) res1a = CompareMeans(d1, d2).ztest_ind() assert_allclose(res1a[1], res2[1], rtol=0.03) assert_almost_equal(res1a, res1, decimal=12) ###### test for ztest and z confidence interval against R BSDA z.test # Note: I needed to calculate the pooled standard deviation for R # std = np.std(np.concatenate((x-x.mean(),y-y.mean())), ddof=2) #> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667) #> cat_items(zt, "ztest.") ztest_ = Holder() ztest_.statistic = 6.55109865675183 ztest_.p_value = 5.711530850508982e-11 ztest_.conf_int = np.array([1.230415246535603, 2.280948389828034]) ztest_.estimate = np.array([7.01818181818182, 5.2625]) ztest_.null_value = 0 ztest_.alternative = 'two.sided' ztest_.method = 'Two-sample z-Test' ztest_.data_name = 'x and y' #> zt 
= z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, alternative="less") #> cat_items(zt, "ztest_smaller.") ztest_smaller = Holder() ztest_smaller.statistic = 6.55109865675183 ztest_smaller.p_value = 0.999999999971442 ztest_smaller.conf_int = np.array([np.nan, 2.196499421109045]) ztest_smaller.estimate = np.array([7.01818181818182, 5.2625]) ztest_smaller.null_value = 0 ztest_smaller.alternative = 'less' ztest_smaller.method = 'Two-sample z-Test' ztest_smaller.data_name = 'x and y' #> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, alternative="greater") #> cat_items(zt, "ztest_larger.") ztest_larger = Holder() ztest_larger.statistic = 6.55109865675183 ztest_larger.p_value = 2.855760072861813e-11 ztest_larger.conf_int = np.array([1.314864215254592, np.nan]) ztest_larger.estimate = np.array([7.01818181818182, 5.2625 ]) ztest_larger.null_value = 0 ztest_larger.alternative = 'greater' ztest_larger.method = 'Two-sample z-Test' ztest_larger.data_name = 'x and y' #> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=1, alternative="two.sided") #> cat_items(zt, "ztest_mu.") ztest_mu = Holder() ztest_mu.statistic = 2.81972854805176 ztest_mu.p_value = 0.00480642898427981 ztest_mu.conf_int = np.array([1.230415246535603, 2.280948389828034]) ztest_mu.estimate = np.array([7.01818181818182, 5.2625]) ztest_mu.null_value = 1 ztest_mu.alternative = 'two.sided' ztest_mu.method = 'Two-sample z-Test' ztest_mu.data_name = 'x and y' #> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=1, alternative="greater") #> cat_items(zt, "ztest_larger_mu.") ztest_larger_mu = Holder() ztest_larger_mu.statistic = 2.81972854805176 ztest_larger_mu.p_value = 0.002403214492139871 ztest_larger_mu.conf_int = np.array([1.314864215254592, np.nan]) ztest_larger_mu.estimate = np.array([7.01818181818182, 5.2625]) ztest_larger_mu.null_value = 1 ztest_larger_mu.alternative = 'greater' ztest_larger_mu.method = 
'Two-sample z-Test' ztest_larger_mu.data_name = 'x and y' #> zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667, mu=2, alternative="less") #> cat_items(zt, "ztest_smaller_mu.") ztest_smaller_mu = Holder() ztest_smaller_mu.statistic = -0.911641560648313 ztest_smaller_mu.p_value = 0.1809787183191324 ztest_smaller_mu.conf_int = np.array([np.nan, 2.196499421109045]) ztest_smaller_mu.estimate = np.array([7.01818181818182, 5.2625]) ztest_smaller_mu.null_value = 2 ztest_smaller_mu.alternative = 'less' ztest_smaller_mu.method = 'Two-sample z-Test' ztest_smaller_mu.data_name = 'x and y' #> zt = z.test(x, sigma.x=0.46436662631627995, mu=6.4, alternative="two.sided") #> cat_items(zt, "ztest_mu_1s.") ztest_mu_1s = Holder() ztest_mu_1s.statistic = 4.415212090914452 ztest_mu_1s.p_value = 1.009110038015147e-05 ztest_mu_1s.conf_int = np.array([6.74376372125119, 7.29259991511245]) ztest_mu_1s.estimate = 7.01818181818182 ztest_mu_1s.null_value = 6.4 ztest_mu_1s.alternative = 'two.sided' ztest_mu_1s.method = 'One-sample z-Test' ztest_mu_1s.data_name = 'x' #> zt = z.test(x, sigma.x=0.46436662631627995, mu=7.4, alternative="less") #> cat_items(zt, "ztest_smaller_mu_1s.") ztest_smaller_mu_1s = Holder() ztest_smaller_mu_1s.statistic = -2.727042762035397 ztest_smaller_mu_1s.p_value = 0.00319523783881176 ztest_smaller_mu_1s.conf_int = np.array([np.nan, 7.248480744895716]) ztest_smaller_mu_1s.estimate = 7.01818181818182 ztest_smaller_mu_1s.null_value = 7.4 ztest_smaller_mu_1s.alternative = 'less' ztest_smaller_mu_1s.method = 'One-sample z-Test' ztest_smaller_mu_1s.data_name = 'x' #> zt = z.test(x, sigma.x=0.46436662631627995, mu=6.4, alternative="greater") #> cat_items(zt, "ztest_greater_mu_1s.") ztest_larger_mu_1s = Holder() ztest_larger_mu_1s.statistic = 4.415212090914452 ztest_larger_mu_1s.p_value = 5.045550190097003e-06 ztest_larger_mu_1s.conf_int = np.array([6.78788289146792, np.nan]) ztest_larger_mu_1s.estimate = 7.01818181818182 ztest_larger_mu_1s.null_value = 
6.4 ztest_larger_mu_1s.alternative = 'greater' ztest_larger_mu_1s.method = 'One-sample z-Test' ztest_larger_mu_1s.data_name = 'x' alternatives = {'less' : 'smaller', 'greater' : 'larger', 'two.sided' : 'two-sided'} class TestZTest(object): # all examples use the same data # no weights used in tests @classmethod def setup_class(cls): cls.x1 = np.array([7.8, 6.6, 6.5, 7.4, 7.3, 7., 6.4, 7.1, 6.7, 7.6, 6.8]) cls.x2 = np.array([4.5, 5.4, 6.1, 6.1, 5.4, 5., 4.1, 5.5]) cls.d1 = DescrStatsW(cls.x1) cls.d2 = DescrStatsW(cls.x2) cls.cm = CompareMeans(cls.d1, cls.d2) def test(self): x1, x2 = self.x1, self.x2 cm = self.cm # tc : test cases for tc in [ztest_, ztest_smaller, ztest_larger, ztest_mu, ztest_smaller_mu, ztest_larger_mu]: zstat, pval = ztest(x1, x2, value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(zstat, tc.statistic, rtol=1e-10) assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16) zstat, pval = cm.ztest_ind(value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(zstat, tc.statistic, rtol=1e-10) assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16) #overwrite nan in R's confint tc_conf_int = tc.conf_int.copy() if np.isnan(tc_conf_int[0]): tc_conf_int[0] = - np.inf if np.isnan(tc_conf_int[1]): tc_conf_int[1] = np.inf # Note: value is shifting our confidence interval in zconfint ci = zconfint(x1, x2, value=0, alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int, rtol=1e-10) ci = cm.zconfint_diff(alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int, rtol=1e-10) ci = zconfint(x1, x2, value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int - tc.null_value, rtol=1e-10) # 1 sample test copy-paste d1 = self.d1 for tc in [ztest_mu_1s, ztest_smaller_mu_1s, ztest_larger_mu_1s]: zstat, pval = ztest(x1, value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(zstat, tc.statistic, rtol=1e-10) assert_allclose(pval, 
tc.p_value, rtol=1e-10, atol=1e-16) zstat, pval = d1.ztest_mean(value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(zstat, tc.statistic, rtol=1e-10) assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16) #overwrite nan in R's confint tc_conf_int = tc.conf_int.copy() if np.isnan(tc_conf_int[0]): tc_conf_int[0] = - np.inf if np.isnan(tc_conf_int[1]): tc_conf_int[1] = np.inf # Note: value is shifting our confidence interval in zconfint ci = zconfint(x1, value=0, alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int, rtol=1e-10) ci = d1.zconfint_mean(alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int, rtol=1e-10)
bsd-3-clause
bearing/dosenet-analysis
Programming Lesson Modules/Module 6- Data Binning.py
1
2232
""" ####Module 6- Data Binning """ import csv import io import urllib.request import matplotlib.pyplot as plt import numpy as np from datetime import datetime url = 'http://radwatch.berkeley.edu/sites/default/files/dosenet/etch_roof.csv' response = urllib.request.urlopen(url) reader = csv.reader(io.TextIOWrapper(response)) timedata = [] counts = [] CPMerror = [] line = 0 for row in reader: if line != 0: timedata.append(datetime.fromtimestamp(float(row[2],))) # 3rd column if CSV is a UNIX timestamp that can be converted to datetime via fromtimestamp counts.append(float(row[6])) CPMerror.append(float(row[7])) line += 1 def month_bin(): Year = [timedata[-1].year] Month = [timedata[-1].month] sumCPM = [0] sumError = [0] DataCount = [0] flag = 0 for i in range(len(counts)-1,-1,-1): if Year[flag] == timedata[i].year: if Month[flag] == timedata[i].month: sumCPM[flag] += counts[i] sumError[flag] += CPMerror[i] DataCount[flag] += 1 else: Year.append(timedata[i].year) Month.append(timedata[i].month) sumCPM.append(0) sumError.append(0) DataCount.append(0) flag += 1 else: Year.append(timedata[i].year) Month.append(timedata[i].month) sumCPM.append(0) sumError.append(0) DataCount.append(0) flag += 1 binnedCPM = np.array(sumCPM) / np.array(DataCount) binnedError = np.array(sumError) / np.array(DataCount) strDates = [str(m)+'-'+str(n) for m,n in zip(Month,Year)] binnedDates = [] for i in range(0,len(Month)): binnedDates.append(datetime.strptime(strDates[i],'%m-%Y')) fig, ax = plt.subplots() ax.plot(binnedDates,binnedCPM, 'ro-') ax.errorbar(binnedDates,binnedCPM, yerr=binnedError, fmt='ro', ecolor='r') plt.xticks(rotation=30) plt.title('DoseNet: Time-Averaged CPM (Etcheverry Roof)') plt.xlabel('Date') plt.ylabel('Average CPM')
mit
raghavrv/scikit-learn
sklearn/tree/export.py
16
18309
""" This module defines export functions for decision trees. """ # Authors: Gilles Louppe <g.louppe@gmail.com> # Peter Prettenhofer <peter.prettenhofer@gmail.com> # Brian Holt <bdholt1@gmail.com> # Noel Dawe <noel@dawe.me> # Satrajit Gosh <satrajit.ghosh@gmail.com> # Trevor Stephens <trev.stephens@gmail.com> # Li Li <aiki.nogard@gmail.com> # License: BSD 3 clause from numbers import Integral import numpy as np import warnings from ..externals import six from ..utils.validation import check_is_fitted from . import _criterion from . import _tree def _color_brew(n): """Generate n colors with equally spaced hues. Parameters ---------- n : int The number of colors required. Returns ------- color_list : list, length n List of n tuples of form (R, G, B) being the components of each color. """ color_list = [] # Initialize saturation & value; calculate chroma & value shift s, v = 0.75, 0.9 c = s * v m = v - c for h in np.arange(25, 385, 360. / n).astype(int): # Calculate some intermediate values h_bar = h / 60. x = c * (1 - abs((h_bar % 2) - 1)) # Initialize RGB with same hue & chroma as our color rgb = [(c, x, 0), (x, c, 0), (0, c, x), (0, x, c), (x, 0, c), (c, 0, x), (c, x, 0)] r, g, b = rgb[int(h_bar)] # Shift the initial RGB values to match value and store rgb = [(int(255 * (r + m))), (int(255 * (g + m))), (int(255 * (b + m)))] color_list.append(rgb) return color_list class Sentinel(object): def __repr__(): return '"tree.dot"' SENTINEL = Sentinel() def export_graphviz(decision_tree, out_file=SENTINEL, max_depth=None, feature_names=None, class_names=None, label='all', filled=False, leaves_parallel=False, impurity=True, node_ids=False, proportion=False, rotate=False, rounded=False, special_characters=False, precision=3): """Export a decision tree in DOT format. This function generates a GraphViz representation of the decision tree, which is then written into `out_file`. 
Once exported, graphical renderings can be generated using, for example:: $ dot -Tps tree.dot -o tree.ps (PostScript format) $ dot -Tpng tree.dot -o tree.png (PNG format) The sample counts that are shown are weighted with any sample_weights that might be present. Read more in the :ref:`User Guide <tree>`. Parameters ---------- decision_tree : decision tree classifier The decision tree to be exported to GraphViz. out_file : file object or string, optional (default='tree.dot') Handle or name of the output file. If ``None``, the result is returned as a string. This will the default from version 0.20. max_depth : int, optional (default=None) The maximum depth of the representation. If None, the tree is fully generated. feature_names : list of strings, optional (default=None) Names of each of the features. class_names : list of strings, bool or None, optional (default=None) Names of each of the target classes in ascending numerical order. Only relevant for classification and not supported for multi-output. If ``True``, shows a symbolic representation of the class name. label : {'all', 'root', 'none'}, optional (default='all') Whether to show informative labels for impurity, etc. Options include 'all' to show at every node, 'root' to show only at the top root node, or 'none' to not show at any node. filled : bool, optional (default=False) When set to ``True``, paint nodes to indicate majority class for classification, extremity of values for regression, or purity of node for multi-output. leaves_parallel : bool, optional (default=False) When set to ``True``, draw all leaf nodes at the bottom of the tree. impurity : bool, optional (default=True) When set to ``True``, show the impurity at each node. node_ids : bool, optional (default=False) When set to ``True``, show the ID number on each node. proportion : bool, optional (default=False) When set to ``True``, change the display of 'values' and/or 'samples' to be proportions and percentages respectively. 
rotate : bool, optional (default=False) When set to ``True``, orient tree left to right rather than top-down. rounded : bool, optional (default=False) When set to ``True``, draw node boxes with rounded corners and use Helvetica fonts instead of Times-Roman. special_characters : bool, optional (default=False) When set to ``False``, ignore special characters for PostScript compatibility. precision : int, optional (default=3) Number of digits of precision for floating point in the values of impurity, threshold and value attributes of each node. Returns ------- dot_data : string String representation of the input tree in GraphViz dot format. Only returned if ``out_file`` is None. .. versionadded:: 0.18 Examples -------- >>> from sklearn.datasets import load_iris >>> from sklearn import tree >>> clf = tree.DecisionTreeClassifier() >>> iris = load_iris() >>> clf = clf.fit(iris.data, iris.target) >>> tree.export_graphviz(clf, ... out_file='tree.dot') # doctest: +SKIP """ def get_color(value): # Find the appropriate color & intensity for a node if colors['bounds'] is None: # Classification tree color = list(colors['rgb'][np.argmax(value)]) sorted_values = sorted(value, reverse=True) if len(sorted_values) == 1: alpha = 0 else: alpha = int(np.round(255 * (sorted_values[0] - sorted_values[1]) / (1 - sorted_values[1]), 0)) else: # Regression tree or multi-output color = list(colors['rgb'][0]) alpha = int(np.round(255 * ((value - colors['bounds'][0]) / (colors['bounds'][1] - colors['bounds'][0])), 0)) # Return html color code in #RRGGBBAA format color.append(alpha) hex_codes = [str(i) for i in range(10)] hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f']) color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color] return '#' + ''.join(color) def node_to_str(tree, node_id, criterion): # Generate the node content string if tree.n_outputs == 1: value = tree.value[node_id][0, :] else: value = tree.value[node_id] # Should labels be shown? 
labels = (label == 'root' and node_id == 0) or label == 'all' # PostScript compatibility for special characters if special_characters: characters = ['&#35;', '<SUB>', '</SUB>', '&le;', '<br/>', '>'] node_string = '<' else: characters = ['#', '[', ']', '<=', '\\n', '"'] node_string = '"' # Write node ID if node_ids: if labels: node_string += 'node ' node_string += characters[0] + str(node_id) + characters[4] # Write decision criteria if tree.children_left[node_id] != _tree.TREE_LEAF: # Always write node decision criteria, except for leaves if feature_names is not None: feature = feature_names[tree.feature[node_id]] else: feature = "X%s%s%s" % (characters[1], tree.feature[node_id], characters[2]) node_string += '%s %s %s%s' % (feature, characters[3], round(tree.threshold[node_id], precision), characters[4]) # Write impurity if impurity: if isinstance(criterion, _criterion.FriedmanMSE): criterion = "friedman_mse" elif not isinstance(criterion, six.string_types): criterion = "impurity" if labels: node_string += '%s = ' % criterion node_string += (str(round(tree.impurity[node_id], precision)) + characters[4]) # Write node sample count if labels: node_string += 'samples = ' if proportion: percent = (100. 
* tree.n_node_samples[node_id] / float(tree.n_node_samples[0])) node_string += (str(round(percent, 1)) + '%' + characters[4]) else: node_string += (str(tree.n_node_samples[node_id]) + characters[4]) # Write node class distribution / regression value if proportion and tree.n_classes[0] != 1: # For classification this will show the proportion of samples value = value / tree.weighted_n_node_samples[node_id] if labels: node_string += 'value = ' if tree.n_classes[0] == 1: # Regression value_text = np.around(value, precision) elif proportion: # Classification value_text = np.around(value, precision) elif np.all(np.equal(np.mod(value, 1), 0)): # Classification without floating-point weights value_text = value.astype(int) else: # Classification with floating-point weights value_text = np.around(value, precision) # Strip whitespace value_text = str(value_text.astype('S32')).replace("b'", "'") value_text = value_text.replace("' '", ", ").replace("'", "") if tree.n_classes[0] == 1 and tree.n_outputs == 1: value_text = value_text.replace("[", "").replace("]", "") value_text = value_text.replace("\n ", characters[4]) node_string += value_text + characters[4] # Write node majority class if (class_names is not None and tree.n_classes[0] != 1 and tree.n_outputs == 1): # Only done for single-output classification trees if labels: node_string += 'class = ' if class_names is not True: class_name = class_names[np.argmax(value)] else: class_name = "y%s%s%s" % (characters[1], np.argmax(value), characters[2]) node_string += class_name # Clean up any trailing newlines if node_string[-2:] == '\\n': node_string = node_string[:-2] if node_string[-5:] == '<br/>': node_string = node_string[:-5] return node_string + characters[5] def recurse(tree, node_id, criterion, parent=None, depth=0): if node_id == _tree.TREE_LEAF: raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF) left_child = tree.children_left[node_id] right_child = tree.children_right[node_id] # Add node with description if 
max_depth is None or depth <= max_depth: # Collect ranks for 'leaf' option in plot_options if left_child == _tree.TREE_LEAF: ranks['leaves'].append(str(node_id)) elif str(depth) not in ranks: ranks[str(depth)] = [str(node_id)] else: ranks[str(depth)].append(str(node_id)) out_file.write('%d [label=%s' % (node_id, node_to_str(tree, node_id, criterion))) if filled: # Fetch appropriate color for node if 'rgb' not in colors: # Initialize colors and bounds if required colors['rgb'] = _color_brew(tree.n_classes[0]) if tree.n_outputs != 1: # Find max and min impurities for multi-output colors['bounds'] = (np.min(-tree.impurity), np.max(-tree.impurity)) elif (tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1): # Find max and min values in leaf nodes for regression colors['bounds'] = (np.min(tree.value), np.max(tree.value)) if tree.n_outputs == 1: node_val = (tree.value[node_id][0, :] / tree.weighted_n_node_samples[node_id]) if tree.n_classes[0] == 1: # Regression node_val = tree.value[node_id][0, :] else: # If multi-output color node by impurity node_val = -tree.impurity[node_id] out_file.write(', fillcolor="%s"' % get_color(node_val)) out_file.write('] ;\n') if parent is not None: # Add edge to parent out_file.write('%d -> %d' % (parent, node_id)) if parent == 0: # Draw True/False labels if parent is root node angles = np.array([45, -45]) * ((rotate - .5) * -2) out_file.write(' [labeldistance=2.5, labelangle=') if node_id == 1: out_file.write('%d, headlabel="True"]' % angles[0]) else: out_file.write('%d, headlabel="False"]' % angles[1]) out_file.write(' ;\n') if left_child != _tree.TREE_LEAF: recurse(tree, left_child, criterion=criterion, parent=node_id, depth=depth + 1) recurse(tree, right_child, criterion=criterion, parent=node_id, depth=depth + 1) else: ranks['leaves'].append(str(node_id)) out_file.write('%d [label="(...)"' % node_id) if filled: # color cropped nodes grey out_file.write(', fillcolor="#C0C0C0"') out_file.write('] ;\n' % node_id) if parent is not 
None: # Add edge to parent out_file.write('%d -> %d ;\n' % (parent, node_id)) check_is_fitted(decision_tree, 'tree_') own_file = False return_string = False try: if out_file == SENTINEL: warnings.warn("out_file can be set to None starting from 0.18. " "This will be the default in 0.20.", DeprecationWarning) out_file = "tree.dot" if isinstance(out_file, six.string_types): if six.PY3: out_file = open(out_file, "w", encoding="utf-8") else: out_file = open(out_file, "wb") own_file = True if out_file is None: return_string = True out_file = six.StringIO() if isinstance(precision, Integral): if precision < 0: raise ValueError("'precision' should be greater or equal to 0." " Got {} instead.".format(precision)) else: raise ValueError("'precision' should be an integer. Got {}" " instead.".format(type(precision))) # Check length of feature_names before getting into the tree node # Raise error if length of feature_names does not match # n_features_ in the decision_tree if feature_names is not None: if len(feature_names) != decision_tree.n_features_: raise ValueError("Length of feature_names, %d " "does not match number of features, %d" % (len(feature_names), decision_tree.n_features_)) # The depth of each node for plotting with 'leaf' option ranks = {'leaves': []} # The colors to render each node with colors = {'bounds': None} out_file.write('digraph Tree {\n') # Specify node aesthetics out_file.write('node [shape=box') rounded_filled = [] if filled: rounded_filled.append('filled') if rounded: rounded_filled.append('rounded') if len(rounded_filled) > 0: out_file.write(', style="%s", color="black"' % ", ".join(rounded_filled)) if rounded: out_file.write(', fontname=helvetica') out_file.write('] ;\n') # Specify graph & edge aesthetics if leaves_parallel: out_file.write('graph [ranksep=equally, splines=polyline] ;\n') if rounded: out_file.write('edge [fontname=helvetica] ;\n') if rotate: out_file.write('rankdir=LR ;\n') # Now recurse the tree and add node & edge attributes if 
isinstance(decision_tree, _tree.Tree): recurse(decision_tree, 0, criterion="impurity") else: recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion) # If required, draw leaf nodes at same depth as each other if leaves_parallel: for rank in sorted(ranks): out_file.write("{rank=same ; " + "; ".join(r for r in ranks[rank]) + "} ;\n") out_file.write("}") if return_string: return out_file.getvalue() finally: if own_file: out_file.close()
bsd-3-clause
neale/CS-program
434-MachineLearning/final_project/linearClassifier/sklearn/naive_bayes.py
16
30329
# -*- coding: utf-8 -*- """ The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These are supervised learning methods based on applying Bayes' theorem with strong (naive) feature independence assumptions. """ # Author: Vincent Michel <vincent.michel@inria.fr> # Minor fixes by Fabian Pedregosa # Amit Aides <amitibo@tx.technion.ac.il> # Yehuda Finkelstein <yehudaf@tx.technion.ac.il> # Lars Buitinck # Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # (parts based on earlier work by Mathieu Blondel) # # License: BSD 3 clause from abc import ABCMeta, abstractmethod import numpy as np from scipy.sparse import issparse from .base import BaseEstimator, ClassifierMixin from .preprocessing import binarize from .preprocessing import LabelBinarizer from .preprocessing import label_binarize from .utils import check_X_y, check_array from .utils.extmath import safe_sparse_dot, logsumexp from .utils.multiclass import _check_partial_fit_first_call from .utils.fixes import in1d from .utils.validation import check_is_fitted from .externals import six __all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB'] class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)): """Abstract base class for naive Bayes estimators""" @abstractmethod def _joint_log_likelihood(self, X): """Compute the unnormalized posterior log probability of X I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of shape [n_classes, n_samples]. Input is passed to _joint_log_likelihood as-is by predict, predict_proba and predict_log_proba. """ def predict(self, X): """ Perform classification on an array of test vectors X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples] Predicted target values for X """ jll = self._joint_log_likelihood(X) return self.classes_[np.argmax(jll, axis=1)] def predict_log_proba(self, X): """ Return log-probability estimates for the test vector X. 
Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array-like, shape = [n_samples, n_classes] Returns the log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. """ jll = self._joint_log_likelihood(X) # normalize by P(x) = P(f_1, ..., f_n) log_prob_x = logsumexp(jll, axis=1) return jll - np.atleast_2d(log_prob_x).T def predict_proba(self, X): """ Return probability estimates for the test vector X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array-like, shape = [n_samples, n_classes] Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute `classes_`. """ return np.exp(self.predict_log_proba(X)) class GaussianNB(BaseNB): """ Gaussian Naive Bayes (GaussianNB) Can perform online updates to model parameters via `partial_fit` method. For details on algorithm used to update feature means and variance online, see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Read more in the :ref:`User Guide <gaussian_naive_bayes>`. Parameters ---------- priors : array-like, shape (n_classes,) Prior probabilities of the classes. If specified the priors are not adjusted according to the data. Attributes ---------- class_prior_ : array, shape (n_classes,) probability of each class. class_count_ : array, shape (n_classes,) number of training samples observed in each class. 
theta_ : array, shape (n_classes, n_features) mean of each feature per class sigma_ : array, shape (n_classes, n_features) variance of each feature per class Examples -------- >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> Y = np.array([1, 1, 1, 2, 2, 2]) >>> from sklearn.naive_bayes import GaussianNB >>> clf = GaussianNB() >>> clf.fit(X, Y) GaussianNB(priors=None) >>> print(clf.predict([[-0.8, -1]])) [1] >>> clf_pf = GaussianNB() >>> clf_pf.partial_fit(X, Y, np.unique(Y)) GaussianNB(priors=None) >>> print(clf_pf.predict([[-0.8, -1]])) [1] """ def __init__(self, priors=None): self.priors = priors def fit(self, X, y, sample_weight=None): """Fit Gaussian Naive Bayes according to X, y Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). .. versionadded:: 0.17 Gaussian Naive Bayes supports fitting with *sample_weight*. Returns ------- self : object Returns self. """ X, y = check_X_y(X, y) return self._partial_fit(X, y, np.unique(y), _refit=True, sample_weight=sample_weight) @staticmethod def _update_mean_variance(n_past, mu, var, X, sample_weight=None): """Compute online update of Gaussian mean and variance. Given starting sample count, mean, and variance, a new set of points X, and optionally sample weights, return the updated mean and variance. (NB - each dimension (column) in X is treated as independent -- you get variance, not covariance). Can take scalar mean and variance, or vector mean and variance to simultaneously update a number of independent Gaussians. 
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Parameters ---------- n_past : int Number of samples represented in old mean and variance. If sample weights were given, this should contain the sum of sample weights represented in old mean and variance. mu : array-like, shape (number of Gaussians,) Means for Gaussians in original set. var : array-like, shape (number of Gaussians,) Variances for Gaussians in original set. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- total_mu : array-like, shape (number of Gaussians,) Updated mean for each Gaussian over the combined set. total_var : array-like, shape (number of Gaussians,) Updated variance for each Gaussian over the combined set. """ if X.shape[0] == 0: return mu, var # Compute (potentially weighted) mean and variance of new datapoints if sample_weight is not None: n_new = float(sample_weight.sum()) new_mu = np.average(X, axis=0, weights=sample_weight / n_new) new_var = np.average((X - new_mu) ** 2, axis=0, weights=sample_weight / n_new) else: n_new = X.shape[0] new_var = np.var(X, axis=0) new_mu = np.mean(X, axis=0) if n_past == 0: return new_mu, new_var n_total = float(n_past + n_new) # Combine mean of old and new data, taking into consideration # (weighted) number of observations total_mu = (n_new * new_mu + n_past * mu) / n_total # Combine variance of old and new data, taking into consideration # (weighted) number of observations. This is achieved by combining # the sum-of-squared-differences (ssd) old_ssd = n_past * var new_ssd = n_new * new_var total_ssd = (old_ssd + new_ssd + (n_past / float(n_new * n_total)) * (n_new * mu - n_new * new_mu) ** 2) total_var = total_ssd / n_total return total_mu, total_var def partial_fit(self, X, y, classes=None, sample_weight=None): """Incremental fit on a batch of samples. 
This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once. This method has some performance and numerical stability overhead, hence it is better to call partial_fit on chunks of data that are as large as possible (as long as fitting in the memory budget) to hide the overhead. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. classes : array-like, shape (n_classes,) List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). .. versionadded:: 0.17 Returns ------- self : object Returns self. """ return self._partial_fit(X, y, classes, _refit=False, sample_weight=sample_weight) def _partial_fit(self, X, y, classes=None, _refit=False, sample_weight=None): """Actual implementation of Gaussian NB fitting. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. classes : array-like, shape (n_classes,) List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. _refit: bool If true, act as though this were the first time we called _partial_fit (ie, throw away any past fitting and start over). sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns self. 
""" X, y = check_X_y(X, y) # If the ratio of data variance between dimensions is too small, it # will cause numerical errors. To address this, we artificially # boost the variance by epsilon, a small fraction of the standard # deviation of the largest dimension. epsilon = 1e-9 * np.var(X, axis=0).max() if _refit: self.classes_ = None if _check_partial_fit_first_call(self, classes): # This is the first call to partial_fit: # initialize various cumulative counters n_features = X.shape[1] n_classes = len(self.classes_) self.theta_ = np.zeros((n_classes, n_features)) self.sigma_ = np.zeros((n_classes, n_features)) self.class_count_ = np.zeros(n_classes, dtype=np.float64) # Initialise the class prior n_classes = len(self.classes_) # Take into account the priors if self.priors is not None: priors = np.asarray(self.priors) # Check that the provide prior match the number of classes if len(priors) != n_classes: raise ValueError('Number of priors must match number of' ' classes.') # Check that the sum is 1 if priors.sum() != 1.0: raise ValueError('The sum of the priors should be 1.') # Check that the prior are non-negative if (priors < 0).any(): raise ValueError('Priors must be non-negative.') self.class_prior_ = priors else: # Initialize the priors to zeros for each class self.class_prior_ = np.zeros(len(self.classes_), dtype=np.float64) else: if X.shape[1] != self.theta_.shape[1]: msg = "Number of features %d does not match previous data %d." 
raise ValueError(msg % (X.shape[1], self.theta_.shape[1])) # Put epsilon back in each time self.sigma_[:, :] -= epsilon classes = self.classes_ unique_y = np.unique(y) unique_y_in_classes = in1d(unique_y, classes) if not np.all(unique_y_in_classes): raise ValueError("The target label(s) %s in y do not exist in the " "initial classes %s" % (y[~unique_y_in_classes], classes)) for y_i in unique_y: i = classes.searchsorted(y_i) X_i = X[y == y_i, :] if sample_weight is not None: sw_i = sample_weight[y == y_i] N_i = sw_i.sum() else: sw_i = None N_i = X_i.shape[0] new_theta, new_sigma = self._update_mean_variance( self.class_count_[i], self.theta_[i, :], self.sigma_[i, :], X_i, sw_i) self.theta_[i, :] = new_theta self.sigma_[i, :] = new_sigma self.class_count_[i] += N_i self.sigma_[:, :] += epsilon # Update if only no priors is provided if self.priors is None: # Empirical prior, with sample_weight taken into account self.class_prior_ = self.class_count_ / self.class_count_.sum() return self def _joint_log_likelihood(self, X): check_is_fitted(self, "classes_") X = check_array(X) joint_log_likelihood = [] for i in range(np.size(self.classes_)): jointi = np.log(self.class_prior_[i]) n_ij = - 0.5 * np.sum(np.log(2. 
* np.pi * self.sigma_[i, :])) n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) / (self.sigma_[i, :]), 1) joint_log_likelihood.append(jointi + n_ij) joint_log_likelihood = np.array(joint_log_likelihood).T return joint_log_likelihood class BaseDiscreteNB(BaseNB): """Abstract base class for naive Bayes on discrete/categorical data Any estimator based on this class should provide: __init__ _joint_log_likelihood(X) as per BaseNB """ def _update_class_log_prior(self, class_prior=None): n_classes = len(self.classes_) if class_prior is not None: if len(class_prior) != n_classes: raise ValueError("Number of priors must match number of" " classes.") self.class_log_prior_ = np.log(class_prior) elif self.fit_prior: # empirical prior, with sample_weight taken into account self.class_log_prior_ = (np.log(self.class_count_) - np.log(self.class_count_.sum())) else: self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes) def partial_fit(self, X, y, classes=None, sample_weight=None): """Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once. This method has some performance overhead hence it is better to call partial_fit on chunks of data that are as large as possible (as long as fitting in the memory budget) to hide the overhead. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. classes : array-like, shape = [n_classes] List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. sample_weight : array-like, shape = [n_samples], optional Weights applied to individual samples (1. 
for unweighted). Returns ------- self : object Returns self. """ X = check_array(X, accept_sparse='csr', dtype=np.float64) _, n_features = X.shape if _check_partial_fit_first_call(self, classes): # This is the first call to partial_fit: # initialize various cumulative counters n_effective_classes = len(classes) if len(classes) > 1 else 2 self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64) self.feature_count_ = np.zeros((n_effective_classes, n_features), dtype=np.float64) elif n_features != self.coef_.shape[1]: msg = "Number of features %d does not match previous data %d." raise ValueError(msg % (n_features, self.coef_.shape[-1])) Y = label_binarize(y, classes=self.classes_) if Y.shape[1] == 1: Y = np.concatenate((1 - Y, Y), axis=1) n_samples, n_classes = Y.shape if X.shape[0] != Y.shape[0]: msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible." raise ValueError(msg % (X.shape[0], y.shape[0])) # label_binarize() returns arrays with dtype=np.int64. # We convert it to np.float64 to support sample_weight consistently Y = Y.astype(np.float64) if sample_weight is not None: sample_weight = np.atleast_2d(sample_weight) Y *= check_array(sample_weight).T class_prior = self.class_prior # Count raw events from data before updating the class log prior # and feature log probas self._count(X, Y) # XXX: OPTIM: we could introduce a public finalization method to # be called by the user explicitly just once after several consecutive # calls to partial_fit and prior any call to predict[_[log_]proba] # to avoid computing the smooth log probas at each call to partial fit self._update_feature_log_prob() self._update_class_log_prior(class_prior=class_prior) return self def fit(self, X, y, sample_weight=None): """Fit Naive Bayes classifier according to X, y Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. 
y : array-like, shape = [n_samples] Target values. sample_weight : array-like, shape = [n_samples], optional Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns self. """ X, y = check_X_y(X, y, 'csr') _, n_features = X.shape labelbin = LabelBinarizer() Y = labelbin.fit_transform(y) self.classes_ = labelbin.classes_ if Y.shape[1] == 1: Y = np.concatenate((1 - Y, Y), axis=1) # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64. # We convert it to np.float64 to support sample_weight consistently; # this means we also don't have to cast X to floating point Y = Y.astype(np.float64) if sample_weight is not None: sample_weight = np.atleast_2d(sample_weight) Y *= check_array(sample_weight).T class_prior = self.class_prior # Count raw events from data before updating the class log prior # and feature log probas n_effective_classes = Y.shape[1] self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64) self.feature_count_ = np.zeros((n_effective_classes, n_features), dtype=np.float64) self._count(X, Y) self._update_feature_log_prob() self._update_class_log_prior(class_prior=class_prior) return self # XXX The following is a stopgap measure; we need to set the dimensions # of class_log_prior_ and feature_log_prob_ correctly. def _get_coef(self): return (self.feature_log_prob_[1:] if len(self.classes_) == 2 else self.feature_log_prob_) def _get_intercept(self): return (self.class_log_prior_[1:] if len(self.classes_) == 2 else self.class_log_prior_) coef_ = property(_get_coef) intercept_ = property(_get_intercept) class MultinomialNB(BaseDiscreteNB): """ Naive Bayes classifier for multinomial models The multinomial Naive Bayes classifier is suitable for classification with discrete features (e.g., word counts for text classification). The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work. 
Read more in the :ref:`User Guide <multinomial_naive_bayes>`. Parameters ---------- alpha : float, optional (default=1.0) Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing). fit_prior : boolean Whether to learn class prior probabilities or not. If false, a uniform prior will be used. class_prior : array-like, size (n_classes,) Prior probabilities of the classes. If specified the priors are not adjusted according to the data. Attributes ---------- class_log_prior_ : array, shape (n_classes, ) Smoothed empirical log probability for each class. intercept_ : property Mirrors ``class_log_prior_`` for interpreting MultinomialNB as a linear model. feature_log_prob_ : array, shape (n_classes, n_features) Empirical log probability of features given a class, ``P(x_i|y)``. coef_ : property Mirrors ``feature_log_prob_`` for interpreting MultinomialNB as a linear model. class_count_ : array, shape (n_classes,) Number of samples encountered for each class during fitting. This value is weighted by the sample weight when provided. feature_count_ : array, shape (n_classes, n_features) Number of samples encountered for each (class, feature) during fitting. This value is weighted by the sample weight when provided. Examples -------- >>> import numpy as np >>> X = np.random.randint(5, size=(6, 100)) >>> y = np.array([1, 2, 3, 4, 5, 6]) >>> from sklearn.naive_bayes import MultinomialNB >>> clf = MultinomialNB() >>> clf.fit(X, y) MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True) >>> print(clf.predict(X[2:3])) [3] Notes ----- For the rationale behind the names `coef_` and `intercept_`, i.e. naive Bayes as a linear classifier, see J. Rennie et al. (2003), Tackling the poor assumptions of naive Bayes text classifiers, ICML. References ---------- C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 234-265. 
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html """ def __init__(self, alpha=1.0, fit_prior=True, class_prior=None): self.alpha = alpha self.fit_prior = fit_prior self.class_prior = class_prior def _count(self, X, Y): """Count and smooth feature occurrences.""" if np.any((X.data if issparse(X) else X) < 0): raise ValueError("Input X must be non-negative") self.feature_count_ += safe_sparse_dot(Y.T, X) self.class_count_ += Y.sum(axis=0) def _update_feature_log_prob(self): """Apply smoothing to raw counts and recompute log probabilities""" smoothed_fc = self.feature_count_ + self.alpha smoothed_cc = smoothed_fc.sum(axis=1) self.feature_log_prob_ = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1))) def _joint_log_likelihood(self, X): """Calculate the posterior log probability of the samples X""" check_is_fitted(self, "classes_") X = check_array(X, accept_sparse='csr') return (safe_sparse_dot(X, self.feature_log_prob_.T) + self.class_log_prior_) class BernoulliNB(BaseDiscreteNB): """Naive Bayes classifier for multivariate Bernoulli models. Like MultinomialNB, this classifier is suitable for discrete data. The difference is that while MultinomialNB works with occurrence counts, BernoulliNB is designed for binary/boolean features. Read more in the :ref:`User Guide <bernoulli_naive_bayes>`. Parameters ---------- alpha : float, optional (default=1.0) Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing). binarize : float or None, optional Threshold for binarizing (mapping to booleans) of sample features. If None, input is presumed to already consist of binary vectors. fit_prior : boolean Whether to learn class prior probabilities or not. If false, a uniform prior will be used. class_prior : array-like, size=[n_classes,] Prior probabilities of the classes. If specified the priors are not adjusted according to the data. 
Attributes ---------- class_log_prior_ : array, shape = [n_classes] Log probability of each class (smoothed). feature_log_prob_ : array, shape = [n_classes, n_features] Empirical log probability of features given a class, P(x_i|y). class_count_ : array, shape = [n_classes] Number of samples encountered for each class during fitting. This value is weighted by the sample weight when provided. feature_count_ : array, shape = [n_classes, n_features] Number of samples encountered for each (class, feature) during fitting. This value is weighted by the sample weight when provided. Examples -------- >>> import numpy as np >>> X = np.random.randint(2, size=(6, 100)) >>> Y = np.array([1, 2, 3, 4, 4, 5]) >>> from sklearn.naive_bayes import BernoulliNB >>> clf = BernoulliNB() >>> clf.fit(X, Y) BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True) >>> print(clf.predict(X[2:3])) [3] References ---------- C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 234-265. http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html A. McCallum and K. Nigam (1998). A comparison of event models for naive Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for Text Categorization, pp. 41-48. V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS). 
""" def __init__(self, alpha=1.0, binarize=.0, fit_prior=True, class_prior=None): self.alpha = alpha self.binarize = binarize self.fit_prior = fit_prior self.class_prior = class_prior def _count(self, X, Y): """Count and smooth feature occurrences.""" if self.binarize is not None: X = binarize(X, threshold=self.binarize) self.feature_count_ += safe_sparse_dot(Y.T, X) self.class_count_ += Y.sum(axis=0) def _update_feature_log_prob(self): """Apply smoothing to raw counts and recompute log probabilities""" smoothed_fc = self.feature_count_ + self.alpha smoothed_cc = self.class_count_ + self.alpha * 2 self.feature_log_prob_ = (np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1))) def _joint_log_likelihood(self, X): """Calculate the posterior log probability of the samples X""" check_is_fitted(self, "classes_") X = check_array(X, accept_sparse='csr') if self.binarize is not None: X = binarize(X, threshold=self.binarize) n_classes, n_features = self.feature_log_prob_.shape n_samples, n_features_X = X.shape if n_features_X != n_features: raise ValueError("Expected input with %d features, got %d instead" % (n_features, n_features_X)) neg_prob = np.log(1 - np.exp(self.feature_log_prob_)) # Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T) jll += self.class_log_prior_ + neg_prob.sum(axis=1) return jll
unlicense
henridwyer/scikit-learn
sklearn/linear_model/omp.py
127
30417
"""Orthogonal matching pursuit algorithms """ # Author: Vlad Niculae # # License: BSD 3 clause import warnings from distutils.version import LooseVersion import numpy as np from scipy import linalg from scipy.linalg.lapack import get_lapack_funcs from .base import LinearModel, _pre_fit from ..base import RegressorMixin from ..utils import as_float_array, check_array, check_X_y from ..cross_validation import check_cv from ..externals.joblib import Parallel, delayed import scipy solve_triangular_args = {} if LooseVersion(scipy.__version__) >= LooseVersion('0.12'): # check_finite=False is an optimization available only in scipy >=0.12 solve_triangular_args = {'check_finite': False} premature = """ Orthogonal matching pursuit ended prematurely due to linear dependence in the dictionary. The requested precision might not have been met. """ def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True, return_path=False): """Orthogonal Matching Pursuit step using the Cholesky decomposition. Parameters ---------- X : array, shape (n_samples, n_features) Input dictionary. Columns are assumed to have unit norm. y : array, shape (n_samples,) Input targets n_nonzero_coefs : int Targeted number of non-zero elements tol : float Targeted squared error, if not None overrides n_nonzero_coefs. copy_X : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. Returns ------- gamma : array, shape (n_nonzero_coefs,) Non-zero elements of the solution idx : array, shape (n_nonzero_coefs,) Indices of the positions of the elements in gamma within the solution vector coef : array, shape (n_features, n_nonzero_coefs) The first k values of column k correspond to the coefficient value for the active features at that step. 
The lower left triangle contains garbage. Only returned if ``return_path=True``. n_active : int Number of active features at convergence. """ if copy_X: X = X.copy('F') else: # even if we are allowed to overwrite, still copy it if bad order X = np.asfortranarray(X) min_float = np.finfo(X.dtype).eps nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,)) potrs, = get_lapack_funcs(('potrs',), (X,)) alpha = np.dot(X.T, y) residual = y gamma = np.empty(0) n_active = 0 indices = np.arange(X.shape[1]) # keeping track of swapping max_features = X.shape[1] if tol is not None else n_nonzero_coefs if solve_triangular_args: # new scipy, don't need to initialize because check_finite=False L = np.empty((max_features, max_features), dtype=X.dtype) else: # old scipy, we need the garbage upper triangle to be non-Inf L = np.zeros((max_features, max_features), dtype=X.dtype) L[0, 0] = 1. if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(np.dot(X.T, residual))) if lam < n_active or alpha[lam] ** 2 < min_float: # atom already selected or inner product too small warnings.warn(premature, RuntimeWarning, stacklevel=2) break if n_active > 0: # Updates the Cholesky decomposition of X' X L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam]) linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, **solve_triangular_args) v = nrm2(L[n_active, :n_active]) ** 2 if 1 - v <= min_float: # selected atoms are dependent warnings.warn(premature, RuntimeWarning, stacklevel=2) break L[n_active, n_active] = np.sqrt(1 - v) X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam]) alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active] indices[n_active], indices[lam] = indices[lam], indices[n_active] n_active += 1 # solves LL'x = y as a composition of two triangular systems gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False) if return_path: coefs[:n_active, n_active - 1] = gamma 
residual = y - np.dot(X[:, :n_active], gamma) if tol is not None and nrm2(residual) ** 2 <= tol: break elif n_active == max_features: break if return_path: return gamma, indices[:n_active], coefs[:, :n_active], n_active else: return gamma, indices[:n_active], n_active def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None, copy_Gram=True, copy_Xy=True, return_path=False): """Orthogonal Matching Pursuit step on a precomputed Gram matrix. This function uses the the Cholesky decomposition method. Parameters ---------- Gram : array, shape (n_features, n_features) Gram matrix of the input data matrix Xy : array, shape (n_features,) Input targets n_nonzero_coefs : int Targeted number of non-zero elements tol_0 : float Squared norm of y, required if tol is not None. tol : float Targeted squared error, if not None overrides n_nonzero_coefs. copy_Gram : bool, optional Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, optional Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. Returns ------- gamma : array, shape (n_nonzero_coefs,) Non-zero elements of the solution idx : array, shape (n_nonzero_coefs,) Indices of the positions of the elements in gamma within the solution vector coefs : array, shape (n_features, n_nonzero_coefs) The first k values of column k correspond to the coefficient value for the active features at that step. The lower left triangle contains garbage. Only returned if ``return_path=True``. n_active : int Number of active features at convergence. 
""" Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram) if copy_Xy: Xy = Xy.copy() min_float = np.finfo(Gram.dtype).eps nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,)) potrs, = get_lapack_funcs(('potrs',), (Gram,)) indices = np.arange(len(Gram)) # keeping track of swapping alpha = Xy tol_curr = tol_0 delta = 0 gamma = np.empty(0) n_active = 0 max_features = len(Gram) if tol is not None else n_nonzero_coefs if solve_triangular_args: # new scipy, don't need to initialize because check_finite=False L = np.empty((max_features, max_features), dtype=Gram.dtype) else: # old scipy, we need the garbage upper triangle to be non-Inf L = np.zeros((max_features, max_features), dtype=Gram.dtype) L[0, 0] = 1. if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(alpha)) if lam < n_active or alpha[lam] ** 2 < min_float: # selected same atom twice, or inner product too small warnings.warn(premature, RuntimeWarning, stacklevel=3) break if n_active > 0: L[n_active, :n_active] = Gram[lam, :n_active] linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, **solve_triangular_args) v = nrm2(L[n_active, :n_active]) ** 2 if 1 - v <= min_float: # selected atoms are dependent warnings.warn(premature, RuntimeWarning, stacklevel=3) break L[n_active, n_active] = np.sqrt(1 - v) Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam]) Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam]) indices[n_active], indices[lam] = indices[lam], indices[n_active] Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active] n_active += 1 # solves LL'x = y as a composition of two triangular systems gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True, overwrite_b=False) if return_path: coefs[:n_active, n_active - 1] = gamma beta = np.dot(Gram[:, :n_active], gamma) alpha = Xy - beta if tol is not None: tol_curr += delta delta = np.inner(gamma, beta[:n_active]) tol_curr -= delta if abs(tol_curr) 
<= tol: break elif n_active == max_features: break if return_path: return gamma, indices[:n_active], coefs[:, :n_active], n_active else: return gamma, indices[:n_active], n_active def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False, copy_X=True, return_path=False, return_n_iter=False): """Orthogonal Matching Pursuit (OMP) Solves n_targets Orthogonal Matching Pursuit problems. An instance of the problem has the form: When parametrized by the number of non-zero coefficients using `n_nonzero_coefs`: argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs} When parametrized by error using the parameter `tol`: argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol Read more in the :ref:`User Guide <omp>`. Parameters ---------- X : array, shape (n_samples, n_features) Input data. Columns are assumed to have unit norm. y : array, shape (n_samples,) or (n_samples, n_targets) Input targets n_nonzero_coefs : int Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float Maximum norm of the residual. If not None, overrides n_nonzero_coefs. precompute : {True, False, 'auto'}, Whether to perform precomputations. Improves performance when n_targets or n_samples is very large. copy_X : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, optional default False Whether or not to return the number of iterations. Returns ------- coef : array, shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. 
In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis yields coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See also -------- OrthogonalMatchingPursuit orthogonal_mp_gram lars_path decomposition.sparse_encode Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ X = check_array(X, order='F', copy=copy_X) copy_X = False if y.ndim == 1: y = y.reshape(-1, 1) y = check_array(y) if y.shape[1] > 1: # subsequent targets will be affected copy_X = True if n_nonzero_coefs is None and tol is None: # default for n_nonzero_coefs is 0.1 * n_features # but at least one. 
n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1) if tol is not None and tol < 0: raise ValueError("Epsilon cannot be negative") if tol is None and n_nonzero_coefs <= 0: raise ValueError("The number of atoms must be positive") if tol is None and n_nonzero_coefs > X.shape[1]: raise ValueError("The number of atoms cannot be more than the number " "of features") if precompute == 'auto': precompute = X.shape[0] > X.shape[1] if precompute: G = np.dot(X.T, X) G = np.asfortranarray(G) Xy = np.dot(X.T, y) if tol is not None: norms_squared = np.sum((y ** 2), axis=0) else: norms_squared = None return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared, copy_Gram=copy_X, copy_Xy=False, return_path=return_path) if return_path: coef = np.zeros((X.shape[1], y.shape[1], X.shape[1])) else: coef = np.zeros((X.shape[1], y.shape[1])) n_iters = [] for k in range(y.shape[1]): out = _cholesky_omp( X, y[:, k], n_nonzero_coefs, tol, copy_X=copy_X, return_path=return_path) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, :len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if y.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef) def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None, norms_squared=None, copy_Gram=True, copy_Xy=True, return_path=False, return_n_iter=False): """Gram Orthogonal Matching Pursuit (OMP) Solves n_targets Orthogonal Matching Pursuit problems using only the Gram matrix X.T * X and the product X.T * y. Read more in the :ref:`User Guide <omp>`. Parameters ---------- Gram : array, shape (n_features, n_features) Gram matrix of the input data: X.T * X Xy : array, shape (n_features,) or (n_features, n_targets) Input targets multiplied by X: X.T * y n_nonzero_coefs : int Desired number of non-zero entries in the solution. 
If None (by default) this value is set to 10% of n_features. tol : float Maximum norm of the residual. If not None, overrides n_nonzero_coefs. norms_squared : array-like, shape (n_targets,) Squared L2 norms of the lines of y. Required if tol is not None. copy_Gram : bool, optional Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, optional Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, optional default False Whether or not to return the number of iterations. Returns ------- coef : array, shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis yields coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See also -------- OrthogonalMatchingPursuit orthogonal_mp lars_path decomposition.sparse_encode Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. 
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ Gram = check_array(Gram, order='F', copy=copy_Gram) Xy = np.asarray(Xy) if Xy.ndim > 1 and Xy.shape[1] > 1: # or subsequent target will be affected copy_Gram = True if Xy.ndim == 1: Xy = Xy[:, np.newaxis] if tol is not None: norms_squared = [norms_squared] if n_nonzero_coefs is None and tol is None: n_nonzero_coefs = int(0.1 * len(Gram)) if tol is not None and norms_squared is None: raise ValueError('Gram OMP needs the precomputed norms in order ' 'to evaluate the error sum of squares.') if tol is not None and tol < 0: raise ValueError("Epsilon cannot be negative") if tol is None and n_nonzero_coefs <= 0: raise ValueError("The number of atoms must be positive") if tol is None and n_nonzero_coefs > len(Gram): raise ValueError("The number of atoms cannot be more than the number " "of features") if return_path: coef = np.zeros((len(Gram), Xy.shape[1], len(Gram))) else: coef = np.zeros((len(Gram), Xy.shape[1])) n_iters = [] for k in range(Xy.shape[1]): out = _gram_omp( Gram, Xy[:, k], n_nonzero_coefs, norms_squared[k] if tol is not None else None, tol, copy_Gram=copy_Gram, copy_Xy=copy_Xy, return_path=return_path) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, :len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if Xy.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef) class OrthogonalMatchingPursuit(LinearModel, RegressorMixin): """Orthogonal Matching Pursuit model (OMP) Parameters ---------- n_nonzero_coefs : int, optional Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float, optional Maximum norm of the residual. If not None, overrides n_nonzero_coefs. 
fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional If False, the regressors X are assumed to be already normalized. precompute : {True, False, 'auto'}, default 'auto' Whether to use a precomputed Gram and Xy matrix to speed up calculations. Improves performance when `n_targets` or `n_samples` is very large. Note that if you already have such matrices, you can pass them directly to the fit method. Read more in the :ref:`User Guide <omp>`. Attributes ---------- coef_ : array, shape (n_features,) or (n_features, n_targets) parameter vector (w in the formula) intercept_ : float or array, shape (n_targets,) independent term in decision function. n_iter_ : int or array-like Number of active features across every target. Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf See also -------- orthogonal_mp orthogonal_mp_gram lars_path Lars LassoLars decomposition.sparse_encode """ def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True, normalize=True, precompute='auto'): self.n_nonzero_coefs = n_nonzero_coefs self.tol = tol self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. 
y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. Returns ------- self : object returns an instance of self. """ X, y = check_X_y(X, y, multi_output=True, y_numeric=True) n_features = X.shape[1] X, y, X_mean, y_mean, X_std, Gram, Xy = \ _pre_fit(X, y, None, self.precompute, self.normalize, self.fit_intercept, copy=True) if y.ndim == 1: y = y[:, np.newaxis] if self.n_nonzero_coefs is None and self.tol is None: # default for n_nonzero_coefs is 0.1 * n_features # but at least one. self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1) else: self.n_nonzero_coefs_ = self.n_nonzero_coefs if Gram is False: coef_, self.n_iter_ = orthogonal_mp( X, y, self.n_nonzero_coefs_, self.tol, precompute=False, copy_X=True, return_n_iter=True) else: norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None coef_, self.n_iter_ = orthogonal_mp_gram( Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_, tol=self.tol, norms_squared=norms_sq, copy_Gram=True, copy_Xy=True, return_n_iter=True) self.coef_ = coef_.T self._set_intercept(X_mean, y_mean, X_std) return self def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True, fit_intercept=True, normalize=True, max_iter=100): """Compute the residues on left-out data for a full LARS path Parameters ----------- X_train : array, shape (n_samples, n_features) The data to fit the LARS on y_train : array, shape (n_samples) The target variable to fit LARS on X_test : array, shape (n_samples, n_features) The data to compute the residues on y_test : array, shape (n_samples) The target variable to compute the residues on copy : boolean, optional Whether X_train, X_test, y_train and y_test should be copied. If False, they may be overwritten. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). 
normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. max_iter : integer, optional Maximum numbers of iterations to perform, therefore maximum features to include. 100 by default. Returns ------- residues: array, shape (n_samples, max_features) Residues of the prediction on the test data """ if copy: X_train = X_train.copy() y_train = y_train.copy() X_test = X_test.copy() y_test = y_test.copy() if fit_intercept: X_mean = X_train.mean(axis=0) X_train -= X_mean X_test -= X_mean y_mean = y_train.mean(axis=0) y_train = as_float_array(y_train, copy=False) y_train -= y_mean y_test = as_float_array(y_test, copy=False) y_test -= y_mean if normalize: norms = np.sqrt(np.sum(X_train ** 2, axis=0)) nonzeros = np.flatnonzero(norms) X_train[:, nonzeros] /= norms[nonzeros] coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None, precompute=False, copy_X=False, return_path=True) if coefs.ndim == 1: coefs = coefs[:, np.newaxis] if normalize: coefs[nonzeros] /= norms[nonzeros][:, np.newaxis] return np.dot(coefs.T, X_test.T) - y_test class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin): """Cross-validated Orthogonal Matching Pursuit model (OMP) Parameters ---------- copy : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional If False, the regressors X are assumed to be already normalized. max_iter : integer, optional Maximum numbers of iterations to perform, therefore maximum features to include. 10% of ``n_features`` but at least 5 if available. cv : cross-validation generator, optional see :mod:`sklearn.cross_validation`. 
If ``None`` is passed, default to a 5-fold strategy n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs verbose : boolean or integer, optional Sets the verbosity amount Read more in the :ref:`User Guide <omp>`. Attributes ---------- intercept_ : float or array, shape (n_targets,) Independent term in decision function. coef_ : array, shape (n_features,) or (n_features, n_targets) Parameter vector (w in the problem formulation). n_nonzero_coefs_ : int Estimated number of non-zero coefficients giving the best mean squared error over the cross-validation folds. n_iter_ : int or array-like Number of active features across every target for the model refit with the best hyperparameters got by cross-validating across all folds. See also -------- orthogonal_mp orthogonal_mp_gram lars_path Lars LassoLars OrthogonalMatchingPursuit LarsCV LassoLarsCV decomposition.sparse_encode """ def __init__(self, copy=True, fit_intercept=True, normalize=True, max_iter=None, cv=None, n_jobs=1, verbose=False): self.copy = copy self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.cv = cv self.n_jobs = n_jobs self.verbose = verbose def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape [n_samples, n_features] Training data. y : array-like, shape [n_samples] Target values. Returns ------- self : object returns an instance of self. 
""" X, y = check_X_y(X, y, y_numeric=True) X = as_float_array(X, copy=False, force_all_finite=False) cv = check_cv(self.cv, X, y, classifier=False) max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1]) if not self.max_iter else self.max_iter) cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( delayed(_omp_path_residues)( X[train], y[train], X[test], y[test], self.copy, self.fit_intercept, self.normalize, max_iter) for train, test in cv) min_early_stop = min(fold.shape[0] for fold in cv_paths) mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1) for fold in cv_paths]) best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1 self.n_nonzero_coefs_ = best_n_nonzero_coefs omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs, fit_intercept=self.fit_intercept, normalize=self.normalize) omp.fit(X, y) self.coef_ = omp.coef_ self.intercept_ = omp.intercept_ self.n_iter_ = omp.n_iter_ return self
bsd-3-clause
jreback/pandas
pandas/tests/arrays/boolean/test_arithmetic.py
6
3586
import operator import numpy as np import pytest import pandas as pd import pandas._testing as tm from pandas.arrays import FloatingArray @pytest.fixture def data(): return pd.array( [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False], dtype="boolean", ) @pytest.fixture def left_array(): return pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") @pytest.fixture def right_array(): return pd.array([True, False, None] * 3, dtype="boolean") # Basic test for the arithmetic array ops # ----------------------------------------------------------------------------- @pytest.mark.parametrize( "opname, exp", [ ("add", [True, True, None, True, False, None, None, None, None]), ("mul", [True, False, None, False, False, None, None, None, None]), ], ids=["add", "mul"], ) def test_add_mul(left_array, right_array, opname, exp): op = getattr(operator, opname) result = op(left_array, right_array) expected = pd.array(exp, dtype="boolean") tm.assert_extension_array_equal(result, expected) def test_sub(left_array, right_array): msg = ( r"numpy boolean subtract, the `-` operator, is (?:deprecated|not supported), " r"use the bitwise_xor, the `\^` operator, or the logical_xor function instead\." ) with pytest.raises(TypeError, match=msg): left_array - right_array def test_div(left_array, right_array): result = left_array / right_array expected = FloatingArray( np.array( [1.0, np.inf, np.nan, 0.0, np.nan, np.nan, np.nan, np.nan, np.nan], dtype="float64", ), np.array([False, False, True, False, False, True, True, True, True]), ) tm.assert_extension_array_equal(result, expected) @pytest.mark.parametrize( "opname", [ "floordiv", "mod", pytest.param( "pow", marks=pytest.mark.xfail(reason="TODO follow int8 behaviour? 
GH34686") ), ], ) def test_op_int8(left_array, right_array, opname): op = getattr(operator, opname) result = op(left_array, right_array) expected = op(left_array.astype("Int8"), right_array.astype("Int8")) tm.assert_extension_array_equal(result, expected) # Test generic characteristics / errors # ----------------------------------------------------------------------------- def test_error_invalid_values(data, all_arithmetic_operators): # invalid ops op = all_arithmetic_operators s = pd.Series(data) ops = getattr(s, op) # invalid scalars msg = ( "did not contain a loop with signature matching types|" "BooleanArray cannot perform the operation|" "not supported for the input types, and the inputs could not be safely coerced " "to any supported types according to the casting rule ''safe''" ) with pytest.raises(TypeError, match=msg): ops("foo") msg = ( r"unsupported operand type\(s\) for|" "Concatenation operation is not implemented for NumPy arrays" ) with pytest.raises(TypeError, match=msg): ops(pd.Timestamp("20180101")) # invalid array-likes if op not in ("__mul__", "__rmul__"): # TODO(extension) numpy's mul with object array sees booleans as numbers msg = ( r"unsupported operand type\(s\) for|can only concatenate str|" "not all arguments converted during string formatting" ) with pytest.raises(TypeError, match=msg): ops(pd.Series("foo", index=s.index))
bsd-3-clause
akrherz/iem
cgi-bin/request/isusm.py
1
14930
"""Download interface for ISU-SM data.""" import datetime from io import StringIO, BytesIO import pandas as pd import psycopg2.extras from paste.request import parse_formvars from pyiem.util import get_dbconn, c2f, mm2inch EXL = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" MISSING = {"", "M", "-99"} def get_stations(form): """Figure out which stations were requested""" stations = form.getall("sts") if not stations: stations.append("XXXXX") if len(stations) == 1: stations.append("XXXXX") return stations def get_dates(form): """Get the start and end dates requested""" year1 = form.get("year1", 2013) month1 = form.get("month1", 1) day1 = form.get("day1", 1) year2 = form.get("year2", 2013) month2 = form.get("month2", 1) day2 = form.get("day2", 1) try: sts = datetime.datetime(int(year1), int(month1), int(day1)) ets = datetime.datetime(int(year2), int(month2), int(day2)) except Exception: return None, None if sts > ets: sts, ets = ets, sts if sts == ets: ets = sts + datetime.timedelta(days=1) return sts, ets def get_delimiter(form): """Figure out what is the requested delimiter""" d = form.getvalue("delim", "comma") if d == "comma": return "," return "\t" def fetch_daily(form, cols): """Return a fetching of daily data""" pgconn = get_dbconn("isuag", user="nobody") cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor) sts, ets = get_dates(form) if sts is None: return None, None stations = get_stations(form) if not cols: cols = [ "station", "valid", "high", "low", "rh_min", "rh", "rh_max", "gdd50", "solar", "precip", "sped", "gust", "et", "soil04t", "soil12t", "soil24t", "soil50t", "soil12vwc", "soil24vwc", "soil50vwc", ] else: cols.insert(0, "valid") cols.insert(0, "station") sql = """ --- Get the Daily Max/Min soil values WITH soils as ( SELECT station, date(valid) as date, min(rh_avg_qc) as rh_min, avg(rh_avg_qc) as rh, max(rh_avg_qc) as rh_max, min(t4_c_avg_qc) as soil04tn, max(t4_c_avg_qc) as soil04tx, min(t12_c_avg_qc) as soil12tn, 
max(t12_c_avg_qc) as soil12tx, min(t24_c_avg_qc) as soil24tn, max(t24_c_avg_qc) as soil24tx, min(t50_c_avg_qc) as soil50tn, max(t50_c_avg_qc) as soil50tx from sm_hourly where valid >= '%s 00:00' and valid < '%s 00:00' and station in %s GROUP by station, date ), daily as ( SELECT station, valid, tair_c_max_qc, tair_c_min_qc, slrkj_tot_qc, rain_in_tot_qc, dailyet_qc, t4_c_avg_qc, t12_c_avg_qc, t24_c_avg_qc, t50_c_avg_qc, calc_vwc_12_avg_qc, calc_vwc_24_avg_qc, calc_vwc_50_avg_qc, ws_mps_s_wvt_qc, ws_mps_max_qc, lwmv_1_qc, lwmv_2_qc, lwmdry_1_tot_qc, lwmcon_1_tot_qc, lwmwet_1_tot_qc, lwmdry_2_tot_qc, lwmcon_2_tot_qc, lwmwet_2_tot_qc, bpres_avg_qc from sm_daily WHERE valid >= '%s 00:00' and valid < '%s 00:00' and station in %s ) SELECT d.station, d.valid, s.date, s.soil04tn, s.soil04tx, s.rh, s.rh_min, s.rh_max, s.soil12tn, s.soil12tx, s.soil24tn, s.soil24tx, s.soil50tn, s.soil50tx, tair_c_max_qc, tair_c_min_qc, slrkj_tot_qc, rain_in_tot_qc, dailyet_qc, t4_c_avg_qc, t12_c_avg_qc, t24_c_avg_qc, t50_c_avg_qc, calc_vwc_12_avg_qc, calc_vwc_24_avg_qc, calc_vwc_50_avg_qc, ws_mps_s_wvt_qc, ws_mps_max_qc, round(gddxx(50, 86, c2f( tair_c_max_qc ), c2f( tair_c_min_qc ))::numeric,1) as gdd50, lwmv_1_qc, lwmv_2_qc, lwmdry_1_tot_qc, lwmcon_1_tot_qc, lwmwet_1_tot_qc, lwmdry_2_tot_qc, lwmcon_2_tot_qc, lwmwet_2_tot_qc, bpres_avg_qc FROM soils s JOIN daily d on (d.station = s.station and s.date = d.valid) ORDER by d.valid ASC """ % ( sts.strftime("%Y-%m-%d"), ets.strftime("%Y-%m-%d"), str(tuple(stations)), sts.strftime("%Y-%m-%d"), ets.strftime("%Y-%m-%d"), str(tuple(stations)), ) cursor.execute(sql) values = [] miss = form.get("missing", "-99") assert miss in MISSING for row in cursor: valid = row["valid"] station = row["station"] high = ( c2f(row["tair_c_max_qc"]) if row["tair_c_max_qc"] is not None else miss ) low = ( c2f(row["tair_c_min_qc"]) if row["tair_c_min_qc"] is not None else miss ) precip = row["rain_in_tot_qc"] if row["rain_in_tot_qc"] > 0 else 0 et = ( 
mm2inch(row["dailyet_qc"]) if row["dailyet_qc"] is not None and row["dailyet_qc"] > 0 else 0 ) soil04t = ( c2f(row["t4_c_avg_qc"]) if row["t4_c_avg_qc"] is not None else miss ) soil04tn = ( c2f(row["soil04tn"]) if row["soil04tn"] is not None else miss ) soil04tx = ( c2f(row["soil04tx"]) if row["soil04tx"] is not None else miss ) soil12t = ( c2f(row["t12_c_avg_qc"]) if row["t12_c_avg_qc"] is not None else miss ) soil12tn = ( c2f(row["soil12tn"]) if row["soil12tn"] is not None else miss ) soil12tx = ( c2f(row["soil12tx"]) if row["soil12tx"] is not None else miss ) soil24t = ( c2f(row["t24_c_avg_qc"]) if row["t24_c_avg_qc"] is not None else miss ) soil24tn = ( c2f(row["soil24tn"]) if row["soil24tn"] is not None else miss ) soil24tx = ( c2f(row["soil24tx"]) if row["soil24tx"] is not None else miss ) soil50t = ( c2f(row["t50_c_avg_qc"]) if row["t50_c_avg_qc"] is not None else miss ) soil50tn = ( c2f(row["soil50tn"]) if row["soil50tn"] is not None else miss ) soil50tx = ( c2f(row["soil50tx"]) if row["soil50tx"] is not None else miss ) soil12vwc = ( row["calc_vwc_12_avg_qc"] if row["calc_vwc_12_avg_qc"] is not None else miss ) soil24vwc = ( row["calc_vwc_24_avg_qc"] if row["calc_vwc_24_avg_qc"] is not None else miss ) soil50vwc = ( row["calc_vwc_50_avg_qc"] if row["calc_vwc_50_avg_qc"] is not None else miss ) speed = ( row["ws_mps_s_wvt_qc"] * 2.23 if row["ws_mps_s_wvt_qc"] is not None else miss ) gust = ( row["ws_mps_max_qc"] * 2.23 if row["ws_mps_max_qc"] is not None else miss ) values.append( dict( station=station, valid=valid.strftime("%Y-%m-%d"), high=high, low=low, solar=row["slrkj_tot_qc"] / 1000.0, rh=row["rh"], rh_min=row["rh_min"], rh_max=row["rh_max"], gdd50=row["gdd50"], precip=precip, sped=speed, gust=gust, et=et, soil04t=soil04t, soil12t=soil12t, soil24t=soil24t, soil50t=soil50t, soil04tn=soil04tn, soil04tx=soil04tx, soil12tn=soil12tn, soil12tx=soil12tx, soil24tn=soil24tn, soil24tx=soil24tx, soil50tn=soil50tn, soil50tx=soil50tx, soil12vwc=soil12vwc, 
soil24vwc=soil24vwc, soil50vwc=soil50vwc, lwmv_1=row["lwmv_1_qc"], lwmv_2=row["lwmv_2_qc"], lwmdry_1_tot=row["lwmdry_1_tot_qc"], lwmcon_1_tot=row["lwmcon_1_tot_qc"], lwmwet_1_tot=row["lwmwet_1_tot_qc"], lwmdry_2_tot=row["lwmdry_2_tot_qc"], lwmcon_2_tot=row["lwmcon_2_tot_qc"], lwmwet_2_tot=row["lwmwet_2_tot_qc"], bpres_avg=row["bpres_avg_qc"], ) ) return values, cols def fetch_hourly(form, cols): """Return a fetching of hourly data""" pgconn = get_dbconn("isuag", user="nobody") cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor) sts, ets = get_dates(form) if sts is None: return None, None stations = get_stations(form) if not cols: cols = [ "station", "valid", "tmpf", "relh", "solar", "precip", "speed", "drct", "et", "soil04t", "soil12t", "soil24t", "soil50t", "soil12vwc", "soil24vwc", "soil50vwc", ] else: cols.insert(0, "valid") cols.insert(0, "station") table = "sm_hourly" sqlextra = ", null as bp_mb_qc, etalfalfa_qc " if form.get("timeres") == "minute": table = "sm_minute" sqlextra = ", bp_mb_qc, null as etalfalfa_qc" else: if "bp_mb" in cols: cols.remove("bp_mb") cursor.execute( f"""SELECT station, valid, tair_c_avg_qc, rh_avg_qc, slrkj_tot_qc, rain_in_tot_qc, ws_mps_s_wvt_qc, winddir_d1_wvt_qc, t4_c_avg_qc, t12_c_avg_qc, t24_c_avg_qc, t50_c_avg_qc, calc_vwc_12_avg_qc, calc_vwc_24_avg_qc, calc_vwc_50_avg_qc, lwmv_1_qc, lwmv_2_qc, lwmdry_1_tot_qc, lwmcon_1_tot_qc, lwmwet_1_tot_qc, lwmdry_2_tot_qc, lwmcon_2_tot_qc, lwmwet_2_tot_qc, bpres_avg_qc {sqlextra} from {table} WHERE valid >= '%s 00:00' and valid < '%s 00:00' and station in %s ORDER by valid ASC """ % ( sts.strftime("%Y-%m-%d"), ets.strftime("%Y-%m-%d"), str(tuple(stations)), ) ) values = [] miss = form.get("missing", "-99") assert miss in MISSING for row in cursor: valid = row["valid"] station = row["station"] tmpf = ( c2f(row["tair_c_avg_qc"]) if row["tair_c_avg_qc"] is not None else miss ) relh = row["rh_avg_qc"] if row["rh_avg_qc"] is not None else -99 solar = ( (row["slrkj_tot_qc"] * 
1000.0) if row["slrkj_tot_qc"] is not None else miss ) precip = ( row["rain_in_tot_qc"] if row["rain_in_tot_qc"] is not None else miss ) speed = ( row["ws_mps_s_wvt_qc"] * 2.23 if row["ws_mps_s_wvt_qc"] is not None else miss ) drct = ( row["winddir_d1_wvt_qc"] if row["winddir_d1_wvt_qc"] is not None else miss ) et = ( mm2inch(row["etalfalfa_qc"]) if row["etalfalfa_qc"] is not None else miss ) soil04t = ( c2f(row["t4_c_avg_qc"]) if row["t4_c_avg_qc"] is not None else miss ) soil12t = ( c2f(row["t12_c_avg_qc"]) if row["t12_c_avg_qc"] is not None else miss ) soil24t = ( c2f(row["t24_c_avg_qc"]) if row["t24_c_avg_qc"] is not None else miss ) soil50t = ( c2f(row["t50_c_avg_qc"]) if row["t50_c_avg_qc"] is not None else miss ) soil12vwc = ( row["calc_vwc_12_avg_qc"] if row["calc_vwc_12_avg_qc"] is not None else miss ) soil24vwc = ( row["calc_vwc_24_avg_qc"] if row["calc_vwc_24_avg_qc"] is not None else miss ) soil50vwc = ( row["calc_vwc_50_avg_qc"] if row["calc_vwc_50_avg_qc"] is not None else miss ) bp_mb = row["bp_mb_qc"] if row["bp_mb_qc"] is not None else -99 values.append( dict( station=station, valid=valid.strftime("%Y-%m-%d %H:%M"), tmpf=tmpf, relh=relh, solar=solar, precip=precip, speed=speed, drct=drct, et=et, soil04t=soil04t, soil12t=soil12t, soil24t=soil24t, soil50t=soil50t, soil12vwc=soil12vwc, soil24vwc=soil24vwc, soil50vwc=soil50vwc, lwmv_1=row["lwmv_1_qc"], lwmv_2=row["lwmv_2_qc"], lwmdry_1_tot=row["lwmdry_1_tot_qc"], lwmcon_1_tot=row["lwmcon_1_tot_qc"], lwmwet_1_tot=row["lwmwet_1_tot_qc"], lwmdry_2_tot=row["lwmdry_2_tot_qc"], lwmcon_2_tot=row["lwmcon_2_tot_qc"], lwmwet_2_tot=row["lwmwet_2_tot_qc"], bpres_avg=row["bpres_avg_qc"], bp_mb=bp_mb, ) ) return values, cols def application(environ, start_response): """Do things""" form = parse_formvars(environ) mode = form.get("mode", "hourly") cols = form.getall("vars") fmt = form.get("format", "csv").lower() todisk = form.get("todisk", "no") if mode == "hourly": values, cols = fetch_hourly(form, cols) else: 
values, cols = fetch_daily(form, cols) if not values: start_response("200 OK", [("Content-type", "text/plain")]) return [b"Sorry, no data found for this query."] df = pd.DataFrame(values) if fmt == "excel": bio = BytesIO() # pylint: disable=abstract-class-instantiated with pd.ExcelWriter(bio, engine="xlsxwriter") as writer: df.to_excel(writer, "Data", columns=cols, index=False) headers = [ ("Content-type", EXL), ("Content-disposition", "attachment; Filename=isusm.xlsx"), ] start_response("200 OK", headers) return [bio.getvalue()] delim = "," if fmt == "comma" else "\t" sio = StringIO() df.to_csv(sio, index=False, columns=cols, sep=delim) if todisk == "yes": headers = [ ("Content-type", "application/octet-stream"), ("Content-Disposition", "attachment; filename=isusm.txt"), ] else: headers = [("Content-type", "text/plain")] start_response("200 OK", headers) return [sio.getvalue().encode("ascii")]
mit
fredrikw/scipy
scipy/integrate/quadrature.py
33
28087
from __future__ import division, print_function, absolute_import import numpy as np import math import warnings # trapz is a public function for scipy.integrate, # even though it's actually a numpy function. from numpy import trapz from scipy.special.orthogonal import p_roots from scipy.special import gammaln from scipy._lib.six import xrange __all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb', 'cumtrapz', 'newton_cotes'] class AccuracyWarning(Warning): pass def _cached_p_roots(n): """ Cache p_roots results to speed up calls of the fixed_quad function. """ if n in _cached_p_roots.cache: return _cached_p_roots.cache[n] _cached_p_roots.cache[n] = p_roots(n) return _cached_p_roots.cache[n] _cached_p_roots.cache = dict() def fixed_quad(func, a, b, args=(), n=5): """ Compute a definite integral using fixed-order Gaussian quadrature. Integrate `func` from `a` to `b` using Gaussian quadrature of order `n`. Parameters ---------- func : callable A Python function or method to integrate (must accept vector inputs). a : float Lower limit of integration. b : float Upper limit of integration. args : tuple, optional Extra arguments to pass to function, if any. n : int, optional Order of quadrature integration. Default is 5. 
Returns ------- val : float Gaussian quadrature approximation to the integral none : None Statically returned value of None See Also -------- quad : adaptive quadrature using QUADPACK dblquad : double integrals tplquad : triple integrals romberg : adaptive Romberg quadrature quadrature : adaptive Gaussian quadrature romb : integrators for sampled data simps : integrators for sampled data cumtrapz : cumulative integration for sampled data ode : ODE integrator odeint : ODE integrator """ x, w = _cached_p_roots(n) x = np.real(x) if np.isinf(a) or np.isinf(b): raise ValueError("Gaussian quadrature is only available for " "finite limits.") y = (b-a)*(x+1)/2.0 + a return (b-a)/2.0 * np.sum(w*func(y, *args), axis=0), None def vectorize1(func, args=(), vec_func=False): """Vectorize the call to a function. This is an internal utility function used by `romberg` and `quadrature` to create a vectorized version of a function. If `vec_func` is True, the function `func` is assumed to take vector arguments. Parameters ---------- func : callable User defined function. args : tuple, optional Extra arguments for the function. vec_func : bool, optional True if the function func takes vector arguments. Returns ------- vfunc : callable A function that will take a vector argument and return the result. """ if vec_func: def vfunc(x): return func(x, *args) else: def vfunc(x): if np.isscalar(x): return func(x, *args) x = np.asarray(x) # call with first point to get output type y0 = func(x[0], *args) n = len(x) dtype = getattr(y0, 'dtype', type(y0)) output = np.empty((n,), dtype=dtype) output[0] = y0 for i in xrange(1, n): output[i] = func(x[i], *args) return output return vfunc def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50, vec_func=True, miniter=1): """ Compute a definite integral using fixed-tolerance Gaussian quadrature. Integrate `func` from `a` to `b` using Gaussian quadrature with absolute tolerance `tol`. 
Parameters ---------- func : function A Python function or method to integrate. a : float Lower limit of integration. b : float Upper limit of integration. args : tuple, optional Extra arguments to pass to function. tol, rtol : float, optional Iteration stops when error between last two iterates is less than `tol` OR the relative change is less than `rtol`. maxiter : int, optional Maximum order of Gaussian quadrature. vec_func : bool, optional True or False if func handles arrays as arguments (is a "vector" function). Default is True. miniter : int, optional Minimum order of Gaussian quadrature. Returns ------- val : float Gaussian quadrature approximation (within tolerance) to integral. err : float Difference between last two estimates of the integral. See also -------- romberg: adaptive Romberg quadrature fixed_quad: fixed-order Gaussian quadrature quad: adaptive quadrature using QUADPACK dblquad: double integrals tplquad: triple integrals romb: integrator for sampled data simps: integrator for sampled data cumtrapz: cumulative integration for sampled data ode: ODE integrator odeint: ODE integrator """ if not isinstance(args, tuple): args = (args,) vfunc = vectorize1(func, args, vec_func=vec_func) val = np.inf err = np.inf maxiter = max(miniter+1, maxiter) for n in xrange(miniter, maxiter+1): newval = fixed_quad(vfunc, a, b, (), n)[0] err = abs(newval-val) val = newval if err < tol or err < rtol*abs(val): break else: warnings.warn( "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err), AccuracyWarning) return val, err def tupleset(t, i, value): l = list(t) l[i] = value return tuple(l) def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None): """ Cumulatively integrate y(x) using the composite trapezoidal rule. Parameters ---------- y : array_like Values to integrate. x : array_like, optional The coordinate to integrate along. If None (default), use spacing `dx` between consecutive elements in `y`. dx : int, optional Spacing between elements of `y`. 
Only used if `x` is None. axis : int, optional Specifies the axis to cumulate. Default is -1 (last axis). initial : scalar, optional If given, uses this value as the first value in the returned result. Typically this value should be 0. Default is None, which means no value at ``x[0]`` is returned and `res` has one element less than `y` along the axis of integration. Returns ------- res : ndarray The result of cumulative integration of `y` along `axis`. If `initial` is None, the shape is such that the axis of integration has one less value than `y`. If `initial` is given, the shape is equal to that of `y`. See Also -------- numpy.cumsum, numpy.cumprod quad: adaptive quadrature using QUADPACK romberg: adaptive Romberg quadrature quadrature: adaptive Gaussian quadrature fixed_quad: fixed-order Gaussian quadrature dblquad: double integrals tplquad: triple integrals romb: integrators for sampled data ode: ODE integrators odeint: ODE integrators Examples -------- >>> from scipy import integrate >>> import matplotlib.pyplot as plt >>> x = np.linspace(-2, 2, num=20) >>> y = x >>> y_int = integrate.cumtrapz(y, x, initial=0) >>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-') >>> plt.show() """ y = np.asarray(y) if x is None: d = dx else: x = np.asarray(x) if x.ndim == 1: d = np.diff(x) # reshape to correct shape shape = [1] * y.ndim shape[axis] = -1 d = d.reshape(shape) elif len(x.shape) != len(y.shape): raise ValueError("If given, shape of x must be 1-d or the " "same as y.") else: d = np.diff(x, axis=axis) if d.shape[axis] != y.shape[axis] - 1: raise ValueError("If given, length of x along axis must be the " "same as y.") nd = len(y.shape) slice1 = tupleset((slice(None),)*nd, axis, slice(1, None)) slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1)) res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis) if initial is not None: if not np.isscalar(initial): raise ValueError("`initial` parameter should be a scalar.") shape = list(res.shape) shape[axis] = 1 
res = np.concatenate([np.ones(shape, dtype=res.dtype) * initial, res], axis=axis) return res def _basic_simps(y, start, stop, x, dx, axis): nd = len(y.shape) if start is None: start = 0 step = 2 slice_all = (slice(None),)*nd slice0 = tupleset(slice_all, axis, slice(start, stop, step)) slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step)) slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step)) if x is None: # Even spaced Simpson's rule. result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]), axis=axis) else: # Account for possibly different spacings. # Simpson's rule changes a bit. h = np.diff(x, axis=axis) sl0 = tupleset(slice_all, axis, slice(start, stop, step)) sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step)) h0 = h[sl0] h1 = h[sl1] hsum = h0 + h1 hprod = h0 * h1 h0divh1 = h0 / h1 tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) + y[slice1]*hsum*hsum/hprod + y[slice2]*(2-h0divh1)) result = np.sum(tmp, axis=axis) return result def simps(y, x=None, dx=1, axis=-1, even='avg'): """ Integrate y(x) using samples along the given axis and the composite Simpson's rule. If x is None, spacing of dx is assumed. If there are an even number of samples, N, then there are an odd number of intervals (N-1), but Simpson's rule requires an even number of intervals. The parameter 'even' controls how this is handled. Parameters ---------- y : array_like Array to be integrated. x : array_like, optional If given, the points at which `y` is sampled. dx : int, optional Spacing of integration points along axis of `y`. Only used when `x` is None. Default is 1. axis : int, optional Axis along which to integrate. Default is the last axis. even : {'avg', 'first', 'str'}, optional 'avg' : Average two results:1) use the first N-2 intervals with a trapezoidal rule on the last interval and 2) use the last N-2 intervals with a trapezoidal rule on the first interval. 'first' : Use Simpson's rule for the first N-2 intervals with a trapezoidal rule on the last interval. 
'last' : Use Simpson's rule for the last N-2 intervals with a trapezoidal rule on the first interval. See Also -------- quad: adaptive quadrature using QUADPACK romberg: adaptive Romberg quadrature quadrature: adaptive Gaussian quadrature fixed_quad: fixed-order Gaussian quadrature dblquad: double integrals tplquad: triple integrals romb: integrators for sampled data cumtrapz: cumulative integration for sampled data ode: ODE integrators odeint: ODE integrators Notes ----- For an odd number of samples that are equally spaced the result is exact if the function is a polynomial of order 3 or less. If the samples are not equally spaced, then the result is exact only if the function is a polynomial of order 2 or less. """ y = np.asarray(y) nd = len(y.shape) N = y.shape[axis] last_dx = dx first_dx = dx returnshape = 0 if x is not None: x = np.asarray(x) if len(x.shape) == 1: shapex = [1] * nd shapex[axis] = x.shape[0] saveshape = x.shape returnshape = 1 x = x.reshape(tuple(shapex)) elif len(x.shape) != len(y.shape): raise ValueError("If given, shape of x must be 1-d or the " "same as y.") if x.shape[axis] != N: raise ValueError("If given, length of x along axis must be the " "same as y.") if N % 2 == 0: val = 0.0 result = 0.0 slice1 = (slice(None),)*nd slice2 = (slice(None),)*nd if even not in ['avg', 'last', 'first']: raise ValueError("Parameter 'even' must be " "'avg', 'last', or 'first'.") # Compute using Simpson's rule on first intervals if even in ['avg', 'first']: slice1 = tupleset(slice1, axis, -1) slice2 = tupleset(slice2, axis, -2) if x is not None: last_dx = x[slice1] - x[slice2] val += 0.5*last_dx*(y[slice1]+y[slice2]) result = _basic_simps(y, 0, N-3, x, dx, axis) # Compute using Simpson's rule on last set of intervals if even in ['avg', 'last']: slice1 = tupleset(slice1, axis, 0) slice2 = tupleset(slice2, axis, 1) if x is not None: first_dx = x[tuple(slice2)] - x[tuple(slice1)] val += 0.5*first_dx*(y[slice2]+y[slice1]) result += _basic_simps(y, 1, N-2, x, dx, 
axis) if even == 'avg': val /= 2.0 result /= 2.0 result = result + val else: result = _basic_simps(y, 0, N-2, x, dx, axis) if returnshape: x = x.reshape(saveshape) return result def romb(y, dx=1.0, axis=-1, show=False): """ Romberg integration using samples of a function. Parameters ---------- y : array_like A vector of ``2**k + 1`` equally-spaced samples of a function. dx : float, optional The sample spacing. Default is 1. axis : int, optional The axis along which to integrate. Default is -1 (last axis). show : bool, optional When `y` is a single 1-D array, then if this argument is True print the table showing Richardson extrapolation from the samples. Default is False. Returns ------- romb : ndarray The integrated result for `axis`. See also -------- quad : adaptive quadrature using QUADPACK romberg : adaptive Romberg quadrature quadrature : adaptive Gaussian quadrature fixed_quad : fixed-order Gaussian quadrature dblquad : double integrals tplquad : triple integrals simps : integrators for sampled data cumtrapz : cumulative integration for sampled data ode : ODE integrators odeint : ODE integrators """ y = np.asarray(y) nd = len(y.shape) Nsamps = y.shape[axis] Ninterv = Nsamps-1 n = 1 k = 0 while n < Ninterv: n <<= 1 k += 1 if n != Ninterv: raise ValueError("Number of samples must be one plus a " "non-negative power of 2.") R = {} slice_all = (slice(None),) * nd slice0 = tupleset(slice_all, axis, 0) slicem1 = tupleset(slice_all, axis, -1) h = Ninterv * np.asarray(dx, dtype=float) R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h slice_R = slice_all start = stop = step = Ninterv for i in xrange(1, k+1): start >>= 1 slice_R = tupleset(slice_R, axis, slice(start, stop, step)) step >>= 1 R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis)) for j in xrange(1, i+1): prev = R[(i, j-1)] R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1) h /= 2.0 if show: if not np.isscalar(R[(0, 0)]): print("*** Printing table only supported for integrals" + " of a single data 
set.") else: try: precis = show[0] except (TypeError, IndexError): precis = 5 try: width = show[1] except (TypeError, IndexError): width = 8 formstr = "%%%d.%df" % (width, precis) title = "Richardson Extrapolation Table for Romberg Integration" print("", title.center(68), "=" * 68, sep="\n", end="") for i in xrange(k+1): for j in xrange(i+1): print(formstr % R[(i, j)], end=" ") print() print("=" * 68) print() return R[(k, k)] # Romberg quadratures for numeric integration. # # Written by Scott M. Ransom <ransom@cfa.harvard.edu> # last revision: 14 Nov 98 # # Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr> # last revision: 1999-7-21 # # Adapted to scipy by Travis Oliphant <oliphant.travis@ieee.org> # last revision: Dec 2001 def _difftrap(function, interval, numtraps): """ Perform part of the trapezoidal rule to integrate a function. Assume that we had called difftrap with all lower powers-of-2 starting with 1. Calling difftrap only returns the summation of the new ordinates. It does _not_ multiply by the width of the trapezoids. This must be performed by the caller. 'function' is the function to evaluate (must accept vector arguments). 'interval' is a sequence with lower and upper limits of integration. 'numtraps' is the number of trapezoids to use (must be a power-of-2). """ if numtraps <= 0: raise ValueError("numtraps must be > 0 in difftrap().") elif numtraps == 1: return 0.5*(function(interval[0])+function(interval[1])) else: numtosum = numtraps/2 h = float(interval[1]-interval[0])/numtosum lox = interval[0] + 0.5 * h points = lox + h * np.arange(numtosum) s = np.sum(function(points), axis=0) return s def _romberg_diff(b, c, k): """ Compute the differences for the Romberg quadrature corrections. See Forman Acton's "Real Computing Made Real," p 143. """ tmp = 4.0**k return (tmp * c - b)/(tmp - 1.0) def _printresmat(function, interval, resmat): # Print the Romberg result matrix. 
i = j = 0 print('Romberg integration of', repr(function), end=' ') print('from', interval) print('') print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results')) for i in xrange(len(resmat)): print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ') for j in xrange(i+1): print('%9f' % (resmat[i][j]), end=' ') print('') print('') print('The final result is', resmat[i][j], end=' ') print('after', 2**(len(resmat)-1)+1, 'function evaluations.') def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False, divmax=10, vec_func=False): """ Romberg integration of a callable function or method. Returns the integral of `function` (a function of one variable) over the interval (`a`, `b`). If `show` is 1, the triangular array of the intermediate results will be printed. If `vec_func` is True (default is False), then `function` is assumed to support vector arguments. Parameters ---------- function : callable Function to be integrated. a : float Lower limit of integration. b : float Upper limit of integration. Returns ------- results : float Result of the integration. Other Parameters ---------------- args : tuple, optional Extra arguments to pass to function. Each element of `args` will be passed as a single argument to `func`. Default is to pass no extra arguments. tol, rtol : float, optional The desired absolute and relative tolerances. Defaults are 1.48e-8. show : bool, optional Whether to print the results. Default is False. divmax : int, optional Maximum order of extrapolation. Default is 10. vec_func : bool, optional Whether `func` handles arrays as arguments (i.e whether it is a "vector" function). Default is False. See Also -------- fixed_quad : Fixed-order Gaussian quadrature. quad : Adaptive quadrature using QUADPACK. dblquad : Double integrals. tplquad : Triple integrals. romb : Integrators for sampled data. simps : Integrators for sampled data. cumtrapz : Cumulative integration for sampled data. ode : ODE integrator. odeint : ODE integrator. 
References ---------- .. [1] 'Romberg's method' http://en.wikipedia.org/wiki/Romberg%27s_method Examples -------- Integrate a gaussian from 0 to 1 and compare to the error function. >>> from scipy import integrate >>> from scipy.special import erf >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2) >>> result = integrate.romberg(gaussian, 0, 1, show=True) Romberg integration of <function vfunc at ...> from [0, 1] :: Steps StepSize Results 1 1.000000 0.385872 2 0.500000 0.412631 0.421551 4 0.250000 0.419184 0.421368 0.421356 8 0.125000 0.420810 0.421352 0.421350 0.421350 16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350 32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350 The final result is 0.421350396475 after 33 function evaluations. >>> print("%g %g" % (2*result, erf(1))) 0.842701 0.842701 """ if np.isinf(a) or np.isinf(b): raise ValueError("Romberg integration only available " "for finite limits.") vfunc = vectorize1(function, args, vec_func=vec_func) n = 1 interval = [a, b] intrange = b - a ordsum = _difftrap(vfunc, interval, n) result = intrange * ordsum resmat = [[result]] err = np.inf last_row = resmat[0] for i in xrange(1, divmax+1): n *= 2 ordsum += _difftrap(vfunc, interval, n) row = [intrange * ordsum / n] for k in xrange(i): row.append(_romberg_diff(last_row[k], row[k], k+1)) result = row[i] lastresult = last_row[i-1] if show: resmat.append(row) err = abs(result - lastresult) if err < tol or err < rtol * abs(result): break last_row = row else: warnings.warn( "divmax (%d) exceeded. Latest difference = %e" % (divmax, err), AccuracyWarning) if show: _printresmat(vfunc, interval, resmat) return result # Coefficients for Netwon-Cotes quadrature # # These are the points being used # to construct the local interpolating polynomial # a are the weights for Newton-Cotes integration # B is the error coefficient. # error in these coefficients grows as N gets larger. 
# or as samples are closer and closer together # You can use maxima to find these rational coefficients # for equally spaced data using the commands # a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i); # Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N)); # Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N)); # B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N)); # # pre-computed for equally-spaced weights # # num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N] # # a = num_a*array(int_a)/den_a # B = num_B*1.0 / den_B # # integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*) # where k = N // 2 # _builtincoeffs = { 1: (1,2,[1,1],-1,12), 2: (1,3,[1,4,1],-1,90), 3: (3,8,[1,3,3,1],-3,80), 4: (2,45,[7,32,12,32,7],-8,945), 5: (5,288,[19,75,50,50,75,19],-275,12096), 6: (1,140,[41,216,27,272,27,216,41],-9,1400), 7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400), 8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989], -2368,467775), 9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080, 15741,2857], -4671, 394240), 10: (5,299376,[16067,106300,-48525,272400,-260550,427368, -260550,272400,-48525,106300,16067], -673175, 163459296), 11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542, 15493566,15493566,-9595542,25226685,-3237113, 13486539,2171465], -2224234463, 237758976000), 12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295, 87516288,-87797136,87516288,-51491295,35725120, -7587864,9903168,1364651], -3012, 875875), 13: (13, 402361344000,[8181904909, 56280729661, -31268252574, 156074417954,-151659573325,206683437987, -43111992612,-43111992612,206683437987, -151659573325,156074417954,-31268252574, 56280729661,8181904909], -2639651053, 344881152000), 14: (7, 2501928000, [90241897,710986864,-770720657,3501442784, -6625093363,12630121616,-16802270373,19534438464, -16802270373,12630121616,-6625093363,3501442784, 
-770720657,710986864,90241897], -3740727473, 1275983280000) } def newton_cotes(rn, equal=0): """ Return weights and error coefficient for Newton-Cotes integration. Suppose we have (N+1) samples of f at the positions x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the integral between x_0 and x_N is: :math:`\\int_{x_0}^{x_N} f(x)dx = \\Delta x \\sum_{i=0}^{N} a_i f(x_i) + B_N (\\Delta x)^{N+2} f^{N+1} (\\xi)` where :math:`\\xi \\in [x_0,x_N]` and :math:`\\Delta x = \\frac{x_N-x_0}{N}` is the average samples spacing. If the samples are equally-spaced and N is even, then the error term is :math:`B_N (\\Delta x)^{N+3} f^{N+2}(\\xi)`. Parameters ---------- rn : int The integer order for equally-spaced data or the relative positions of the samples with the first sample at 0 and the last at N, where N+1 is the length of `rn`. N is the order of the Newton-Cotes integration. equal : int, optional Set to 1 to enforce equally spaced data. Returns ------- an : ndarray 1-D array of weights to apply to the function at the provided sample positions. B : float Error coefficient. Notes ----- Normally, the Newton-Cotes rules are used on smaller integration regions and a composite rule is used to return the total integral. """ try: N = len(rn)-1 if equal: rn = np.arange(N+1) elif np.all(np.diff(rn) == 1): equal = 1 except: N = rn rn = np.arange(N+1) equal = 1 if equal and N in _builtincoeffs: na, da, vi, nb, db = _builtincoeffs[N] an = na * np.array(vi, dtype=float) / da return an, float(nb)/db if (rn[0] != 0) or (rn[-1] != N): raise ValueError("The sample positions must start at 0" " and end at N") yi = rn / float(N) ti = 2 * yi - 1 nvec = np.arange(N+1) C = ti ** nvec[:, np.newaxis] Cinv = np.linalg.inv(C) # improve precision of result for i in range(2): Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv) vec = 2.0 / (nvec[::2]+1) ai = Cinv[:, ::2].dot(vec) * (N / 2.) if (N % 2 == 0) and equal: BN = N/(N+3.) power = N+2 else: BN = N/(N+2.) 
power = N+1 BN = BN - np.dot(yi**power, ai) p1 = power+1 fac = power*math.log(N) - gammaln(p1) fac = math.exp(fac) return ai, BN*fac
bsd-3-clause
aswolf/xmeos
xmeos/test/test_models_thermal.py
1
5279
from __future__ import absolute_import, print_function, division, with_statement from builtins import object import numpy as np import xmeos from xmeos import models from xmeos.models import core import pytest import matplotlib.pyplot as plt import matplotlib as mpl from abc import ABCMeta, abstractmethod import copy import test_models try: import cPickle as pickle except: import pickle #==================================================================== # Define "slow" tests # - indicated by @slow decorator # - slow tests are run only if using --runslow cmd line arg #==================================================================== slow = pytest.mark.skipif( not pytest.config.getoption("--runslow"), reason="need --runslow option to run" ) #==================================================================== class BaseTestThermalEos(test_models.BaseTestEos): def test_heat_capacity_0K(self): self.calc_test_heat_capacity(T0=0) def test_heat_capacity_300K(self): self.calc_test_heat_capacity(T0=300) def test_heat_capacity_3000K(self): self.calc_test_heat_capacity(T0=3000) def test_entropy_0K(self): self.calc_test_entropy(T0=0) def test_entropy_300K(self): self.calc_test_entropy(T0=300) def test_entropy_3000K(self): self.calc_test_entropy(T0=3000) def calc_test_heat_capacity(self, T0=0): TOL = 1e-3 Nsamp = 10001 eos_mod = self.load_eos(T0=T0) Tmod_a = np.linspace(300.0, 3000.0, Nsamp) dT = Tmod_a[1] - Tmod_a[0] assert eos_mod.energy(T0)==0, 'Energy must be zero at T0.' energy_a = eos_mod.energy(Tmod_a) heat_capacity_a = eos_mod.heat_capacity(Tmod_a) abs_err, rel_err, range_err = self.numerical_deriv( Tmod_a, energy_a, heat_capacity_a, scale=1) assert rel_err < TOL, 'rel-error in Cv, ' + np.str(rel_err) + \ ', must be less than TOL, ' + np.str(TOL) def calc_test_entropy(self, T0=0): TOL = 1e-3 Nsamp = 10001 eos_mod = self.load_eos(T0=T0) Tmod_a = np.linspace(300.0, 3000.0, Nsamp) dT = Tmod_a[1] - Tmod_a[0] assert eos_mod.entropy(T0)==0, 'Entropy must be zero at T0.' 
entropy_a = eos_mod.entropy(Tmod_a) heat_capacity_a = eos_mod.heat_capacity(Tmod_a) abs_err, rel_err, range_err = self.numerical_deriv( Tmod_a, entropy_a, heat_capacity_a, scale=Tmod_a) assert rel_err < TOL, 'rel-error in Cv, ' + np.str(rel_err) + \ ', must be less than TOL, ' + np.str(TOL) #==================================================================== #==================================================================== # SEC:2 Implimented Test Clases #==================================================================== class TestDebye(BaseTestThermalEos): def load_eos(self, T0=0): # add T0 natom=10 eos_mod = models.ThermalEos(kind='Debye', natom=natom) refstate_calc = eos_mod.calculators['refstate'] refstate = refstate_calc.ref_state refstate['T0'] = T0 # eos_mod.set_param_values(param_names=['T0'], param_values=[T0]) return eos_mod #==================================================================== class TestEinstein(BaseTestThermalEos): def load_eos(self, T0=0): natom=10 eos_mod = models.ThermalEos(kind='Einstein', natom=natom) refstate_calc = eos_mod.calculators['refstate'] refstate = refstate_calc.ref_state refstate['T0'] = T0 # eos_mod.set_param_values(param_names=['T0'], param_values=[T0]) return eos_mod #==================================================================== class TestGenRosenfeldTarazona(BaseTestThermalEos): def load_eos(self, T0=0): natom=10 eos_mod = models.ThermalEos(kind='GenRosenfeldTarazona', natom=natom) refstate_calc = eos_mod.calculators['refstate'] refstate = refstate_calc.ref_state refstate['T0'] = T0 # eos_mod.set_param_values(param_names=['T0'], param_values=[T0]) return eos_mod def test_heat_capacity_0K(self): pass def test_entropy_0K(self): pass #==================================================================== # 2.2: ThermalPathMod Tests #==================================================================== # class TestGenRosenfeldTaranzona(BaseTestThermalPathMod): # def load_thermal_path_mod(self, eos_d): # 
thermal_path_mod = thermal.GenRosenfeldTaranzona(path_const='V') # core.set_modtypes( ['ThermalPathMod'], [thermal_path_mod], eos_d ) # # pass # # def init_params(self,eos_d): # # Set model parameter values # acoef = -158.2 # bcoef = .042 # mexp = 3/5 # lognfac = 0.0 # T0 = 5000.0 # # param_key_a = ['acoef','bcoef','mexp','lognfac','T0'] # param_val_a = np.array([acoef,bcoef,mexp,lognfac,T0]) # # core.set_consts( [], [], eos_d ) # self.load_thermal_path_mod( eos_d ) # # core.set_params( param_key_a, param_val_a, eos_d ) # # return eos_d #====================================================================
mit
yyjiang/scikit-learn
examples/model_selection/plot_train_error_vs_test_error.py
349
2577
""" ========================= Train error vs Test error ========================= Illustration of how the performance of an estimator on unseen data (test data) is not the same as the performance on training data. As the regularization increases the performance on train decreases while the performance on test is optimal within a range of values of the regularization parameter. The example with an Elastic-Net regression model and the performance is measured using the explained variance a.k.a. R^2. """ print(__doc__) # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # License: BSD 3 clause import numpy as np from sklearn import linear_model ############################################################################### # Generate sample data n_samples_train, n_samples_test, n_features = 75, 150, 500 np.random.seed(0) coef = np.random.randn(n_features) coef[50:] = 0.0 # only the top 10 features are impacting the model X = np.random.randn(n_samples_train + n_samples_test, n_features) y = np.dot(X, coef) # Split train and test data X_train, X_test = X[:n_samples_train], X[n_samples_train:] y_train, y_test = y[:n_samples_train], y[n_samples_train:] ############################################################################### # Compute train and test errors alphas = np.logspace(-5, 1, 60) enet = linear_model.ElasticNet(l1_ratio=0.7) train_errors = list() test_errors = list() for alpha in alphas: enet.set_params(alpha=alpha) enet.fit(X_train, y_train) train_errors.append(enet.score(X_train, y_train)) test_errors.append(enet.score(X_test, y_test)) i_alpha_optim = np.argmax(test_errors) alpha_optim = alphas[i_alpha_optim] print("Optimal regularization parameter : %s" % alpha_optim) # Estimate the coef_ on full data with optimal regularization parameter enet.set_params(alpha=alpha_optim) coef_ = enet.fit(X, y).coef_ ############################################################################### # Plot results functions import matplotlib.pyplot as plt plt.subplot(2, 
1, 1) plt.semilogx(alphas, train_errors, label='Train') plt.semilogx(alphas, test_errors, label='Test') plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k', linewidth=3, label='Optimum on test') plt.legend(loc='lower left') plt.ylim([0, 1.2]) plt.xlabel('Regularization parameter') plt.ylabel('Performance') # Show estimated coef_ vs true coef plt.subplot(2, 1, 2) plt.plot(coef, label='True coef') plt.plot(coef_, label='Estimated coef') plt.legend() plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26) plt.show()
bsd-3-clause
jmontgom10/Mimir_pyPol
02b_buildStarAndNebulaMasks.py
1
19696
import os import glob import numpy as np import warnings from skimage import measure, morphology from scipy import ndimage from astropy.table import Table, Column import astropy.units as u from astropy.coordinates import SkyCoord from astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats from astropy.convolution import convolve, convolve_fft, Gaussian2DKernel from astroquery.vizier import Vizier # For debugging import matplotlib.pyplot as plt # Add the AstroImage class import astroimage as ai # Add the header handler to the BaseImage class from Mimir_header_handler import Mimir_header_handler ai.set_instrument('2MASS') # This script will read in the background level estimated for each on-target # image in the previous step. The background level in dimmest parts of the # on-target image will be directly computed, and the residual between the direct # estimate and the interpolation will be stored. The distribution of these # residual will be used to estimate which interpolated background levels can be # trusted. 
#============================================================================== # *********************** CUSTOM USER CODE ************************************ # this is where the user specifies where the raw data is stored # and some of the subdirectory structure to find the actual .FITS images #============================================================================== # This is the location of all PPOL reduction directory PPOL_dir = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\PPOL_Reduced\\201611' # Build the path to the S3_Asotremtry files S3_dir = os.path.join(PPOL_dir, 'S3_Astrometry') # This is the location where all pyPol data will be saved pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611' # This is the directory where the 2MASS tiles of the targets have been saved # Go to "http://hachi.ipac.caltech.edu/" to download 2MASS tiles TMASSdir = ".\\2MASSimages" # Setup new directory for masks maskDir = os.path.join(pyPol_data, 'Masks') if (not os.path.isdir(maskDir)): os.mkdir(maskDir, 0o755) starMaskDir = os.path.join(maskDir, 'starMasks') if (not os.path.isdir(starMaskDir)): os.mkdir(starMaskDir, 0o755) # Read in the indexFile data and select the filenames indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv') fileIndex = Table.read(indexFile, format='csv') useRows = np.where(fileIndex['USE']) fileIndex = fileIndex[useRows] ################################################################################ def find_2MASS_flux(array): # Identify which pixels have acceptable "background" levels. 
Start by # grabbing the image statistics mean, median, stddev = sigma_clipped_stats(array) # Idesntify pixels more than 2-sigma above the background fgdThresh = median + 2.0*stddev fgdRegion = array > fgdThresh # Repeat the classification withiout the *definitely* nebular pixels bkgPix = np.logical_not(fgdRegion) mean, median, stddev = sigma_clipped_stats(array[bkgPix]) fgdThresh = median + 2.0*stddev fgdRegion = array > fgdThresh # Clean the foreground ID region all_labels = measure.label(fgdRegion) all_labels1 = morphology.remove_small_objects(all_labels, min_size=50) fgdRegion = all_labels1 > 0 # Grab the part *only* connected to the central, nebular region ny, nx = fgdRegion.shape all_labels = measure.label(fgdRegion) nebularLabel = all_labels[ny//2, nx//2] nebularMask = all_labels == nebularLabel starMask = np.logical_and( all_labels > 0, all_labels != nebularLabel ) all_labels = measure.label(starMask) all_labels1 = morphology.remove_small_objects(all_labels, min_size=50) starMask = all_labels1 > 0 # Dilate a TOOON to be conservatine... 
nebularSigma = 20.0 * gaussian_fwhm_to_sigma # FWHM = 3.0 # Build a kernel for detecting pixels above the threshold nebularKernel = Gaussian2DKernel(nebularSigma, x_size=41, y_size=41) nebularKernel.normalize() nebularMask = convolve_fft( nebularMask.astype(float), nebularKernel.array ) nebularMask = (nebularMask > 0.01) # Expand a second time to be conservative nebularMask = convolve_fft( nebularMask.astype(float), nebularKernel.array ) nebularMask = (nebularMask > 0.01) # Do a less aggressive dilation of the stellar mask stellarSigma = 10.0 * gaussian_fwhm_to_sigma # FWHM = 3.0 # Build a kernel for detecting pixels above the threshold stellarKernel = Gaussian2DKernel(stellarSigma, x_size=41, y_size=41) stellarKernel.normalize() stellarMask = convolve_fft( fgdRegion.astype(float), stellarKernel.array ) stellarMask = (stellarMask > 0.01) # Recombine the nebular and stellar components fgdRegion = np.logical_or(nebularMask, stellarMask) # Return the flux-bright pixels to the user return fgdRegion ################################################################################ # Read in the Kokopelli Mask kokopelliMask = ai.reduced.ReducedScience.read('kokopelliMask.fits') # Dilate the mask in order to be more conservative. 
kokopelliMask.data = ndimage.binary_dilation(kokopelliMask.data, iterations=8).astype(int) # Construct the 2MASS masks and save to disk # Read in all the 2MASS images and store them in a dictionary for quick reference TMASS_Hfiles = np.array(glob.glob(os.path.join(TMASSdir, '*H.fits'))) TMASS_Kfiles = np.array(glob.glob(os.path.join(TMASSdir, '*Ks.fits'))) # Read in the 2MASS images TMASS_HimgList = [ai.reduced.ReducedScience.read(f) for f in TMASS_Hfiles] TMASS_KimgList = [ai.reduced.ReducedScience.read(f) for f in TMASS_Kfiles] # Convert the images to "background masks" for img in TMASS_HimgList: # Construct the output name for this mask base = os.path.basename(img.filename) targetMask = base.split('.')[0] + '_mask.fits' outFilename = os.path.join(TMASSdir, targetMask) # Skip files that have already been done if os.path.isfile(outFilename): continue print('Building background mask for {}'.format(os.path.basename(img.filename))) tmp = img.copy() tmp.data = find_2MASS_flux(img.data).astype(int) tmp.write(outFilename, dtype=np.uint8) for img in TMASS_KimgList: # Construct the output name for this mask base = os.path.basename(img.filename) targetMask = base.split('.')[0] + '_mask.fits' outFilename = os.path.join(TMASSdir, targetMask) # Skip files that have already been done if os.path.isfile(outFilename): continue print('Building background mask for {}'.format(os.path.basename(img.filename))) tmp = img.copy() tmp.data = find_2MASS_flux(img.data).astype(int) tmp.write(outFilename, dtype=np.uint8) # Now that the 2MASS files have been read in, it is safe to set the Mimir_header_handler ai.set_instrument('Mimir') ai.reduced.ReducedScience.set_header_handler(Mimir_header_handler) # Set the Vizier download size to be unlimited Vizier.ROW_LIMIT = -1 # Group by target HWP groupedFileIndex = fileIndex.group_by(['GROUP_ID', 'HWP']) #Loop through each file in the fileList variable numberOfFiles = len(fileIndex) bkgLevels = fileIndex['BACKGROUND'] print('{0:3.1%} 
complete'.format(0), end='\r') iRow = 0 for group in groupedFileIndex.groups: # Increment the row counter iRow += len(group) # Grab the relevant information for this group thisTarget = np.unique(group['TARGET'])[0] thisFilter = np.unique(group['FILTER'])[0] # Re-group by dither pointing ABBAsubGroups = group.group_by(['AB']) for ABBAgroup in ABBAsubGroups.groups: # Grab the relevant information for this subgroup thisAB = np.unique(ABBAgroup['AB'])[0] # If this is an on-target (A) subgroup, then skip it! if thisAB == 'A': continue # Grab the off-target files Bfiles = [] maskFilenames = [] for thisFile in ABBAgroup['FILENAME']: # Append the B-file to use Bfiles.append(os.path.join(S3_dir, thisFile)) # BUild the mask name maskBasename = os.path.basename(thisFile) maskFilenames.append(os.path.join(starMaskDir, maskBasename)) # Check if the file has already been written and skip those which have been if all([os.path.isfile(f) for f in maskFilenames]): continue # Read in the off-target frames Bimgs = [ai.reduced.ReducedScience.read(f) for f in Bfiles] numBimgs = len(Bimgs) if numBimgs > 1: # Combine the images to get a quick map of the region to download BimgStack = ai.utilitywrappers.ImageStack(Bimgs, gobble=False) BimgStack.align_images_with_wcs() # Determine the boundaries of the region to download 2MASS data referenceImage = BimgStack.imageList[0] else: referenceImage = Bimgs[0] # Get the image shape and coordinates ny, nx = referenceImage.shape lfrt, bttp = referenceImage.wcs.wcs_pix2world([0, ny], [0, nx], 0) lf, rt = lfrt bt, tp = bttp # Grab the maximum width and the median (RA, Dec) RAcen, DecCen = 0.5*(lf + rt), 0.5*(bt + tp) height = (tp - bt)*u.deg width = (lf - rt)*np.cos(np.deg2rad(DecCen))*u.deg with warnings.catch_warnings(): warnings.simplefilter("ignore") # Download the 2MASS point source catalog tmassCatalog = Vizier.query_region( SkyCoord( ra=RAcen, dec=DecCen, unit=(u.deg, u.deg), frame='fk5' ), width=width, height=height, catalog='II/246/out' )[0] # 
Cut off low SNR detections tmassFilter = referenceImage.filter[0] tmassSNR = tmassCatalog[tmassFilter+'snr'] goodDetections = np.logical_and( tmassSNR.data.data > 5.0, np.logical_not(tmassSNR.mask) ) goodDetections = np.logical_and( goodDetections, np.logical_not(tmassCatalog[tmassFilter+'mag'].mask) ) # Cull the bad data tmassCatalog = tmassCatalog[goodDetections] # Grab the RAs, Decs, and magnitudes RAs, Decs = tmassCatalog['_RAJ2000'], tmassCatalog['_DEJ2000'] mags = tmassCatalog[tmassFilter+'mag'] # Loop through each file and build the preliminary mask starMasks = [] for thisImg in Bimgs: # # Read in the image and store it for possible later use # thisImg = ai.reduced.ReducedScience.read(Bfile) # # # Attempt to recover a background estimate. If not, then just fill with -1e6 # # Locate the non-stellar pixels in this image # photAnalyzer = ai.utilitywrappers.PhotometryAnalyzer(thisImg) # try: # _, psfParams = photAnalyzer.get_psf() # FWHM = 2.355*np.sqrt(psfParams['sminor']*psfParams['smajor']) # except: # FWHM = 4.5 # xs, ys = thisImg.get_sources(FWHMguess = FWHM, minimumSNR = 3.5, # satLimit = 1e20, edgeLimit = 21) # starFluxes, fluxUncerts = photAnalyzer.aperture_photometry( # xs, ys, FWHM, 24, 26, mask=(thisImg.data < -1e4) # ) # # # Catch bad stars # kokopelliArtifacts = kokopelliMask.data[ys.round().astype(int), xs.round().astype(int)] # # # Look through the stars in Kokopelly and determine which are *real* # realStars = (kokopelliArtifacts.astype(int)*starFluxes > 4e3) # kokopelliArtifacts = np.logical_and( # kokopelliArtifacts, # np.logical_not(realStars) # ) # # # Only keep those stars which are not kokopilli artifacts # goodInds = np.where( # np.logical_and( # starFluxes > 0, # np.logical_not(kokopelliArtifacts) # ) # ) # xs = xs[goodInds] # ys = ys[goodInds] # starFluxes = starFluxes[goodInds] # Now simply mask *any* of the stars downloaded xs, ys = thisImg.wcs.wcs_world2pix(RAs, Decs, 0) starRadii = 35 - 1.5*mags # Loop through each star and make its 
mask ny, nx = thisImg.shape yy, xx = np.mgrid[0:ny, 0:nx] starMask = False for xs1, ys1, rs in zip(xs, ys, starRadii): if not np.isfinite(rs): import pdb; # Compute the distances from this star # Mask any pixels within 1 radius of this star starMask = np.logical_or( starMask, np.sqrt((xx - xs1)**2 + (yy - ys1)**2) < rs ) # Store the mask for later use starMasks.append(starMask) # # If more than one image exists in this group, then do a secondary pass to # # locate the dimmer stars # numBimgs = len(Bimgs) # if numBimgs == 1: # # Grab the available Bimg # tmpImg = Bimgs[0].copy() # # # Smooth the data to look for lower SNR stars # tmpImg.data = ndimage.median_filter(tmpImg.data, 3) # # # Construct a temporary image to do another pass as the low SNR stars # tmpFWHM = np.sqrt(FWHM**2 + (0.5*3)**2) # xs, ys = tmpImg.get_sources(FWHMguess = tmpFWHM, minimumSNR = 2.5, # satLimit = 1e20, edgeLimit = 21) # starFluxes, fluxUncerts = photAnalyzer.aperture_photometry( # xs, ys, tmpFWHM, 24, 26, mask=(tmpImg.data < -1e4) # ) # # # Catch bad stars # kokopelliArtifacts = kokopelliMask.data[ys.round().astype(int), xs.round().astype(int)] # # # Look through the stars in Kokopelly and determine which are *real* # realStars = (kokopelliArtifacts.astype(int)*starFluxes > 4e3) # kokopelliArtifacts = np.logical_and( # kokopelliArtifacts, # np.logical_not(realStars) # ) # # # Only keep those stars which are not kokopilli artifacts # goodInds = np.where( # np.logical_and( # starFluxes > 0, # np.logical_not(kokopelliArtifacts) # ) # ) # xs = xs[goodInds] # ys = ys[goodInds] # starFluxes = starFluxes[goodInds] # starRadii = 5*np.log10(starFluxes) # # # Loop through each star and make its mask # ny, nx = thisImg.shape # yy, xx = np.mgrid[0:ny, 0:nx] # starMask = starMasks[0] # for xs1, ys1, rs in zip(xs, ys, starRadii): # if not np.isfinite(rs): import pdb; # # Compute the distances from this star # # Mask any pixels within 1 radius of this star # starMask = np.logical_or( # starMask, # 
np.sqrt((xx - xs1)**2 + (yy - ys1)**2) < rs # ) # # # Store the mask for later use # starMasks[0] = starMask # # elif numBimgs > 1: # # Loop through each # for iImg in range(numBimgs): # # Determine which image is the primary image and which is secondary # if iImg == 0: # thisImg = Bimgs[0] # otherImg = Bimgs[1] # elif iImg == 1: # thisImg = Bimgs[1] # otherImg = Bimgs[0] # else: # print('What?! How did you even get here?!') # import pdb; pdb.set_trace() # # # Grab the corresponding mask # thisMask = starMasks[iImg] # # # Subtract the two images from eachother # diffData = otherImg.data - thisImg.data # # # Smooth the difference image # median9Data = ndimage.median_filter(diffData, 9) # # # LOOK FOR DIVOTS IN GENERAL MEDIAN FILTERED IMAGE # # Locate pixels less than negative 2-sigma # mean9, median9, stddev9 = sigma_clipped_stats(median9Data) # starDivots = np.nan_to_num(median9Data) < (mean9 -4*stddev9) # # # Remove anything that is smaller than 20 pixels # all_labels = measure.label(starDivots) # all_labels1 = morphology.remove_small_objects(all_labels, min_size=20) # label_hist, label_bins = np.histogram( # all_labels1, # bins=np.arange(all_labels1.max() - all_labels1.min()) # ) # label_mode = label_bins[label_hist.argmax()] # starDivots = all_labels1 != label_mode # # # Remove any pixels along extreme top # starDivots[ny-10:ny,:] = False # # # Dialate the starDivots mask # stellarSigma = 5.0 * gaussian_fwhm_to_sigma # FWHM = 3.0 # # # Build a kernel for detecting pixels above the threshold # stellarKernel = Gaussian2DKernel(stellarSigma, x_size=41, y_size=41) # stellarKernel.normalize() # starDivots = convolve_fft( # starDivots.astype(float), # stellarKernel.array # ) # starDivots = (starDivots > 0.01) # # # Compbine the divots mask and the original mask # fullMask = np.logical_or(thisMask, starDivots) # # # Store the mask back in its list # starMasks[iImg] = ai.reduced.ReducedScience(fullMask.astype(int)) # # # Do a finel loop-through to make sure there is as 
much agreement between # # the two masks as possible # if numBimgs > 1: # # Construct an image stack and compute image offsets # BimgStack = ai.utilitywrappers.ImageStack(Bimgs) # dx, dy = BimgStack.get_wcs_offsets(BimgStack) # # try: # starMask0 = starMasks[0].copy() # starMask1 = starMasks[1].copy() # except: # print('Why are there not 2 starMasks?') # import pdb; pdb.set_trace() # # for iMask in range(numBimgs): # # Determine which image is the primary image and which is secondary # if iMask == 0: # dx1 = dx[1] - dx[0] # dy1 = dy[1] - dy[0] # thisMask = starMask0 # otherMask = starMask1 # elif iMask == 1: # dx1 = dx[0] - dx[1] # dy1 = dy[0] - dy[1] # thisMask = starMask1 # otherMask = starMask0 # else: # print('What?! How did you even get here?!') # import pdb; pdb.set_trace() # # # Shift the mask accordingly # shiftedOtherMask = otherMask.shift(dx1, dy1) # # # Combine this mask and the shifted mask # fullMask = np.logical_or( # thisMask.data, # shiftedOtherMask.data # ) # # # Store the mask for a final write-to-disk # starMasks[iMask] = fullMask # Look through the masks and write to disk for maskFile, starMask in zip(maskFilenames, starMasks): try: # Write the mask to disk maskImg = ai.reduced.ReducedScience(starMask.astype(int)) maskImg.write(maskFile, dtype=np.uint8) except: print('Failed to save file {}'.format(maskFile)) # Update on progress print('{0:3.1%} complete'.format(iRow/numberOfFiles), end='\r') # Alert the user that everything is complete print('{0:3.1%} complete'.format(1), end='\n\n') print('Done!')
mit
pkruskal/scikit-learn
sklearn/neighbors/base.py
115
29783
"""Base and mixin classes for nearest neighbors""" # Authors: Jake Vanderplas <vanderplas@astro.washington.edu> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # Alexandre Gramfort <alexandre.gramfort@inria.fr> # Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl> # Multi-output support by Arnaud Joly <a.joly@ulg.ac.be> # # License: BSD 3 clause (C) INRIA, University of Amsterdam import warnings from abc import ABCMeta, abstractmethod import numpy as np from scipy.sparse import csr_matrix, issparse from .ball_tree import BallTree from .kd_tree import KDTree from ..base import BaseEstimator from ..metrics import pairwise_distances from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS from ..utils import check_X_y, check_array from ..utils.fixes import argpartition from ..utils.validation import DataConversionWarning from ..utils.validation import NotFittedError from ..externals import six VALID_METRICS = dict(ball_tree=BallTree.valid_metrics, kd_tree=KDTree.valid_metrics, # The following list comes from the # sklearn.metrics.pairwise doc string brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) + ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule', 'wminkowski'])) VALID_METRICS_SPARSE = dict(ball_tree=[], kd_tree=[], brute=PAIRWISE_DISTANCE_FUNCTIONS.keys()) class NeighborsWarning(UserWarning): pass # Make sure that NeighborsWarning are displayed more than once warnings.simplefilter("always", NeighborsWarning) def _check_weights(weights): """Check to make sure weights are valid""" if weights in (None, 'uniform', 'distance'): return weights elif callable(weights): return weights else: raise ValueError("weights not recognized: should be 'uniform', " "'distance', or a callable function") def _get_weights(dist, weights): """Get the weights from an array of distances and 
a parameter ``weights`` Parameters =========== dist: ndarray The input distances weights: {'uniform', 'distance' or a callable} The kind of weighting used Returns ======== weights_arr: array of the same shape as ``dist`` if ``weights == 'uniform'``, then returns None """ if weights in (None, 'uniform'): return None elif weights == 'distance': # if user attempts to classify a point that was zero distance from one # or more training points, those training points are weighted as 1.0 # and the other points as 0.0 if dist.dtype is np.dtype(object): for point_dist_i, point_dist in enumerate(dist): # check if point_dist is iterable # (ex: RadiusNeighborClassifier.predict may set an element of # dist to 1e-6 to represent an 'outlier') if hasattr(point_dist, '__contains__') and 0. in point_dist: dist[point_dist_i] = point_dist == 0. else: dist[point_dist_i] = 1. / point_dist else: with np.errstate(divide='ignore'): dist = 1. / dist inf_mask = np.isinf(dist) inf_row = np.any(inf_mask, axis=1) dist[inf_row] = inf_mask[inf_row] return dist elif callable(weights): return weights(dist) else: raise ValueError("weights not recognized: should be 'uniform', " "'distance', or a callable function") class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)): """Base class for nearest neighbors estimators.""" @abstractmethod def __init__(self): pass def _init_params(self, n_neighbors=None, radius=None, algorithm='auto', leaf_size=30, metric='minkowski', p=2, metric_params=None, **kwargs): if kwargs: warnings.warn("Passing additional arguments to the metric " "function as **kwargs is deprecated " "and will no longer be supported in 0.18. 
" "Use metric_params instead.", DeprecationWarning, stacklevel=3) if metric_params is None: metric_params = {} metric_params.update(kwargs) self.n_neighbors = n_neighbors self.radius = radius self.algorithm = algorithm self.leaf_size = leaf_size self.metric = metric self.metric_params = metric_params self.p = p if algorithm not in ['auto', 'brute', 'kd_tree', 'ball_tree']: raise ValueError("unrecognized algorithm: '%s'" % algorithm) if algorithm == 'auto': alg_check = 'ball_tree' else: alg_check = algorithm if callable(metric): if algorithm == 'kd_tree': # callable metric is only valid for brute force and ball_tree raise ValueError( "kd_tree algorithm does not support callable metric '%s'" % metric) elif metric not in VALID_METRICS[alg_check]: raise ValueError("Metric '%s' not valid for algorithm '%s'" % (metric, algorithm)) if self.metric_params is not None and 'p' in self.metric_params: warnings.warn("Parameter p is found in metric_params. " "The corresponding parameter from __init__ " "is ignored.", SyntaxWarning, stacklevel=3) effective_p = metric_params['p'] else: effective_p = self.p if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1: raise ValueError("p must be greater than one for minkowski metric") self._fit_X = None self._tree = None self._fit_method = None def _fit(self, X): if self.metric_params is None: self.effective_metric_params_ = {} else: self.effective_metric_params_ = self.metric_params.copy() effective_p = self.effective_metric_params_.get('p', self.p) if self.metric in ['wminkowski', 'minkowski']: self.effective_metric_params_['p'] = effective_p self.effective_metric_ = self.metric # For minkowski distance, use more efficient methods where available if self.metric == 'minkowski': p = self.effective_metric_params_.pop('p', 2) if p < 1: raise ValueError("p must be greater than one " "for minkowski metric") elif p == 1: self.effective_metric_ = 'manhattan' elif p == 2: self.effective_metric_ = 'euclidean' elif p == np.inf: 
self.effective_metric_ = 'chebyshev' else: self.effective_metric_params_['p'] = p if isinstance(X, NeighborsBase): self._fit_X = X._fit_X self._tree = X._tree self._fit_method = X._fit_method return self elif isinstance(X, BallTree): self._fit_X = X.data self._tree = X self._fit_method = 'ball_tree' return self elif isinstance(X, KDTree): self._fit_X = X.data self._tree = X self._fit_method = 'kd_tree' return self X = check_array(X, accept_sparse='csr') n_samples = X.shape[0] if n_samples == 0: raise ValueError("n_samples must be greater than 0") if issparse(X): if self.algorithm not in ('auto', 'brute'): warnings.warn("cannot use tree with sparse input: " "using brute force") if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']: raise ValueError("metric '%s' not valid for sparse input" % self.effective_metric_) self._fit_X = X.copy() self._tree = None self._fit_method = 'brute' return self self._fit_method = self.algorithm self._fit_X = X if self._fit_method == 'auto': # A tree approach is better for small number of neighbors, # and KDTree is generally faster when available if (self.n_neighbors is None or self.n_neighbors < self._fit_X.shape[0] // 2): if self.effective_metric_ in VALID_METRICS['kd_tree']: self._fit_method = 'kd_tree' else: self._fit_method = 'ball_tree' else: self._fit_method = 'brute' if self._fit_method == 'ball_tree': self._tree = BallTree(X, self.leaf_size, metric=self.effective_metric_, **self.effective_metric_params_) elif self._fit_method == 'kd_tree': self._tree = KDTree(X, self.leaf_size, metric=self.effective_metric_, **self.effective_metric_params_) elif self._fit_method == 'brute': self._tree = None else: raise ValueError("algorithm = '%s' not recognized" % self.algorithm) return self class KNeighborsMixin(object): """Mixin for k-neighbors searches""" def kneighbors(self, X=None, n_neighbors=None, return_distance=True): """Finds the K-neighbors of a point. 
Returns distance Parameters ---------- X : array-like, last dimension same as that of fit data, optional The query point or points. If not provided, neighbors of each indexed point are returned. In this case, the query point is not considered its own neighbor. n_neighbors : int Number of neighbors to get (default is the value passed to the constructor). return_distance : boolean, optional. Defaults to True. If False, distances will not be returned Returns ------- dist : array Array representing the lengths to points, only present if return_distance=True ind : array Indices of the nearest points in the population matrix. Examples -------- In the following example, we construct a NeighborsClassifier class from an array representing our data set and ask who's the closest point to [1,1,1] >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(n_neighbors=1) >>> neigh.fit(samples) # doctest: +ELLIPSIS NearestNeighbors(algorithm='auto', leaf_size=30, ...) >>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS (array([[ 0.5]]), array([[2]]...)) As you can see, it returns [[0.5]], and [[2]], which means that the element is at distance 0.5 and is the third element of samples (indexes start at 0). You can also query for multiple points: >>> X = [[0., 1., 0.], [1., 0., 1.]] >>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS array([[1], [2]]...) 
""" if self._fit_method is None: raise NotFittedError("Must fit neighbors before querying.") if n_neighbors is None: n_neighbors = self.n_neighbors if X is not None: query_is_train = False X = check_array(X, accept_sparse='csr') else: query_is_train = True X = self._fit_X # Include an extra neighbor to account for the sample itself being # returned, which is removed later n_neighbors += 1 train_size = self._fit_X.shape[0] if n_neighbors > train_size: raise ValueError( "Expected n_neighbors <= n_samples, " " but n_samples = %d, n_neighbors = %d" % (train_size, n_neighbors) ) n_samples, _ = X.shape sample_range = np.arange(n_samples)[:, None] if self._fit_method == 'brute': # for efficiency, use squared euclidean distances if self.effective_metric_ == 'euclidean': dist = pairwise_distances(X, self._fit_X, 'euclidean', squared=True) else: dist = pairwise_distances(X, self._fit_X, self.effective_metric_, **self.effective_metric_params_) neigh_ind = argpartition(dist, n_neighbors - 1, axis=1) neigh_ind = neigh_ind[:, :n_neighbors] # argpartition doesn't guarantee sorted order, so we sort again neigh_ind = neigh_ind[ sample_range, np.argsort(dist[sample_range, neigh_ind])] if return_distance: if self.effective_metric_ == 'euclidean': result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind else: result = dist[sample_range, neigh_ind], neigh_ind else: result = neigh_ind elif self._fit_method in ['ball_tree', 'kd_tree']: if issparse(X): raise ValueError( "%s does not work with sparse matrices. Densify the data, " "or set algorithm='brute'" % self._fit_method) result = self._tree.query(X, n_neighbors, return_distance=return_distance) else: raise ValueError("internal: _fit_method not recognized") if not query_is_train: return result else: # If the query data is the same as the indexed data, we would like # to ignore the first nearest neighbor of every sample, i.e # the sample itself. 
if return_distance: dist, neigh_ind = result else: neigh_ind = result sample_mask = neigh_ind != sample_range # Corner case: When the number of duplicates are more # than the number of neighbors, the first NN will not # be the sample, but a duplicate. # In that case mask the first duplicate. dup_gr_nbrs = np.all(sample_mask, axis=1) sample_mask[:, 0][dup_gr_nbrs] = False neigh_ind = np.reshape( neigh_ind[sample_mask], (n_samples, n_neighbors - 1)) if return_distance: dist = np.reshape( dist[sample_mask], (n_samples, n_neighbors - 1)) return dist, neigh_ind return neigh_ind def kneighbors_graph(self, X=None, n_neighbors=None, mode='connectivity'): """Computes the (weighted) graph of k-Neighbors for points in X Parameters ---------- X : array-like, last dimension same as that of fit data, optional The query point or points. If not provided, neighbors of each indexed point are returned. In this case, the query point is not considered its own neighbor. n_neighbors : int Number of neighbors for each sample. (default is value passed to the constructor). mode : {'connectivity', 'distance'}, optional Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are Euclidean distance between points. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit] n_samples_fit is the number of samples in the fitted data A[i, j] is assigned the weight of edge that connects i to j. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(n_neighbors=2) >>> neigh.fit(X) # doctest: +ELLIPSIS NearestNeighbors(algorithm='auto', leaf_size=30, ...) >>> A = neigh.kneighbors_graph(X) >>> A.toarray() array([[ 1., 0., 1.], [ 0., 1., 1.], [ 1., 0., 1.]]) See also -------- NearestNeighbors.radius_neighbors_graph """ if n_neighbors is None: n_neighbors = self.n_neighbors # kneighbors does the None handling. 
if X is not None: X = check_array(X, accept_sparse='csr') n_samples1 = X.shape[0] else: n_samples1 = self._fit_X.shape[0] n_samples2 = self._fit_X.shape[0] n_nonzero = n_samples1 * n_neighbors A_indptr = np.arange(0, n_nonzero + 1, n_neighbors) # construct CSR matrix representation of the k-NN graph if mode == 'connectivity': A_data = np.ones(n_samples1 * n_neighbors) A_ind = self.kneighbors(X, n_neighbors, return_distance=False) elif mode == 'distance': A_data, A_ind = self.kneighbors( X, n_neighbors, return_distance=True) A_data = np.ravel(A_data) else: raise ValueError( 'Unsupported mode, must be one of "connectivity" ' 'or "distance" but got "%s" instead' % mode) kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr), shape=(n_samples1, n_samples2)) return kneighbors_graph class RadiusNeighborsMixin(object): """Mixin for radius-based neighbors searches""" def radius_neighbors(self, X=None, radius=None, return_distance=True): """Finds the neighbors within a given radius of a point or points. Return the indices and distances of each point from the dataset lying in a ball with size ``radius`` around the points of the query array. Points lying on the boundary are included in the results. The result points are *not* necessarily sorted by distance to their query point. Parameters ---------- X : array-like, (n_samples, n_features), optional The query point or points. If not provided, neighbors of each indexed point are returned. In this case, the query point is not considered its own neighbor. radius : float Limiting distance of neighbors to return. (default is the value passed to the constructor). return_distance : boolean, optional. Defaults to True. If False, distances will not be returned Returns ------- dist : array, shape (n_samples,) of arrays Array representing the distances to each point, only present if return_distance=True. The distance values are computed according to the ``metric`` constructor parameter. 
ind : array, shape (n_samples,) of arrays An array of arrays of indices of the approximate nearest points from the population matrix that lie within a ball of size ``radius`` around the query points. Examples -------- In the following example, we construct a NeighborsClassifier class from an array representing our data set and ask who's the closest point to [1, 1, 1]: >>> import numpy as np >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(radius=1.6) >>> neigh.fit(samples) # doctest: +ELLIPSIS NearestNeighbors(algorithm='auto', leaf_size=30, ...) >>> rng = neigh.radius_neighbors([1., 1., 1.]) >>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS [ 1.5 0.5] >>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS [1 2] The first array returned contains the distances to all points which are closer than 1.6, while the second array returned contains their indices. In general, multiple points can be queried at the same time. Notes ----- Because the number of neighbors of each point is not necessarily equal, the results for multiple query points cannot be fit in a standard data array. For efficiency, `radius_neighbors` returns arrays of objects, where each object is a 1D array of indices or distances. 
""" if self._fit_method is None: raise NotFittedError("Must fit neighbors before querying.") if X is not None: query_is_train = False X = check_array(X, accept_sparse='csr') else: query_is_train = True X = self._fit_X if radius is None: radius = self.radius n_samples = X.shape[0] if self._fit_method == 'brute': # for efficiency, use squared euclidean distances if self.effective_metric_ == 'euclidean': dist = pairwise_distances(X, self._fit_X, 'euclidean', squared=True) radius *= radius else: dist = pairwise_distances(X, self._fit_X, self.effective_metric_, **self.effective_metric_params_) neigh_ind_list = [np.where(d <= radius)[0] for d in dist] # See https://github.com/numpy/numpy/issues/5456 # if you want to understand why this is initialized this way. neigh_ind = np.empty(n_samples, dtype='object') neigh_ind[:] = neigh_ind_list if return_distance: dist_array = np.empty(n_samples, dtype='object') if self.effective_metric_ == 'euclidean': dist_list = [np.sqrt(d[neigh_ind[i]]) for i, d in enumerate(dist)] else: dist_list = [d[neigh_ind[i]] for i, d in enumerate(dist)] dist_array[:] = dist_list results = dist_array, neigh_ind else: results = neigh_ind elif self._fit_method in ['ball_tree', 'kd_tree']: if issparse(X): raise ValueError( "%s does not work with sparse matrices. Densify the data, " "or set algorithm='brute'" % self._fit_method) results = self._tree.query_radius(X, radius, return_distance=return_distance) if return_distance: results = results[::-1] else: raise ValueError("internal: _fit_method not recognized") if not query_is_train: return results else: # If the query data is the same as the indexed data, we would like # to ignore the first nearest neighbor of every sample, i.e # the sample itself. 
if return_distance: dist, neigh_ind = results else: neigh_ind = results for ind, ind_neighbor in enumerate(neigh_ind): mask = ind_neighbor != ind neigh_ind[ind] = ind_neighbor[mask] if return_distance: dist[ind] = dist[ind][mask] if return_distance: return dist, neigh_ind return neigh_ind def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'): """Computes the (weighted) graph of Neighbors for points in X Neighborhoods are restricted the points at a distance lower than radius. Parameters ---------- X : array-like, shape = [n_samples, n_features], optional The query point or points. If not provided, neighbors of each indexed point are returned. In this case, the query point is not considered its own neighbor. radius : float Radius of neighborhoods. (default is the value passed to the constructor). mode : {'connectivity', 'distance'}, optional Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are Euclidean distance between points. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(radius=1.5) >>> neigh.fit(X) # doctest: +ELLIPSIS NearestNeighbors(algorithm='auto', leaf_size=30, ...) 
>>> A = neigh.radius_neighbors_graph(X) >>> A.toarray() array([[ 1., 0., 1.], [ 0., 1., 0.], [ 1., 0., 1.]]) See also -------- kneighbors_graph """ if X is not None: X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) n_samples2 = self._fit_X.shape[0] if radius is None: radius = self.radius # construct CSR matrix representation of the NN graph if mode == 'connectivity': A_ind = self.radius_neighbors(X, radius, return_distance=False) A_data = None elif mode == 'distance': dist, A_ind = self.radius_neighbors(X, radius, return_distance=True) A_data = np.concatenate(list(dist)) else: raise ValueError( 'Unsupported mode, must be one of "connectivity", ' 'or "distance" but got %s instead' % mode) n_samples1 = A_ind.shape[0] n_neighbors = np.array([len(a) for a in A_ind]) A_ind = np.concatenate(list(A_ind)) if A_data is None: A_data = np.ones(len(A_ind)) A_indptr = np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors))) return csr_matrix((A_data, A_ind, A_indptr), shape=(n_samples1, n_samples2)) class SupervisedFloatMixin(object): def fit(self, X, y): """Fit the model using X as training data and y as target values Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree} Training data. If array or matrix, shape = [n_samples, n_features] y : {array-like, sparse matrix} Target values, array of float values, shape = [n_samples] or [n_samples, n_outputs] """ if not isinstance(X, (KDTree, BallTree)): X, y = check_X_y(X, y, "csr", multi_output=True) self._y = y return self._fit(X) class SupervisedIntegerMixin(object): def fit(self, X, y): """Fit the model using X as training data and y as target values Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree} Training data. 
If array or matrix, shape = [n_samples, n_features] y : {array-like, sparse matrix} Target values of shape = [n_samples] or [n_samples, n_outputs] """ if not isinstance(X, (KDTree, BallTree)): X, y = check_X_y(X, y, "csr", multi_output=True) if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1: if y.ndim != 1: warnings.warn("A column-vector y was passed when a 1d array " "was expected. Please change the shape of y to " "(n_samples, ), for example using ravel().", DataConversionWarning, stacklevel=2) self.outputs_2d_ = False y = y.reshape((-1, 1)) else: self.outputs_2d_ = True self.classes_ = [] self._y = np.empty(y.shape, dtype=np.int) for k in range(self._y.shape[1]): classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True) self.classes_.append(classes) if not self.outputs_2d_: self.classes_ = self.classes_[0] self._y = self._y.ravel() return self._fit(X) class UnsupervisedMixin(object): def fit(self, X, y=None): """Fit the model using X as training data Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree} Training data. If array or matrix, shape = [n_samples, n_features] """ return self._fit(X)
bsd-3-clause
INM-6/Python-Module-of-the-Week
session20_NEST/snakemake/scripts/simulateBrunel.py
1
3959
import argparse import numpy as np import matplotlib.pyplot as plt import nest # parse command line parameters parser = argparse.ArgumentParser(description='Simulate a Brunel network.') parser.add_argument('spikefile', type=str, help='output file (spikes)') parser.add_argument('rasterfile', type=str, help='output file (rasterplot)') parser.add_argument('g', type=float) parser.add_argument('nu_ex', type=float) args = parser.parse_args() # simulation parameters simtime = 500. # simulation time (ms) dt = 0.1 # simulation resolution (ms) # network parameters gamma = 0.25 # relative number of inhibitory connections NE = 5000 # number of excitatory neurons (10.000 in [1]) NI = int(gamma * NE) # number of inhibitory neurons N_rec = 50 # record from 100 (50 e + 50 i) neurons CE = 1000 # indegree from excitatory neurons CI = int(gamma * CE) # indegree from inhibitory neurons # synapse parameters w = 0.1 # excitatory synaptic weight (mV) g = args.g # relative inhibitory to excitatory synaptic weight d = 1.5 # synaptic transmission delay (ms) # neuron paramters V_th = 20. # spike threshold (mV) tau_m = 20. 
# membrane time constant (ms) neuron_params = { 'C_m': 1.0, # membrane capacity (pF) 'E_L': 0., # resting membrane potential (mV) 'I_e': 0., # external input current (pA) 'V_m': 0., # membrane potential (mV) 'V_reset': 10., # reset membrane potential after a spike (mV) 'V_th': V_th, # 't_ref': 2.0, # refractory period (ms) 'tau_m': tau_m, # } # external input parameters nu_th = V_th / (w * tau_m) # external rate needed to evoke activity (spikes/ms) nu_ex = args.nu_ex * nu_th # set external rate above threshold p_rate = 1e3 * nu_ex # external rate (spikes/s) # configure kernel nest.ResetKernel() nest.SetKernelStatus({ 'resolution': dt, # set simulation resolution 'print_time': True # enable printing of simulation progress (-> terminal) }) # set default parameters for neurons and create neurons nest.SetDefaults('iaf_psc_delta', neuron_params) neurons_e = nest.Create('iaf_psc_delta', NE) neurons_i = nest.Create('iaf_psc_delta', NI) # create poisson generator and set 'rate' to p_rate pgen = nest.Create('poisson_generator', params={'rate': p_rate}) # create spike detectors spikes = nest.Create('spike_detector') nest.SetStatus(spikes, [{'withtime': True, 'withgid': True, 'to_file': False}]) # create excitatory connections # synapse specification syn_exc = {'delay': d, 'weight': w} # connection specification conn_exc = {'rule': 'fixed_indegree', 'indegree': CE} # connect stuff nest.Connect(neurons_e, neurons_e, conn_exc, syn_exc) nest.Connect(neurons_e, neurons_i, conn_exc, syn_exc) # create inhibitory connections # synapse specification syn_inh = {'delay': d, 'weight': - g * w} # connection specification conn_inh = {'rule': 'fixed_indegree', 'indegree': CI} # connect stuff nest.Connect(neurons_i, neurons_e, conn_inh, syn_inh) nest.Connect(neurons_i, neurons_i, conn_inh, syn_inh) # connect poisson generator using the excitatory connection weight nest.Connect(pgen, neurons_i, syn_spec=syn_exc) nest.Connect(pgen, neurons_e, syn_spec=syn_exc) # connect N_rec excitatory / 
inhibitory neurons to spike detector nest.Connect(neurons_e[:N_rec], spikes) # simulate nest.Simulate(simtime) # read out spikes from spikedetector data = nest.GetStatus(spikes, 'events')[0] ids = data['senders'] times = data['times'] # save spikes np.save(args.spikefile, [ids, times]) # raster plot of spiking activity using matplotlib plt.plot(times, ids, 'o') plt.xlabel('Time (ms)') plt.xlim(simtime - 100, simtime) plt.savefig(args.rasterfile)
mit
phobson/statsmodels
statsmodels/datasets/utils.py
1
11013
from statsmodels.compat.python import (range, StringIO, urlopen, HTTPError, URLError, lrange, cPickle, urljoin, BytesIO, long) import sys import shutil from os import environ from os import makedirs from os.path import expanduser from os.path import exists from os.path import join import numpy as np from numpy import array from pandas import read_csv, DataFrame, Index def webuse(data, baseurl='http://www.stata-press.com/data/r11/', as_df=True): """ Download and return an example dataset from Stata. Parameters ---------- data : str Name of dataset to fetch. baseurl : str The base URL to the stata datasets. as_df : bool If True, returns a `pandas.DataFrame` Returns ------- dta : Record Array A record array containing the Stata dataset. Examples -------- >>> dta = webuse('auto') Notes ----- Make sure baseurl has trailing forward slash. Doesn't do any error checking in response URLs. """ # lazy imports from statsmodels.iolib import genfromdta url = urljoin(baseurl, data+'.dta') dta = urlopen(url) dta = BytesIO(dta.read()) # make it truly file-like if as_df: # could make this faster if we don't process dta twice? return DataFrame.from_records(genfromdta(dta)) else: return genfromdta(dta) class Dataset(dict): def __init__(self, **kw): # define some default attributes, so pylint can find them self.endog = None self.exog = None self.data = None self.names = None dict.__init__(self, kw) self.__dict__ = self # Some datasets have string variables. If you want a raw_data # attribute you must create this in the dataset's load function. 
try: # some datasets have string variables self.raw_data = self.data.view((float, len(self.names))) except: pass def __repr__(self): return str(self.__class__) def process_recarray(data, endog_idx=0, exog_idx=None, stack=True, dtype=None): names = list(data.dtype.names) if isinstance(endog_idx, (int, long)): endog = array(data[names[endog_idx]], dtype=dtype) endog_name = names[endog_idx] endog_idx = [endog_idx] else: endog_name = [names[i] for i in endog_idx] if stack: endog = np.column_stack(data[field] for field in endog_name) else: endog = data[endog_name] if exog_idx is None: exog_name = [names[i] for i in range(len(names)) if i not in endog_idx] else: exog_name = [names[i] for i in exog_idx] if stack: exog = np.column_stack(data[field] for field in exog_name) else: exog = data[exog_name] if dtype: endog = endog.astype(dtype) exog = exog.astype(dtype) dataset = Dataset(data=data, names=names, endog=endog, exog=exog, endog_name=endog_name, exog_name=exog_name) return dataset def process_recarray_pandas(data, endog_idx=0, exog_idx=None, dtype=None, index_idx=None): data = DataFrame(data, dtype=dtype) names = data.columns if isinstance(endog_idx, (int, long)): endog_name = names[endog_idx] endog = data[endog_name] if exog_idx is None: exog = data.drop([endog_name], axis=1) else: exog = data.filter(names[exog_idx]) else: endog = data.ix[:, endog_idx] endog_name = list(endog.columns) if exog_idx is None: exog = data.drop(endog_name, axis=1) elif isinstance(exog_idx, (int, long)): exog = data.filter([names[exog_idx]]) else: exog = data.filter(names[exog_idx]) if index_idx is not None: # NOTE: will have to be improved for dates endog.index = Index(data.ix[:, index_idx]) exog.index = Index(data.ix[:, index_idx]) data = data.set_index(names[index_idx]) exog_name = list(exog.columns) dataset = Dataset(data=data, names=list(names), endog=endog, exog=exog, endog_name=endog_name, exog_name=exog_name) return dataset def _maybe_reset_index(data): """ All the Rdatasets have 
the integer row.labels from R if there is no real index. Strip this for a zero-based index """ if data.index.equals(Index(lrange(1, len(data) + 1))): data = data.reset_index(drop=True) return data def _get_cache(cache): if cache is False: # do not do any caching or load from cache cache = None elif cache is True: # use default dir for cache cache = get_data_home(None) else: cache = get_data_home(cache) return cache def _cache_it(data, cache_path): if sys.version_info[0] >= 3: # for some reason encode("zip") won't work for me in Python 3? import zlib # use protocol 2 so can open with python 2.x if cached in 3.x open(cache_path, "wb").write(zlib.compress(cPickle.dumps(data, protocol=2))) else: open(cache_path, "wb").write(cPickle.dumps(data).encode("zip")) def _open_cache(cache_path): if sys.version_info[0] >= 3: # NOTE: don't know why but decode('zip') doesn't work on my # Python 3 build import zlib data = zlib.decompress(open(cache_path, 'rb').read()) # return as bytes object encoded in utf-8 for cross-compat of cached data = cPickle.loads(data).encode('utf-8') else: data = open(cache_path, 'rb').read().decode('zip') data = cPickle.loads(data) return data def _urlopen_cached(url, cache): """ Tries to load data from cache location otherwise downloads it. If it downloads the data and cache is not None then it will put the downloaded data in the cache path. 
""" from_cache = False if cache is not None: cache_path = join(cache, url.split("://")[-1].replace('/', ',') + ".zip") try: data = _open_cache(cache_path) from_cache = True except: pass # not using the cache or didn't find it in cache if not from_cache: data = urlopen(url).read() if cache is not None: # then put it in the cache _cache_it(data, cache_path) return data, from_cache def _get_data(base_url, dataname, cache, extension="csv"): url = base_url + (dataname + ".%s") % extension try: data, from_cache = _urlopen_cached(url, cache) except HTTPError as err: if '404' in str(err): raise ValueError("Dataset %s was not found." % dataname) else: raise err data = data.decode('utf-8', 'strict') return StringIO(data), from_cache def _get_dataset_meta(dataname, package, cache): # get the index, you'll probably want this cached because you have # to download info about all the data to get info about any of the data... index_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/master/" "datasets.csv") data, _ = _urlopen_cached(index_url, cache) # Python 3 if sys.version[0] == '3': # pragma: no cover data = data.decode('utf-8', 'strict') index = read_csv(StringIO(data)) idx = np.logical_and(index.Item == dataname, index.Package == package) dataset_meta = index.ix[idx] return dataset_meta["Title"].item() def get_rdataset(dataname, package="datasets", cache=False): """download and return R dataset Parameters ---------- dataname : str The name of the dataset you want to download package : str The package in which the dataset is found. The default is the core 'datasets' package. cache : bool or str If True, will download this data into the STATSMODELS_DATA folder. The default location is a folder called statsmodels_data in the user home folder. Otherwise, you can specify a path to a folder to use for caching the data. If False, the data will not be cached. Returns ------- dataset : Dataset instance A `statsmodels.data.utils.Dataset` instance. 
This objects has attributes:: * data - A pandas DataFrame containing the data * title - The dataset title * package - The package from which the data came * from_cache - Whether not cached data was retrieved * __doc__ - The verbatim R documentation. Notes ----- If the R dataset has an integer index. This is reset to be zero-based. Otherwise the index is preserved. The caching facilities are dumb. That is, no download dates, e-tags, or otherwise identifying information is checked to see if the data should be downloaded again or not. If the dataset is in the cache, it's used. """ # NOTE: use raw github bc html site might not be most up to date data_base_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/" "master/csv/"+package+"/") docs_base_url = ("https://raw.github.com/vincentarelbundock/Rdatasets/" "master/doc/"+package+"/rst/") cache = _get_cache(cache) data, from_cache = _get_data(data_base_url, dataname, cache) data = read_csv(data, index_col=0) data = _maybe_reset_index(data) title = _get_dataset_meta(dataname, package, cache) doc, _ = _get_data(docs_base_url, dataname, cache, "rst") return Dataset(data=data, __doc__=doc.read(), package=package, title=title, from_cache=from_cache) # The below function were taken from sklearn def get_data_home(data_home=None): """Return the path of the statsmodels data dir. This folder is used by some large dataset loaders to avoid downloading the data several times. By default the data dir is set to a folder named 'statsmodels_data' in the user home folder. Alternatively, it can be set by the 'STATSMODELS_DATA' environment variable or programatically by giving an explit folder path. The '~' symbol is expanded to the user home folder. If the folder does not already exist, it is automatically created. 
""" if data_home is None: data_home = environ.get('STATSMODELS_DATA', join('~', 'statsmodels_data')) data_home = expanduser(data_home) if not exists(data_home): makedirs(data_home) return data_home def clear_data_home(data_home=None): """Delete all the content of the data home cache.""" data_home = get_data_home(data_home) shutil.rmtree(data_home) def check_internet(): """Check if internet is available""" try: urlopen("https://github.com") except URLError as err: return False return True
bsd-3-clause
danny200309/BuildingMachineLearningSystemsWithPython
ch10/simple_classification.py
21
2299
# This code is supporting material for the book # Building Machine Learning Systems with Python # by Willi Richert and Luis Pedro Coelho # published by PACKT Publishing # # It is made available under the MIT License import mahotas as mh import numpy as np from glob import glob from features import texture, color_histogram from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler basedir = '../SimpleImageDataset/' haralicks = [] labels = [] chists = [] print('This script will test (with cross-validation) classification of the simple 3 class dataset') print('Computing features...') # Use glob to get all the images images = glob('{}/*.jpg'.format(basedir)) # We sort the images to ensure that they are always processed in the same order # Otherwise, this would introduce some variation just based on the random # ordering that the filesystem uses for fname in sorted(images): imc = mh.imread(fname) haralicks.append(texture(mh.colors.rgb2grey(imc))) chists.append(color_histogram(imc)) # Files are named like building00.jpg, scene23.jpg... 
labels.append(fname[:-len('xx.jpg')]) print('Finished computing features.') haralicks = np.array(haralicks) labels = np.array(labels) chists = np.array(chists) haralick_plus_chists = np.hstack([chists, haralicks]) # We use Logistic Regression because it achieves high accuracy on small(ish) datasets # Feel free to experiment with other classifiers clf = Pipeline([('preproc', StandardScaler()), ('classifier', LogisticRegression())]) from sklearn import cross_validation cv = cross_validation.LeaveOneOut(len(images)) scores = cross_validation.cross_val_score( clf, haralicks, labels, cv=cv) print('Accuracy (Leave-one-out) with Logistic Regression [haralick features]: {:.1%}'.format( scores.mean())) scores = cross_validation.cross_val_score( clf, chists, labels, cv=cv) print('Accuracy (Leave-one-out) with Logistic Regression [color histograms]: {:.1%}'.format( scores.mean())) scores = cross_validation.cross_val_score( clf, haralick_plus_chists, labels, cv=cv) print('Accuracy (Leave-one-out) with Logistic Regression [texture features + color histograms]: {:.1%}'.format( scores.mean()))
mit
zhonghualiu/FaST-LMM
fastlmm/association/score.py
1
19728
import numpy as NP import scipy as sp import scipy.linalg as LA import numpy.linalg as nla import os import sys import glob sys.path.append("./../../pyplink") from fastlmm.pyplink.plink import * from pysnptools.util.pheno import * from fastlmm.util.mingrid import * #import pdb import scipy.stats as ST import fastlmm.util.stats as ss import fastlmm.util.util as util import fastlmm.association as association class scoretest(association.varcomp_test): ''' This is the super class that just performs a score test for the 1K linear case, gives P-values etc. All other models are inherited ''' __slots__ = ["squaredform","expectationsqform","varsqform","GPG","GPY"] def __init__(self,Y,X=None,appendbias=False): association.varcomp_test.__init__(self,Y=Y,X=X,appendbias=appendbias) pass def _score(self,G1): ''' This calls the score computation for a single kernel Christoph guess: varsqform is the variance of sigma_g, and is the inverse of the Fisher information wrt sigma_g Christoph: should compute variance of h2 (and test h2>0), which eliminates one nusiance parameter and yields a better test ''' self.squaredform, self.expectationsqform, self.varsqform, self.GPG, self.GPY= scoreNoK( Y=self.Y, X = self.X, Xdagger=None, G = G1, sigma2=None,Bartletcorrection=True) if self.GPG.shape[0]==0: raise Exception("GPG is empty") return self.squaredform, self.expectationsqform, self.varsqform, self.GPG #def _pv(self, type): # this used to default to ="davies" # evalstring = 'self.pv_%s(self.squaredform,self.expectationsqform,self.varsqform,self.GPG)' % (type) # return eval(evalstring) def testG(self,G1,type, altModel=None,i_exclude=None,G_exclude=None): """ Params: G1: SNPs to be tested type: moment matching davies etc i_exclude: Dummy G_exclude: Dummy """ # this used to default to ="davies" self._score(G1=G1) pv = type.pv(self.squaredform,self.expectationsqform,self.varsqform,self.GPG) #stat = scoretest.scoreteststat(self.squaredform,self.varsqform) test={ 'pv':pv, 'stat':self.squaredform 
} return test class scoretest_logit(scoretest): __slots__ = ["Y","X","Xdagger","logreg_result","logreg_mod","pY","stdY","VX","pinvVX"] def __init__(self,Y,X=None,appendbias=False): ## check if is binary uniquey=sp.unique(Y) if not sp.sort(uniquey).tolist()==[0,1]: raise Exception("must use binary data in {0,1} for logit tests, found:" + str(Y)) scoretest.__init__(self,Y=Y,X=X,appendbias=appendbias) #from sklearn.linear_model import LogisticRegression as LR #logreg_sk = LR(C=200000.0) #logreg_sk.fit( X, Y ) import statsmodels.api as sm self.logreg_mod = sm.Logit(Y[:,0],X) self.logreg_result = self.logreg_mod.fit(disp=0) self.pY = self.logreg_result.predict(X) self.stdY=sp.sqrt(self.pY*(1.0-self.pY)) self.VX=self.X * NP.lib.stride_tricks.as_strided((self.stdY), (self.stdY.size,self.X.shape[1]), (self.stdY.itemsize,0)) self.pinvVX=nla.pinv(self.VX) def _score(self, G1): ''' compute the score Inputs: Bartletcorrection: refers to dividing by N-D instead of D, it is used in REML Outputs: squaredform expectationsqform varsqform GPG=P^1/2*K*P^1/2 (take eigenvalues of this for Davies method) ''' Y=self.Y X=self.X N=Y.shape[0] if Y.ndim == 1: P=1 #num of phenotypes else: P = Y.shape[1] if X is None: D = 1 #num of covariates (and assumes they are independent) else: D = X.shape[1] RxY = (self.Y.flatten()-self.pY) #residual of y regressed on X, which here, is equivalent to sigma2*Py (P is the projection matrix, which is idempotent) VG = G1 * NP.lib.stride_tricks.as_strided(self.stdY, (self.stdY.size,G1.shape[1]), (self.stdY.itemsize,0)) GY = G1.T.dot(RxY) squaredform=(GY*GY).sum()/(2.0*P) RxVG,Xd = linreg(VG, X=self.VX, Xdagger=self.pinvVX,rcond=None) if (G1.shape[0]<G1.shape[1]): GPG=RxVG.dot(RxVG.T)/(2.0*P) else: GPG=RxVG.T.dot(RxVG)/(2.0*P) self.squaredform=squaredform self.expectationsqform=None self.varsqform=None self.GPG=GPG return squaredform, GPG class scoretest2K(scoretest): __slots__ = 
["K","PxKPx","G0","U","S","Xdagger","UY","UUY","YUUY","optparams","expectedinfo","lowrank","Neff"] def __init__(self,Y,X=None,K=None,G0=None,appendbias=False,forcefullrank=False): scoretest.__init__(self,Y=Y,X=X,appendbias=appendbias) self.Xdagger = None self.G0=G0 self.K=K #compute the spectral decomposition of K self.lowrank = False N=Y.shape[0] if Y.ndim==1: P=1 else: P=Y.shape[1] D=1 if X is not None: D=X.shape[1] self.Neff = N-D if self.K is not None: ar = sp.arange(self.K.shape[0]) self.K[ar,ar]+=1.0 self.PxKPx,self.Xdagger = linreg(Y=(self.K), X=self.X, Xdagger=self.Xdagger) self.PxKPx,self.Xdagger = linreg(Y=self.PxKPx.T, X=self.X, Xdagger=self.Xdagger) [self.S,self.U] = LA.eigh(self.PxKPx) self.K[ar,ar]-=1.0 self.U=self.U[:,D:N] self.S=self.S[D:N]-1.0 elif 0.7*(self.Neff)<=self.G0.shape[1] or forcefullrank: self.K = self.G0.dot(self.G0.T) # BR: changed K to self.K (K is not defined) ar = sp.arange(self.K.shape[0]) self.K[ar,ar]+=1.0 self.PxKPx,self.Xdagger = linreg(Y=(self.K), X=self.X, Xdagger=self.Xdagger) self.PxKPx,self.Xdagger = linreg(Y=self.PxKPx.T, X=self.X, Xdagger=self.Xdagger) self.K[ar,ar]-=1.0 # BR: changed PxKPx to self.PxKPx (PxKPx is not defined) [self.S,self.U] = LA.eigh(self.PxKPx) self.U=self.U[:,D:N] self.S=self.S[D:N]-1.0 else: PxG,self.Xdagger = linreg(Y=self.G0, X=self.X, Xdagger=self.Xdagger) [self.U,self.S,V] = LA.svd(PxG,False,True) inonzero = self.S>1E-10 self.S=self.S[inonzero]*self.S[inonzero] self.U=self.U[:,inonzero] self.lowrank = True pass #rotate the phenotype as well as the fixed effects self.UY = self.U.T.dot(self.Y) if self.lowrank: Yres,self.Xdagger = linreg(Y=self.Y, X=self.X, Xdagger=self.Xdagger) self.UUY = Yres-self.U.dot(self.UY) self.YUUY = (self.UUY * self.UUY).sum() pass #learn null model resmin=[None] def f(x,resmin=resmin,**kwargs): res = self._nLLeval(h2=x) if (resmin[0] is None) or (res['nLL']<resmin[0]['nLL']): resmin[0]=res return res['nLL'] min = minimize1D(f, evalgrid = None, nGrid=20, minval=0.0, 
maxval = 0.99999) self.optparams = resmin[0] #pre-compute model parameters self.expectedinfo = sp.zeros((2,2)) #tr(PIPI) Sd = 1.0/((1.0 - self.optparams['h2']) + self.optparams['h2'] * self.S) Sd *= Sd self.expectedinfo[0,0] = (Sd).sum()#/(self.optparams['sigma2']*self.optparams['sigma2']) if self.lowrank: self.expectedinfo[0,0]+=((self.Neff-self.S.shape[0]))/((1.0 - self.optparams['h2'])*(1.0 - self.optparams['h2'])) #tr(PKPI) Sd*=self.S self.expectedinfo[1,0] = (Sd).sum()#/(self.optparams['sigma2']*self.optparams['sigma2']) self.expectedinfo[0,1] = self.expectedinfo[1,0] #tr(PKPK) Sd*=self.S self.expectedinfo[1,1] = (Sd).sum()#/(self.optparams['sigma2']*self.optparams['sigma2']) self.expectedinfo*=0.5*P/(self.optparams['sigma2']*self.optparams['sigma2']) pass def _nLLeval(self,h2=0.0): ''' evaluate -ln( N( U^T*y | U^T*X*beta , h2*S + (1-h2)*I ) ), where K = USU^T -------------------------------------------------------------------------- Input: h2 : mixture weight between K and Identity (environmental noise) -------------------------------------------------------------------------- Output dictionary: 'nLL' : negative log-likelihood 'sigma2' : the model variance sigma^2 'h2' : mixture weight between Covariance and noise -------------------------------------------------------------------------- ''' if (h2<0.0) or (h2>=1.0): return {'nLL':3E20, 'h2':h2 } k=self.S.shape[0] N=self.Y.shape[0] if self.Y.ndim==1: P=1 else: P=self.Y.shape[1] Sd = h2*self.S + (1.0-h2) UYS = self.UY / NP.lib.stride_tricks.as_strided(Sd, (Sd.size,self.UY.shape[1]), (Sd.itemsize,0)) YKY = (UYS*self.UY).sum() logdetK = sp.log(Sd).sum() if (self.lowrank):#low rank part YKY += self.YUUY/(1.0-h2) logdetK +=sp.log(1.0-h2)*(self.Neff*P-k) sigma2 = YKY / (self.Neff*P) nLL = 0.5 * ( logdetK + self.Neff*P * ( sp.log(2.0*sp.pi*sigma2) + 1 ) ) result = { 'nLL':nLL, 'sigma2':sigma2, 'h2':h2 } return result def _score(self, G1): ''' compute the score with a background kernel ''' #if 1: # #background kernel 
# self.K=self.G.dot(self.G.T) # h2 = self.optparams['h2'] # sig = self.optparams['sigma2'] # V = h2*self.K + (1-h2)*sp.eye(self.K.shape[0]) # V*=sig # Vi=LA.inv(V) # P =LA.inv(self.X.T.dot(Vi).dot(self.X)) # P=self.X.dot(P.dot(self.X.T)) # P=Vi.dot(P.dot(Vi)) # Px = Vi-P P = self.UY.shape[1] resG, Xdagger = linreg(Y=G1, X=self.X, Xdagger=self.Xdagger) sigma2e = (1.0-self.optparams["h2"])*self.optparams["sigma2"] sigma2g = self.optparams["h2"]*self.optparams["sigma2"] UG = self.U.T.dot(resG) if self.lowrank: UUG = resG-self.U.dot(UG) Sd = 1.0/(self.S*sigma2g + sigma2e) SUG = UG * NP.lib.stride_tricks.as_strided(Sd, (Sd.size,UG.shape[1]), (Sd.itemsize,0)) #tr(YPGGPY) GPY = SUG.T.dot(self.UY) if self.lowrank: GPY += UUG.T.dot(self.UUY)/sigma2e squaredform = 0.5*(GPY*GPY).sum() #tr(PGG) if G1.shape[0]>G1.shape[1]: GPG = SUG.T.dot(UG) else: GPG = SUG.dot(UG.T) expectationsqform = 0.5*P*GPG.trace() #tr(PGGPGG) trPGGPGG = 0.5*P*(GPG*GPG).sum() #tr(PGGPI) SUG*=SUG expectedInfoCross=sp.empty(2) expectedInfoCross[0] = 0.5*P*SUG.sum() #tr(PGGPK) SUG*=NP.lib.stride_tricks.as_strided(self.S, (self.S.size,SUG.shape[1]), (self.S.itemsize,0)) expectedInfoCross[1] = 0.5*P*SUG.sum() if self.lowrank: if G1.shape[0]>G1.shape[1]: GPG_lowr = UUG.T.dot(UUG)/sigma2e else: GPG_lowr = UUG.dot(UUG.T)/sigma2e GPG+=GPG_lowr #tr(PGGPGG) expectationsqform += 0.5*P*GPG_lowr.trace() trPGGPGG += 0.5*P*(GPG_lowr*GPG_lowr).sum() #tr(PGGPI) expectedInfoCross[0] += 0.5*P*GPG_lowr.trace()/(sigma2e) varsqform = 1.0/(trPGGPGG - expectedInfoCross.dot(LA.inv(self.expectedinfo).dot(expectedInfoCross))) self.squaredform = squaredform self.expectationsqform=expectationsqform self.varsqform=varsqform self.GPG = GPG*0.5 return self.squaredform, self.expectationsqform, self.varsqform, self.GPG def _findH2(self, nGridH2=10, minH2 = 0.0, maxH2 = 0.99999, **kwargs): ''' Find the optimal h2 for a given K. 
(default maxH2 value is set to a value smaller than 1 to avoid loss of positive definiteness of the final model covariance) -------------------------------------------------------------------------- Input: nGridH2 : number of h2-grid points to evaluate the negative log-likelihood at minH2 : minimum value for h2 optimization maxH2 : maximum value for h2 optimization -------------------------------------------------------------------------- Output: dictionary containing the model parameters at the optimal h2 -------------------------------------------------------------------------- ''' #f = lambda x : (self.nLLeval(h2=x,**kwargs)['nLL']) resmin=[None] def f(x,resmin=resmin,**kwargs): res = self._nLLeval(h2=x,**kwargs) if (resmin[0] is None) or (res['nLL']<resmin[0]['nLL']): resmin[0]=res return res['nLL'] min = minimize1D(f=f, nGrid=nGridH2, minval=minH2, maxval=maxH2 ) return resmin[0] def linreg(Y, X=None, Xdagger=None,rcond=None): if Y.ndim == 1: P=1 else: P = Y.shape[1] if X is None: RxY = Y-Y.mean(0) return RxY, None else: if Xdagger is None: #Xdagger = LA.pinv(X,rcond) #can be ridiculously slow (solves a linear system), 20 seconds instead of 0.1 sec. 
Xdagger = nla.pinv(X) #SVD-based, and seems fast RxY = Y-X.dot(Xdagger.dot(Y)) return RxY, Xdagger def scoreNoK( Y, X = None, Xdagger=None, G = None, sigma2=None,Bartletcorrection=True): ''' compute the score Inputs: Bartletcorrection: refers to dividing by N-D instead of D, it is used in REML Outputs: squaredform expectationsqform varsqform GPG=P^1/2*K*P^1/2 (take eigenvalues of this for Davies method) ''' N=Y.shape[0] if Y.ndim == 1: P=1 #num of phenotypes else: P = Y.shape[1] if X is None: D = 1 #num of covariates (and assumes they are independent) else: D = X.shape[1] RxY, Xdagger = linreg(Y=Y,X=X,Xdagger=Xdagger) #residual of y regressed on X, which here, is equivalent to sigma2*Py (P is the projection matrix, which is idempotent) if sigma2 is None: # note: Xdagger is pseudo inverse of X, or (X^T*X)^1*X^T such that Xdagger*y=beta if Bartletcorrection: sigma2 = (RxY*RxY).sum()/((N-D)*P) else: sigma2 = (RxY*RxY).sum()/(N*P) RxG, Xdagger = linreg(Y=G,X=X, Xdagger = Xdagger) #residual of G regressed on X, which here, is equivalent to PG (P is the projection matrix, and in this one kernel case, is idempotent) # note: P is never computed explicitly, only via residuals such as Py=1/sigma2(I-Xdagger*X)y and PG=1/sigma2(I-Xdagger*X)G # also note that "RxY"=Py=1/sigma2*(I-Xdagger*X)y is nothing more (except for 1/sigma2) than the residual of y regressed on X (i.e. 
y-X*beta), # and similarly for PG="RxG" GtRxY = G.T.dot(RxY) squaredform = ((GtRxY*GtRxY).sum())*(0.5/(sigma2*sigma2)) # yPKPy=yPG^T*GPy=(yPG^T)*(yPG^T)^T if G.shape[0]>G.shape[1]: GPG = sp.dot(RxG.T,RxG) #GPG is always a square matrix in the smaller dimension else: GPG = sp.dot(RxG,RxG.T) expectationsqform = P*(GPG.trace())*(0.5/sigma2) #note this is Trace(PKP)=Trace(PPK)=Trace(PK), for P=projection matrix in comment, and in the code P=1=#phen expectedinfo00 = P*(GPG*GPG).sum()*(0.5/(sigma2*sigma2)) expectedinfo10 = expectationsqform/sigma2 # P*0.5/(sigma2*sigma2)*GPG.trace() expectedinfo11 = P*(N-D)*(0.5/(sigma2*sigma2)) varsqform = 1.0/(expectedinfo00 - expectedinfo10*expectedinfo10/expectedinfo11) #if 1: # XXi=LA.inv(X.T.dot(X)) # Px=(sp.eye(N)-X.dot(XXi).dot(X.T))/sigma2 #pdb.set_trace() GPG/=sigma2*2.0 #what we will take eigenvalues of for Davies (which is P^1/2*K*P^1/2) #for debugging, explicitly compute GPG=P^1/2 * K * P^1/2 #SigInv=(1/sigma2)*sp.eye(N,N) #Phat=X.dot(LA.inv(X.T.dot(SigInv).dot(X))).dot(X.T).dot(SigInv) #PP=SigInv.dot(sp.eye(N,N)-Phat) #K=G.dot(G.T) #PKP=PP.dot(K).dot(PP) #ss.stats(PKP-PKP.T) ##eigvalsFull=LA.eigh(PKP,eigvals_only=True) #eigvalsFull2=LA.eigvals(PKP) #eigvalsLow =LA.eigh(GPG,eigvals_only=True) #GPG=PKP*0.5 #pdb.set_trace() return squaredform, expectationsqform, varsqform, GPG, GtRxY*(0.25/sigma2) if __name__ == "__main__": if 1:#example p-value computation for sample data #specify the directory that contains the data datadir = "data"#os.path.join('twokerneltest','data') #specify the directory that contains the alternative models in form of ped files datadiralt = os.path.join(datadir,'altmodels') pedfilesalt = glob.glob(os.path.join(datadiralt, '*.ped')) for i in xrange(len(pedfilesalt)): pedfilesalt[i]=pedfilesalt[i][0:-4] phenofile = os.path.join(datadir,'phen.N1000.M5000.txt') covarfile = os.path.join(datadir,'covariates.N1000.M5000.txt') #base0 = os.path.join(datadir,'snpDat.N1000.M5000.20Snps') base0 = 
os.path.join(datadir,'snpDat.N1000.M5000.10_20Snps') #specify index of the phenotype to be tested ipheno = 0 #only one phenotype in file, use the first one #exclusion parameters (correction for proximal contamination) mindist = 10 #minimum distance to alternative SNPs to be included in null model idist = 2 #use genetic distance #idist = 3 #use basepair distance #run the example logging.info(('\n\n\nrunning real data example') ) logging.info(('base file of null model: %s' % base0)) logging.info(('testing all SNP sets in %s' % datadiralt)) result = testPedfilesFromDir(phenofile, base0, pedfilesalt, ipheno=ipheno, mindist = mindist, idist=idist, covarfile = covarfile)
apache-2.0
idea4bsd/idea4bsd
python/helpers/pydev/pydevconsole.py
1
17708
''' Entry point module to start the interactive console. ''' from _pydev_imps._pydev_saved_modules import thread start_new_thread = thread.start_new_thread try: from code import InteractiveConsole except ImportError: from _pydevd_bundle.pydevconsole_code_for_ironpython import InteractiveConsole from code import compile_command from code import InteractiveInterpreter import os import sys from _pydev_imps._pydev_saved_modules import threading import traceback from _pydev_bundle import fix_getpass fix_getpass.fix_getpass() from _pydevd_bundle import pydevd_vars, pydevd_save_locals from _pydev_bundle.pydev_imports import Exec, _queue try: import __builtin__ except: import builtins as __builtin__ # @UnresolvedImport try: False True except NameError: # version < 2.3 -- didn't have the True/False builtins import __builtin__ setattr(__builtin__, 'True', 1) #Python 3.0 does not accept __builtin__.True = 1 in its syntax setattr(__builtin__, 'False', 0) from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface, BaseStdIn, set_result_ipython_value from _pydev_bundle.pydev_console_utils import CodeFragment IS_PYTHON_3K = False IS_PY24 = False try: if sys.version_info[0] == 3: IS_PYTHON_3K = True elif sys.version_info[0] == 2 and sys.version_info[1] == 4: IS_PY24 = True except: #That's OK, not all versions of python have sys.version_info pass class Command: def __init__(self, interpreter, code_fragment): """ :type code_fragment: CodeFragment :type interpreter: InteractiveConsole """ self.interpreter = interpreter self.code_fragment = code_fragment self.more = None def symbol_for_fragment(code_fragment): if code_fragment.is_single_line: symbol = 'single' else: symbol = 'exec' # Jython doesn't support this return symbol symbol_for_fragment = staticmethod(symbol_for_fragment) def run(self): text = self.code_fragment.text symbol = self.symbol_for_fragment(self.code_fragment) self.more = self.interpreter.runsource(text, '<input>', symbol) try: try: execfile #Not in Py3k 
except NameError: from _pydev_bundle.pydev_imports import execfile __builtin__.execfile = execfile except: pass # Pull in runfile, the interface to UMD that wraps execfile from _pydev_bundle.pydev_umd import runfile, _set_globals_function try: import builtins # @UnresolvedImport builtins.runfile = runfile except: import __builtin__ __builtin__.runfile = runfile #======================================================================================================================= # InterpreterInterface #======================================================================================================================= class InterpreterInterface(BaseInterpreterInterface): ''' The methods in this class should be registered in the xml-rpc server. ''' def __init__(self, host, client_port, mainThread, show_banner=True): BaseInterpreterInterface.__init__(self, mainThread) self.client_port = client_port self.host = host self.namespace = {} self.interpreter = InteractiveConsole(self.namespace) self._input_error_printed = False def do_add_exec(self, codeFragment): command = Command(self.interpreter, codeFragment) command.run() return command.more def get_namespace(self): return self.namespace def getCompletions(self, text, act_tok): try: from _pydev_bundle._pydev_completer import Completer completer = Completer(self.namespace, None) return completer.complete(act_tok) except: import traceback traceback.print_exc() return [] def close(self): sys.exit(0) def get_greeting_msg(self): return 'PyDev console: starting.\n' class _ProcessExecQueueHelper: _debug_hook = None _return_control_osc = False def set_debug_hook(debug_hook): _ProcessExecQueueHelper._debug_hook = debug_hook def process_exec_queue(interpreter): from pydev_ipython.inputhook import get_inputhook, set_return_control_callback def return_control(): ''' A function that the inputhooks can call (via inputhook.stdin_ready()) to find out if they should cede control and return ''' if _ProcessExecQueueHelper._debug_hook: 
# Some of the input hooks check return control without doing # a single operation, so we don't return True on every # call when the debug hook is in place to allow the GUI to run # XXX: Eventually the inputhook code will have diverged enough # from the IPython source that it will be worthwhile rewriting # it rather than pretending to maintain the old API _ProcessExecQueueHelper._return_control_osc = not _ProcessExecQueueHelper._return_control_osc if _ProcessExecQueueHelper._return_control_osc: return True if not interpreter.exec_queue.empty(): return True return False set_return_control_callback(return_control) from _pydev_bundle.pydev_import_hook import import_hook_manager from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot import_hook_manager.add_module_name("matplotlib", lambda: activate_matplotlib(interpreter.enableGui)) # enable_gui_function in activate_matplotlib should be called in main thread. That's why we call # interpreter.enableGui which put it into the interpreter's exec_queue and executes it in the main thread. import_hook_manager.add_module_name("pylab", activate_pylab) import_hook_manager.add_module_name("pyplot", activate_pyplot) while 1: # Running the request may have changed the inputhook in use inputhook = get_inputhook() if _ProcessExecQueueHelper._debug_hook: _ProcessExecQueueHelper._debug_hook() if inputhook: try: # Note: it'll block here until return_control returns True. inputhook() except: import traceback;traceback.print_exc() try: try: code_fragment = interpreter.exec_queue.get(block=True, timeout=1/20.) # 20 calls/second except _queue.Empty: continue if hasattr(code_fragment, '__call__'): # It can be a callable (i.e.: something that must run in the main # thread can be put in the queue for later execution). 
code_fragment() else: more = interpreter.add_exec(code_fragment) except KeyboardInterrupt: interpreter.buffer = None continue except SystemExit: raise except: type, value, tb = sys.exc_info() traceback.print_exception(type, value, tb, file=sys.__stderr__) exit() if 'IPYTHONENABLE' in os.environ: IPYTHON = os.environ['IPYTHONENABLE'] == 'True' else: IPYTHON = True try: try: exitfunc = sys.exitfunc except AttributeError: exitfunc = None if IPYTHON: from _pydev_bundle.pydev_ipython_console import InterpreterInterface if exitfunc is not None: sys.exitfunc = exitfunc else: try: delattr(sys, 'exitfunc') except: pass except: IPYTHON = False pass set_result_ipython_value(IPYTHON) #======================================================================================================================= # _DoExit #======================================================================================================================= def do_exit(*args): ''' We have to override the exit because calling sys.exit will only actually exit the main thread, and as we're in a Xml-rpc server, that won't work. ''' try: import java.lang.System java.lang.System.exit(1) except ImportError: if len(args) == 1: os._exit(args[0]) else: os._exit(0) def handshake(): return "PyCharm" #======================================================================================================================= # start_console_server #======================================================================================================================= def start_console_server(host, port, interpreter): if port == 0: host = '' #I.e.: supporting the internal Jython version in PyDev to create a Jython interactive console inside Eclipse. 
from _pydev_bundle.pydev_imports import SimpleXMLRPCServer as XMLRPCServer #@Reimport try: if IS_PY24: server = XMLRPCServer((host, port), logRequests=False) else: server = XMLRPCServer((host, port), logRequests=False, allow_none=True) except: sys.stderr.write('Error starting server with host: "%s", port: "%s", client_port: "%s"\n' % (host, port, interpreter.client_port)) sys.stderr.flush() raise # Tell UMD the proper default namespace _set_globals_function(interpreter.get_namespace) server.register_function(interpreter.execLine) server.register_function(interpreter.execMultipleLines) server.register_function(interpreter.getCompletions) server.register_function(interpreter.getFrame) server.register_function(interpreter.getVariable) server.register_function(interpreter.changeVariable) server.register_function(interpreter.getDescription) server.register_function(interpreter.close) server.register_function(interpreter.interrupt) server.register_function(handshake) server.register_function(interpreter.connectToDebugger) server.register_function(interpreter.hello) server.register_function(interpreter.getArray) server.register_function(interpreter.evaluate) # Functions for GUI main loop integration server.register_function(interpreter.enableGui) if port == 0: (h, port) = server.socket.getsockname() print(port) print(interpreter.client_port) sys.stderr.write(interpreter.get_greeting_msg()) sys.stderr.flush() while True: try: server.serve_forever() except: # Ugly code to be py2/3 compatible # https://sw-brainwy.rhcloud.com/tracker/PyDev/534: # Unhandled "interrupted system call" error in the pydevconsol.py e = sys.exc_info()[1] retry = False try: retry = e.args[0] == 4 #errno.EINTR except: pass if not retry: raise # Otherwise, keep on going return server def start_server(host, port, client_port): #replace exit (see comments on method) #note that this does not work in jython!!! (sys method can't be replaced). 
sys.exit = do_exit interpreter = InterpreterInterface(host, client_port, threading.currentThread()) start_new_thread(start_console_server,(host, port, interpreter)) process_exec_queue(interpreter) def get_ipython_hidden_vars_dict(): useful_ipython_vars = ['_', '__'] try: if IPYTHON and hasattr(__builtin__, 'interpreter'): pydev_interpreter = get_interpreter().interpreter if hasattr(pydev_interpreter, 'ipython') and hasattr(pydev_interpreter.ipython, 'user_ns_hidden'): res_dict = dict([(key, val) for key, val in pydev_interpreter.ipython.user_ns_hidden.items() if key not in useful_ipython_vars]) return res_dict return None except Exception: traceback.print_exc() return None def get_interpreter(): try: interpreterInterface = getattr(__builtin__, 'interpreter') except AttributeError: interpreterInterface = InterpreterInterface(None, None, threading.currentThread()) setattr(__builtin__, 'interpreter', interpreterInterface) sys.stderr.write(interpreterInterface.get_greeting_msg()) sys.stderr.flush() return interpreterInterface def get_completions(text, token, globals, locals): interpreterInterface = get_interpreter() interpreterInterface.interpreter.update(globals, locals) return interpreterInterface.getCompletions(text, token) #=============================================================================== # Debugger integration #=============================================================================== def exec_code(code, globals, locals, debugger): interpreterInterface = get_interpreter() interpreterInterface.interpreter.update(globals, locals) res = interpreterInterface.need_more(code) if res: return True interpreterInterface.add_exec(code, debugger) return False class ConsoleWriter(InteractiveInterpreter): skip = 0 def __init__(self, locals=None): InteractiveInterpreter.__init__(self, locals) def write(self, data): #if (data.find("global_vars") == -1 and data.find("pydevd") == -1): if self.skip > 0: self.skip -= 1 else: if data == "Traceback (most recent call 
last):\n": self.skip = 1 sys.stderr.write(data) def showsyntaxerror(self, filename=None): """Display the syntax error that just occurred.""" #Override for avoid using sys.excepthook PY-12600 type, value, tb = sys.exc_info() sys.last_type = type sys.last_value = value sys.last_traceback = tb if filename and type is SyntaxError: # Work hard to stuff the correct filename in the exception try: msg, (dummy_filename, lineno, offset, line) = value.args except ValueError: # Not the format we expect; leave it alone pass else: # Stuff in the right filename value = SyntaxError(msg, (filename, lineno, offset, line)) sys.last_value = value list = traceback.format_exception_only(type, value) sys.stderr.write(''.join(list)) def showtraceback(self): """Display the exception that just occurred.""" #Override for avoid using sys.excepthook PY-12600 try: type, value, tb = sys.exc_info() sys.last_type = type sys.last_value = value sys.last_traceback = tb tblist = traceback.extract_tb(tb) del tblist[:1] lines = traceback.format_list(tblist) if lines: lines.insert(0, "Traceback (most recent call last):\n") lines.extend(traceback.format_exception_only(type, value)) finally: tblist = tb = None sys.stderr.write(''.join(lines)) def console_exec(thread_id, frame_id, expression, dbg): """returns 'False' in case expression is partially correct """ frame = pydevd_vars.find_frame(thread_id, frame_id) expression = str(expression.replace('@LINE@', '\n')) #Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329 #(Names not resolved in generator expression in method) #See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html updated_globals = {} updated_globals.update(frame.f_globals) updated_globals.update(frame.f_locals) #locals later because it has precedence over the actual globals if IPYTHON: need_more = exec_code(CodeFragment(expression), updated_globals, frame.f_locals, dbg) if not need_more: 
pydevd_save_locals.save_locals(frame) return need_more interpreter = ConsoleWriter() try: code = compile_command(expression) except (OverflowError, SyntaxError, ValueError): # Case 1 interpreter.showsyntaxerror() return False if code is None: # Case 2 return True #Case 3 try: Exec(code, updated_globals, frame.f_locals) except SystemExit: raise except: interpreter.showtraceback() else: pydevd_save_locals.save_locals(frame) return False #======================================================================================================================= # main #======================================================================================================================= if __name__ == '__main__': #Important: don't use this module directly as the __main__ module, rather, import itself as pydevconsole #so that we don't get multiple pydevconsole modules if it's executed directly (otherwise we'd have multiple #representations of its classes). #See: https://sw-brainwy.rhcloud.com/tracker/PyDev/446: #'Variables' and 'Expressions' views stopped working when debugging interactive console import pydevconsole sys.stdin = pydevconsole.BaseStdIn(sys.stdin) port, client_port = sys.argv[1:3] from _pydev_bundle import pydev_localhost if int(port) == 0 and int(client_port) == 0: (h, p) = pydev_localhost.get_socket_name() client_port = p pydevconsole.start_server(pydev_localhost.get_localhost(), int(port), int(client_port))
apache-2.0
KennyCandy/HAR
module45/CCCPCC_32_32.py
2
18310
# Note that the dataset must be already downloaded for this script to work, do: # $ cd data/ # $ python download_dataset.py # quoc_trinh import tensorflow as tf import numpy as np import matplotlib import matplotlib.pyplot as plt from sklearn import metrics import os import sys import datetime # get current file_name as [0] of array file_name = os.path.splitext(os.path.basename(sys.argv[0]))[0] print(" File Name:") print(file_name) print("") # FLAG to know that whether this is traning process or not. FLAG = 'train' POOL_X = 16 POOL_Y = 18 N_HIDDEN_CONFIG = 32 save_path_name = file_name + "/model.ckpt" print(datetime.datetime.now()) # Write to file: time to start, type, time to end f = open(file_name + '/time.txt', 'a+') f.write("------------- \n") f.write("This is time \n") f.write("Started at \n") f.write(str(datetime.datetime.now())+'\n') if __name__ == "__main__": # ----------------------------- # step1: load and prepare data # ----------------------------- # Those are separate normalised input features for the neural network INPUT_SIGNAL_TYPES = [ "body_acc_x_", "body_acc_y_", "body_acc_z_", "body_gyro_x_", "body_gyro_y_", "body_gyro_z_", "total_acc_x_", "total_acc_y_", "total_acc_z_" ] # Output classes to learn how to classify LABELS = [ "WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS", "SITTING", "STANDING", "LAYING" ] DATA_PATH = "../data/" DATASET_PATH = DATA_PATH + "UCI HAR Dataset/" print("\n" + "Dataset is now located at: " + DATASET_PATH) # Preparing data set: TRAIN = "train/" TEST = "test/" # Load "X" (the neural network's training and testing inputs) def load_X(X_signals_paths): X_signals = [] for signal_type_path in X_signals_paths: file = open(signal_type_path, 'rb') # Read dataset from disk, dealing with text files' syntax X_signals.append( [np.array(serie, dtype=np.float32) for serie in [ row.replace(' ', ' ').strip().split(' ') for row in file ]] ) file.close() """Examples -------- >> > x = np.arange(4).reshape((2, 2)) >> > x array([[0, 1], 
[2, 3]]) >> > np.transpose(x) array([[0, 2], [1, 3]]) >> > x = np.ones((1, 2, 3)) >> > np.transpose(x, (1, 0, 2)).shape (2, 1, 3) """ return np.transpose(np.array(X_signals), (1, 2, 0)) X_train_signals_paths = [ DATASET_PATH + TRAIN + "Inertial Signals/" + signal + "train.txt" for signal in INPUT_SIGNAL_TYPES ] X_test_signals_paths = [ DATASET_PATH + TEST + "Inertial Signals/" + signal + "test.txt" for signal in INPUT_SIGNAL_TYPES ] X_train = load_X(X_train_signals_paths) # [7352, 128, 9] X_test = load_X(X_test_signals_paths) # [7352, 128, 9] # print(X_train) print(len(X_train)) # 7352 print(len(X_train[0])) # 128 print(len(X_train[0][0])) # 9 print(type(X_train)) X_train = np.reshape(X_train, [-1, 32, 36]) X_test = np.reshape(X_test, [-1, 32, 36]) print("-----------------X_train---------------") # print(X_train) print(len(X_train)) # 7352 print(len(X_train[0])) # 32 print(len(X_train[0][0])) # 36 print(type(X_train)) # exit() y_train_path = DATASET_PATH + TRAIN + "y_train.txt" y_test_path = DATASET_PATH + TEST + "y_test.txt" def one_hot(label): """convert label from dense to one hot argument: label: ndarray dense label ,shape: [sample_num,1] return: one_hot_label: ndarray one hot, shape: [sample_num,n_class] """ label_num = len(label) new_label = label.reshape(label_num) # shape : [sample_num] # because max is 5, and we will create 6 columns n_values = np.max(new_label) + 1 return np.eye(n_values)[np.array(new_label, dtype=np.int32)] # Load "y" (the neural network's training and testing outputs) def load_y(y_path): file = open(y_path, 'rb') # Read dataset from disk, dealing with text file's syntax y_ = np.array( [elem for elem in [ row.replace(' ', ' ').strip().split(' ') for row in file ]], dtype=np.int32 ) file.close() # Subtract 1 to each output class for friendly 0-based indexing return y_ - 1 y_train = one_hot(load_y(y_train_path)) y_test = one_hot(load_y(y_test_path)) print("---------y_train----------") # print(y_train) print(len(y_train)) # 7352 
print(len(y_train[0])) # 6 # ----------------------------------- # step2: define parameters for model # ----------------------------------- class Config(object): """ define a class to store parameters, the input should be feature mat of training and testing """ def __init__(self, X_train, X_test): # Input data self.train_count = len(X_train) # 7352 training series self.test_data_count = len(X_test) # 2947 testing series self.n_steps = len(X_train[0]) # 128 time_steps per series # Training self.learning_rate = 0.0025 self.lambda_loss_amount = 0.0015 self.training_epochs = 300 self.batch_size = 1000 # LSTM structure self.n_inputs = len(X_train[0][0]) # Features count is of 9: three 3D sensors features over time self.n_hidden = N_HIDDEN_CONFIG # nb of neurons inside the neural network self.n_classes = 6 # Final output classes self.W = { 'hidden': tf.Variable(tf.random_normal([self.n_inputs, self.n_hidden])), # [9, 32] 'output': tf.Variable(tf.random_normal([self.n_hidden, self.n_classes])) # [32, 6] } self.biases = { 'hidden': tf.Variable(tf.random_normal([self.n_hidden], mean=1.0)), # [32] 'output': tf.Variable(tf.random_normal([self.n_classes])) # [6] } config = Config(X_train, X_test) # print("Some useful info to get an insight on dataset's shape and normalisation:") # print("features shape, labels shape, each features mean, each features standard deviation") # print(X_test.shape, y_test.shape, # np.mean(X_test), np.std(X_test)) # print("the dataset is therefore properly normalised, as expected.") # # # ------------------------------------------------------ # step3: Let's get serious and build the neural network # ------------------------------------------------------ # [none, 128, 9] X = tf.placeholder(tf.float32, [None, config.n_steps, config.n_inputs]) # [none, 6] Y = tf.placeholder(tf.float32, [None, config.n_classes]) print("-------X Y----------") print(X) X = tf.reshape(X, shape=[-1, 32, 36]) print(X) print(Y) Y = tf.reshape(Y, shape=[-1, 6]) print(Y) # 
Weight Initialization def weight_variable(shape): # tra ve 1 gia tri random theo thuat toan truncated_ normal initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1, dtype=tf.float32) return tf.Variable(initial) def bias_varibale(shape): initial = tf.constant(0.1, shape=shape, name='Bias') return tf.Variable(initial) # Convolution and Pooling def conv2d(x, W): # Must have `strides[0] = strides[3] = 1 `. # For the most common case of the same horizontal and vertices strides, `strides = [1, stride, stride, 1] `. return tf.nn.conv2d(input=x, filter=W, strides=[1, 1, 1, 1], padding='SAME', name='conv_2d') def max_pool_2x2(x): return tf.nn.max_pool(value=x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='max_pool') def LSTM_Network(feature_mat, config): """model a LSTM Network, it stacks 2 LSTM layers, each layer has n_hidden=32 cells and 1 output layer, it is a full connet layer argument: feature_mat: ndarray feature matrix, shape=[batch_size,time_steps,n_inputs] config: class containing config of network return: : matrix output shape [batch_size,n_classes] """ W_conv1 = weight_variable([3, 3, 1, 32]) b_conv1 = bias_varibale([32]) # x_image = tf.reshape(x, shape=[-1, 28, 28, 1]) feature_mat_image = tf.reshape(feature_mat, shape=[-1, 32, 36, 1]) print("----feature_mat_image-----") print(feature_mat_image.get_shape()) h_conv1 = tf.nn.relu(conv2d(feature_mat_image, W_conv1) + b_conv1) h_pool1 = h_conv1 # Second Convolutional Layer W_conv2 = weight_variable([3, 3, 32, 32]) b_conv2 = weight_variable([32]) h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) h_pool2 = h_conv2 # Third Convolutional Layer W_conv3 = weight_variable([3, 3, 32, 32]) b_conv3 = weight_variable([32]) h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3) h_pool3 = max_pool_2x2(h_conv3) # Forth Convolutional Layer W_conv4 = weight_variable([3, 3, 32, 128]) b_conv4 = weight_variable([128]) h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4) h_pool4 = (h_conv4) # Fifth 
Convolutional Layer W_conv5 = weight_variable([3, 3, 128, 128]) b_conv5 = weight_variable([128]) h_conv5 = tf.nn.relu(conv2d(h_pool4, W_conv5) + b_conv5) h_pool5 = h_conv5 # Sixth Convolutional Layer W_conv6 = weight_variable([3, 3, 128, 1]) b_conv6 = weight_variable([1]) h_conv6 = tf.nn.relu(conv2d(h_pool5, W_conv6) + b_conv6) h_pool6 = h_conv6 h_pool6 = tf.reshape(h_pool6, shape=[-1, POOL_X, POOL_Y]) feature_mat = h_pool6 print("----feature_mat-----") print(feature_mat) # exit() # W_fc1 = weight_variable([8 * 9 * 1, 1024]) # b_fc1 = bias_varibale([1024]) # h_pool2_flat = tf.reshape(h_pool2, [-1, 8 * 9 * 1]) # h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # print("----h_fc1_drop-----") # print(h_fc1) # exit() # # # keep_prob = tf.placeholder(tf.float32) # keep_prob = tf.placeholder(1.0) # h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=keep_prob) # print("----h_fc1_drop-----") # print(h_fc1_drop) # exit() # # W_fc2 = weight_variable([1024, 10]) # b_fc2 = bias_varibale([10]) # # y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 # print("----y_conv-----") # print(y_conv) # exit() # Exchange dim 1 and dim 0 # Start at: [0,1,2] = [batch_size, 128, 9] => [batch_size, 32, 36] feature_mat = tf.transpose(feature_mat, [1, 0, 2]) # New feature_mat's shape: [time_steps, batch_size, n_inputs] [128, batch_size, 9] print("----feature_mat-----") print(feature_mat) # exit() # Temporarily crush the feature_mat's dimensions feature_mat = tf.reshape(feature_mat, [-1, config.n_inputs]) # 9 # New feature_mat's shape: [time_steps*batch_size, n_inputs] # 128 * batch_size, 9 # Linear activation, reshaping inputs to the LSTM's number of hidden: hidden = tf.nn.relu(tf.matmul( feature_mat, config.W['hidden'] ) + config.biases['hidden']) # New feature_mat (hidden) shape: [time_steps*batch_size, n_hidden] [128*batch_size, 32] print("--n_steps--") print(config.n_steps) print("--hidden--") print(hidden) # Split the series because the rnn cell needs time_steps features, each of shape: 
hidden = tf.split(0, config.n_steps/4, hidden) # (0, 128, [128*batch_size, 32]) # New hidden's shape: a list of length "time_step" containing tensors of shape [batch_size, n_hidden] # Define LSTM cell of first hidden layer: lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(config.n_hidden, forget_bias=1.0) # Stack two LSTM layers, both layers has the same shape lsmt_layers = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * 2) # Get LSTM outputs, the states are internal to the LSTM cells,they are not our attention here outputs, _ = tf.nn.rnn(lsmt_layers, hidden, dtype=tf.float32) # outputs' shape: a list of lenght "time_step" containing tensors of shape [batch_size, n_hidden] print("------------------list-------------------") print(outputs) # Get last time step's output feature for a "many to one" style classifier, # as in the image describing RNNs at the top of this page lstm_last_output = outputs[-1] # Get the last element of the array: [?, 32] print("------------------last outputs-------------------") print (lstm_last_output) # Linear activation return tf.matmul(lstm_last_output, config.W['output']) + config.biases['output'] pred_Y = LSTM_Network(X, config) # shape[?,6] print("------------------pred_Y-------------------") print(pred_Y) # Loss,train_step,evaluation l2 = config.lambda_loss_amount * \ sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()) # Softmax loss and L2 cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(pred_Y, Y)) + l2 train_step = tf.train.AdamOptimizer( learning_rate=config.learning_rate).minimize(cost) correct_prediction = tf.equal(tf.argmax(pred_Y, 1), tf.argmax(Y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32)) # -------------------------------------------- # step4: Hooray, now train the neural network # -------------------------------------------- # Note that log_device_placement can be turned ON but will cause console spam. 
# Initializing the variables init = tf.initialize_all_variables() # Add ops to save and restore all the variables. saver = tf.train.Saver() best_accuracy = 0.0 # sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=False)) if (FLAG == 'train') : # If it is the training mode with tf.Session() as sess: # tf.initialize_all_variables().run() sess.run(init) # .run() f.write("---Save model \n") # Start training for each batch and loop epochs for i in range(config.training_epochs): for start, end in zip(range(0, config.train_count, config.batch_size), # (0, 7352, 1500) range(config.batch_size, config.train_count + 1, config.batch_size)): # (1500, 7353, 1500) print(start) print(end) sess.run(train_step, feed_dict={X: X_train[start:end], Y: y_train[start:end]}) # Test completely at every epoch: calculate accuracy pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={ X: X_test, Y: y_test}) print("traing iter: {},".format(i) + \ " test accuracy : {},".format(accuracy_out) + \ " loss : {}".format(loss_out)) best_accuracy = max(best_accuracy, accuracy_out) # Save the model in this session save_path = saver.save(sess, file_name + "/model.ckpt") print("Model saved in file: %s" % save_path) print("") print("final loss: {}").format(loss_out) print("final test accuracy: {}".format(accuracy_out)) print("best epoch's test accuracy: {}".format(best_accuracy)) print("") # Write all output to file f.write("final loss:" + str(format(loss_out)) +" \n") f.write("final test accuracy:" + str(format(accuracy_out)) +" \n") f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n") else : # Running a new session print("Starting 2nd session...") with tf.Session() as sess: # Initialize variables sess.run(init) f.write("---Restore model \n") # Restore model weights from previously saved model saver.restore(sess, file_name+ "/model.ckpt") print("Model restored from file: %s" % save_path_name) # Test completely at every epoch: calculate 
accuracy pred_out, accuracy_out, loss_out = sess.run([pred_Y, accuracy, cost], feed_dict={ X: X_test, Y: y_test}) # print("traing iter: {}," + \ # " test accuracy : {},".format(accuracy_out) + \ # " loss : {}".format(loss_out)) best_accuracy = max(best_accuracy, accuracy_out) print("") print("final loss: {}").format(loss_out) print("final test accuracy: {}".format(accuracy_out)) print("best epoch's test accuracy: {}".format(best_accuracy)) print("") # Write all output to file f.write("final loss:" + str(format(loss_out)) +" \n") f.write("final test accuracy:" + str(format(accuracy_out)) +" \n") f.write("best epoch's test accuracy:" + str(format(best_accuracy)) + " \n") # # #------------------------------------------------------------------ # # step5: Training is good, but having visual insight is even better # #------------------------------------------------------------------ # # The code is in the .ipynb # # #------------------------------------------------------------------ # # step6: And finally, the multi-class confusion matrix and metrics! # #------------------------------------------------------------------ # # The code is in the .ipynb f.write("Ended at \n") f.write(str(datetime.datetime.now())+'\n') f.write("------------- \n") f.close()
mit
arabenjamin/scikit-learn
sklearn/utils/fixes.py
133
12882
"""Compatibility fixes for older version of python, numpy and scipy If you add content to this file, please give the version of the package at which the fixe is no longer needed. """ # Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org> # Gael Varoquaux <gael.varoquaux@normalesup.org> # Fabian Pedregosa <fpedregosa@acm.org> # Lars Buitinck # # License: BSD 3 clause import inspect import warnings import sys import functools import os import errno import numpy as np import scipy.sparse as sp import scipy def _parse_version(version_string): version = [] for x in version_string.split('.'): try: version.append(int(x)) except ValueError: # x may be of the form dev-1ea1592 version.append(x) return tuple(version) np_version = _parse_version(np.__version__) sp_version = _parse_version(scipy.__version__) try: from scipy.special import expit # SciPy >= 0.10 with np.errstate(invalid='ignore', over='ignore'): if np.isnan(expit(1000)): # SciPy < 0.14 raise ImportError("no stable expit in scipy.special") except ImportError: def expit(x, out=None): """Logistic sigmoid function, ``1 / (1 + exp(-x))``. See sklearn.utils.extmath.log_logistic for the log of this function. """ if out is None: out = np.empty(np.atleast_1d(x).shape, dtype=np.float64) out[:] = x # 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2 # This way of computing the logistic is both fast and stable. 
out *= .5 np.tanh(out, out) out += 1 out *= .5 return out.reshape(np.shape(x)) # little danse to see if np.copy has an 'order' keyword argument if 'order' in inspect.getargspec(np.copy)[0]: def safe_copy(X): # Copy, but keep the order return np.copy(X, order='K') else: # Before an 'order' argument was introduced, numpy wouldn't muck with # the ordering safe_copy = np.copy try: if (not np.allclose(np.divide(.4, 1, casting="unsafe"), np.divide(.4, 1, casting="unsafe", dtype=np.float)) or not np.allclose(np.divide(.4, 1), .4)): raise TypeError('Divide not working with dtype: ' 'https://github.com/numpy/numpy/issues/3484') divide = np.divide except TypeError: # Compat for old versions of np.divide that do not provide support for # the dtype args def divide(x1, x2, out=None, dtype=None): out_orig = out if out is None: out = np.asarray(x1, dtype=dtype) if out is x1: out = x1.copy() else: if out is not x1: out[:] = x1 if dtype is not None and out.dtype != dtype: out = out.astype(dtype) out /= x2 if out_orig is None and np.isscalar(x1): out = np.asscalar(out) return out try: np.array(5).astype(float, copy=False) except TypeError: # Compat where astype accepted no copy argument def astype(array, dtype, copy=True): if not copy and array.dtype == dtype: return array return array.astype(dtype) else: astype = np.ndarray.astype try: with warnings.catch_warnings(record=True): # Don't raise the numpy deprecation warnings that appear in # 1.9, but avoid Python bug due to simplefilter('ignore') warnings.simplefilter('always') sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0) except (TypeError, AttributeError): # in scipy < 14.0, sparse matrix min/max doesn't accept an `axis` argument # the following code is taken from the scipy 0.14 codebase def _minor_reduce(X, ufunc): major_index = np.flatnonzero(np.diff(X.indptr)) if X.data.size == 0 and major_index.size == 0: # Numpy < 1.8.0 don't handle empty arrays in reduceat value = np.zeros_like(X.data) else: value = ufunc.reduceat(X.data, 
X.indptr[major_index]) return major_index, value def _min_or_max_axis(X, axis, min_or_max): N = X.shape[axis] if N == 0: raise ValueError("zero-size array to reduction operation") M = X.shape[1 - axis] mat = X.tocsc() if axis == 0 else X.tocsr() mat.sum_duplicates() major_index, value = _minor_reduce(mat, min_or_max) not_full = np.diff(mat.indptr)[major_index] < N value[not_full] = min_or_max(value[not_full], 0) mask = value != 0 major_index = np.compress(mask, major_index) value = np.compress(mask, value) from scipy.sparse import coo_matrix if axis == 0: res = coo_matrix((value, (np.zeros(len(value)), major_index)), dtype=X.dtype, shape=(1, M)) else: res = coo_matrix((value, (major_index, np.zeros(len(value)))), dtype=X.dtype, shape=(M, 1)) return res.A.ravel() def _sparse_min_or_max(X, axis, min_or_max): if axis is None: if 0 in X.shape: raise ValueError("zero-size array to reduction operation") zero = X.dtype.type(0) if X.nnz == 0: return zero m = min_or_max.reduce(X.data.ravel()) if X.nnz != np.product(X.shape): m = min_or_max(zero, m) return m if axis < 0: axis += 2 if (axis == 0) or (axis == 1): return _min_or_max_axis(X, axis, min_or_max) else: raise ValueError("invalid axis, use 0 for rows, or 1 for columns") def sparse_min_max(X, axis): return (_sparse_min_or_max(X, axis, np.minimum), _sparse_min_or_max(X, axis, np.maximum)) else: def sparse_min_max(X, axis): return (X.min(axis=axis).toarray().ravel(), X.max(axis=axis).toarray().ravel()) try: from numpy import argpartition except ImportError: # numpy.argpartition was introduced in v 1.8.0 def argpartition(a, kth, axis=-1, kind='introselect', order=None): return np.argsort(a, axis=axis, order=order) try: from itertools import combinations_with_replacement except ImportError: # Backport of itertools.combinations_with_replacement for Python 2.6, # from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright # Python Software Foundation (https://docs.python.org/3/license.html) def 
combinations_with_replacement(iterable, r): # combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC pool = tuple(iterable) n = len(pool) if not n and r: return indices = [0] * r yield tuple(pool[i] for i in indices) while True: for i in reversed(range(r)): if indices[i] != n - 1: break else: return indices[i:] = [indices[i] + 1] * (r - i) yield tuple(pool[i] for i in indices) try: from numpy import isclose except ImportError: def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): """ Returns a boolean array where two arrays are element-wise equal within a tolerance. This function was added to numpy v1.7.0, and the version you are running has been backported from numpy v1.8.1. See its documentation for more details. """ def within_tol(x, y, atol, rtol): with np.errstate(invalid='ignore'): result = np.less_equal(abs(x - y), atol + rtol * abs(y)) if np.isscalar(a) and np.isscalar(b): result = bool(result) return result x = np.array(a, copy=False, subok=True, ndmin=1) y = np.array(b, copy=False, subok=True, ndmin=1) xfin = np.isfinite(x) yfin = np.isfinite(y) if all(xfin) and all(yfin): return within_tol(x, y, atol, rtol) else: finite = xfin & yfin cond = np.zeros_like(finite, subok=True) # Since we're using boolean indexing, x & y must be the same shape. # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in # lib.stride_tricks, though, so we can't import it here. x = x * np.ones_like(cond) y = y * np.ones_like(cond) # Avoid subtraction with infinite/nan values... cond[finite] = within_tol(x[finite], y[finite], atol, rtol) # Check for equality of infinite values... cond[~finite] = (x[~finite] == y[~finite]) if equal_nan: # Make NaN == NaN cond[np.isnan(x) & np.isnan(y)] = True return cond if np_version < (1, 7): # Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg. 
def frombuffer_empty(buf, dtype): if len(buf) == 0: return np.empty(0, dtype=dtype) else: return np.frombuffer(buf, dtype=dtype) else: frombuffer_empty = np.frombuffer if np_version < (1, 8): def in1d(ar1, ar2, assume_unique=False, invert=False): # Backport of numpy function in1d 1.8.1 to support numpy 1.6.2 # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # This code is significantly faster when the condition is satisfied. if len(ar2) < 10 * len(ar1) ** 0.145: if invert: mask = np.ones(len(ar1), dtype=np.bool) for a in ar2: mask &= (ar1 != a) else: mask = np.zeros(len(ar1), dtype=np.bool) for a in ar2: mask |= (ar1 == a) return mask # Otherwise use sorting if not assume_unique: ar1, rev_idx = np.unique(ar1, return_inverse=True) ar2 = np.unique(ar2) ar = np.concatenate((ar1, ar2)) # We need this to be a stable sort, so always use 'mergesort' # here. The values from the first array should always come before # the values from the second array. 
order = ar.argsort(kind='mergesort') sar = ar[order] if invert: bool_ar = (sar[1:] != sar[:-1]) else: bool_ar = (sar[1:] == sar[:-1]) flag = np.concatenate((bool_ar, [invert])) indx = order.argsort(kind='mergesort')[:len(ar1)] if assume_unique: return flag[indx] else: return flag[indx][rev_idx] else: from numpy import in1d if sp_version < (0, 15): # Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142 from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr else: from scipy.sparse.linalg import lsqr as sparse_lsqr if sys.version_info < (2, 7, 0): # partial cannot be pickled in Python 2.6 # http://bugs.python.org/issue1398 class partial(object): def __init__(self, func, *args, **keywords): functools.update_wrapper(self, func) self.func = func self.args = args self.keywords = keywords def __call__(self, *args, **keywords): args = self.args + args kwargs = self.keywords.copy() kwargs.update(keywords) return self.func(*args, **kwargs) else: from functools import partial if np_version < (1, 6, 2): # Allow bincount to accept empty arrays # https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040 def bincount(x, weights=None, minlength=None): if len(x) > 0: return np.bincount(x, weights, minlength) else: if minlength is None: minlength = 0 minlength = np.asscalar(np.asarray(minlength, dtype=np.intp)) return np.zeros(minlength, dtype=np.intp) else: from numpy import bincount if 'exist_ok' in inspect.getargspec(os.makedirs).args: makedirs = os.makedirs else: def makedirs(name, mode=0o777, exist_ok=False): """makedirs(name [, mode=0o777][, exist_ok=False]) Super-mkdir; create a leaf directory and all intermediate ones. Works like mkdir, except that any intermediate path segment (not just the rightmost) will be created if it does not exist. If the target directory already exists, raise an OSError if exist_ok is False. Otherwise no exception is raised. This is recursive. 
""" try: os.makedirs(name, mode=mode) except OSError as e: if (not exist_ok or e.errno != errno.EEXIST or not os.path.isdir(name)): raise
bsd-3-clause
alephu5/Soundbyte
environment/lib/python3.3/site-packages/pandas/io/tests/test_excel.py
1
44576
# pylint: disable=E1101 from pandas.compat import u, range, map from datetime import datetime, date import os import nose from numpy import nan import numpy as np from pandas import DataFrame, Index, MultiIndex from pandas.io.parsers import read_csv from pandas.io.excel import ( ExcelFile, ExcelWriter, read_excel, _XlwtWriter, _OpenpyxlWriter, register_writer, _XlsxWriter ) from pandas.util.testing import ensure_clean from pandas.core.config import set_option, get_option import pandas.util.testing as tm import pandas as pd def _skip_if_no_xlrd(): try: import xlrd ver = tuple(map(int, xlrd.__VERSION__.split(".")[:2])) if ver < (0, 9): raise nose.SkipTest('xlrd < 0.9, skipping') except ImportError: raise nose.SkipTest('xlrd not installed, skipping') def _skip_if_no_xlwt(): try: import xlwt # NOQA except ImportError: raise nose.SkipTest('xlwt not installed, skipping') def _skip_if_no_openpyxl(): try: import openpyxl # NOQA except ImportError: raise nose.SkipTest('openpyxl not installed, skipping') def _skip_if_no_xlsxwriter(): try: import xlsxwriter # NOQA except ImportError: raise nose.SkipTest('xlsxwriter not installed, skipping') def _skip_if_no_excelsuite(): _skip_if_no_xlrd() _skip_if_no_xlwt() _skip_if_no_openpyxl() _seriesd = tm.getSeriesData() _tsd = tm.getTimeSeriesData() _frame = DataFrame(_seriesd)[:10] _frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])[:10] _tsframe = tm.makeTimeDataFrame()[:5] _mixed_frame = _frame.copy() _mixed_frame['foo'] = 'bar' class SharedItems(object): def setUp(self): self.dirpath = tm.get_data_path() self.csv1 = os.path.join(self.dirpath, 'test1.csv') self.csv2 = os.path.join(self.dirpath, 'test2.csv') self.xls1 = os.path.join(self.dirpath, 'test.xls') self.xlsx1 = os.path.join(self.dirpath, 'test.xlsx') self.frame = _frame.copy() self.frame2 = _frame2.copy() self.tsframe = _tsframe.copy() self.mixed_frame = _mixed_frame.copy() def read_csv(self, *args, **kwds): kwds = kwds.copy() kwds['engine'] = 'python' return 
read_csv(*args, **kwds) class ExcelReaderTests(SharedItems, tm.TestCase): def test_parse_cols_int(self): _skip_if_no_openpyxl() _skip_if_no_xlrd() suffix = ['xls', 'xlsx', 'xlsm'] for s in suffix: pth = os.path.join(self.dirpath, 'test.%s' % s) xls = ExcelFile(pth) df = xls.parse('Sheet1', index_col=0, parse_dates=True, parse_cols=3) df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True) df2 = df2.reindex(columns=['A', 'B', 'C']) df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True, parse_cols=3) # TODO add index to xls file) tm.assert_frame_equal(df, df2, check_names=False) tm.assert_frame_equal(df3, df2, check_names=False) def test_parse_cols_list(self): _skip_if_no_openpyxl() _skip_if_no_xlrd() suffix = ['xls', 'xlsx', 'xlsm'] for s in suffix: pth = os.path.join(self.dirpath, 'test.%s' % s) xls = ExcelFile(pth) df = xls.parse('Sheet1', index_col=0, parse_dates=True, parse_cols=[0, 2, 3]) df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True) df2 = df2.reindex(columns=['B', 'C']) df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True, parse_cols=[0, 2, 3]) # TODO add index to xls file) tm.assert_frame_equal(df, df2, check_names=False) tm.assert_frame_equal(df3, df2, check_names=False) def test_parse_cols_str(self): _skip_if_no_openpyxl() _skip_if_no_xlrd() suffix = ['xls', 'xlsx', 'xlsm'] for s in suffix: pth = os.path.join(self.dirpath, 'test.%s' % s) xls = ExcelFile(pth) df = xls.parse('Sheet1', index_col=0, parse_dates=True, parse_cols='A:D') df2 = read_csv(self.csv1, index_col=0, parse_dates=True) df2 = df2.reindex(columns=['A', 'B', 'C']) df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True, parse_cols='A:D') # TODO add index to xls, read xls ignores index name ? 
tm.assert_frame_equal(df, df2, check_names=False) tm.assert_frame_equal(df3, df2, check_names=False) del df, df2, df3 df = xls.parse('Sheet1', index_col=0, parse_dates=True, parse_cols='A,C,D') df2 = read_csv(self.csv1, index_col=0, parse_dates=True) df2 = df2.reindex(columns=['B', 'C']) df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True, parse_cols='A,C,D') # TODO add index to xls file tm.assert_frame_equal(df, df2, check_names=False) tm.assert_frame_equal(df3, df2, check_names=False) del df, df2, df3 df = xls.parse('Sheet1', index_col=0, parse_dates=True, parse_cols='A,C:D') df2 = read_csv(self.csv1, index_col=0, parse_dates=True) df2 = df2.reindex(columns=['B', 'C']) df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True, parse_cols='A,C:D') tm.assert_frame_equal(df, df2, check_names=False) tm.assert_frame_equal(df3, df2, check_names=False) def test_excel_stop_iterator(self): _skip_if_no_xlrd() excel_data = ExcelFile(os.path.join(self.dirpath, 'test2.xls')) parsed = excel_data.parse('Sheet1') expected = DataFrame([['aaaa', 'bbbbb']], columns=['Test', 'Test1']) tm.assert_frame_equal(parsed, expected) def test_excel_cell_error_na(self): _skip_if_no_xlrd() excel_data = ExcelFile(os.path.join(self.dirpath, 'test3.xls')) parsed = excel_data.parse('Sheet1') expected = DataFrame([[np.nan]], columns=['Test']) tm.assert_frame_equal(parsed, expected) def test_excel_passes_na(self): _skip_if_no_xlrd() excel_data = ExcelFile(os.path.join(self.dirpath, 'test2.xlsx')) parsed = excel_data.parse('Sheet1', keep_default_na=False, na_values=['apple']) expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']], columns=['Test']) tm.assert_frame_equal(parsed, expected) parsed = excel_data.parse('Sheet1', keep_default_na=True, na_values=['apple']) expected = DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']], columns=['Test']) tm.assert_frame_equal(parsed, expected) def check_excel_table_sheet_by_index(self, filename, csvfile): import 
xlrd pth = os.path.join(self.dirpath, filename) xls = ExcelFile(pth) df = xls.parse(0, index_col=0, parse_dates=True) df2 = self.read_csv(csvfile, index_col=0, parse_dates=True) df3 = xls.parse(1, skiprows=[1], index_col=0, parse_dates=True) tm.assert_frame_equal(df, df2, check_names=False) tm.assert_frame_equal(df3, df2, check_names=False) df4 = xls.parse(0, index_col=0, parse_dates=True, skipfooter=1) df5 = xls.parse(0, index_col=0, parse_dates=True, skip_footer=1) tm.assert_frame_equal(df4, df.ix[:-1]) tm.assert_frame_equal(df4, df5) self.assertRaises(xlrd.XLRDError, xls.parse, 'asdf') def test_excel_table_sheet_by_index(self): _skip_if_no_xlrd() for filename, csvfile in [(self.xls1, self.csv1), (self.xlsx1, self.csv1)]: self.check_excel_table_sheet_by_index(filename, csvfile) def test_excel_table(self): _skip_if_no_xlrd() pth = os.path.join(self.dirpath, 'test.xls') xls = ExcelFile(pth) df = xls.parse('Sheet1', index_col=0, parse_dates=True) df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True) df3 = xls.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True) tm.assert_frame_equal(df, df2, check_names=False) tm.assert_frame_equal(df3, df2, check_names=False) df4 = xls.parse('Sheet1', index_col=0, parse_dates=True, skipfooter=1) df5 = xls.parse('Sheet1', index_col=0, parse_dates=True, skip_footer=1) tm.assert_frame_equal(df4, df.ix[:-1]) tm.assert_frame_equal(df4, df5) def test_excel_read_buffer(self): _skip_if_no_xlrd() _skip_if_no_openpyxl() pth = os.path.join(self.dirpath, 'test.xls') f = open(pth, 'rb') xls = ExcelFile(f) # it works xls.parse('Sheet1', index_col=0, parse_dates=True) pth = os.path.join(self.dirpath, 'test.xlsx') f = open(pth, 'rb') xl = ExcelFile(f) xl.parse('Sheet1', index_col=0, parse_dates=True) def test_read_xlrd_Book(self): _skip_if_no_xlrd() _skip_if_no_xlwt() import xlrd df = self.frame with ensure_clean('.xls') as pth: df.to_excel(pth, "SheetA") book = xlrd.open_workbook(pth) with ExcelFile(book, engine="xlrd") as xl: 
result = xl.parse("SheetA") tm.assert_frame_equal(df, result) result = read_excel(book, sheetname="SheetA", engine="xlrd") tm.assert_frame_equal(df, result) def test_xlsx_table(self): _skip_if_no_xlrd() _skip_if_no_openpyxl() pth = os.path.join(self.dirpath, 'test.xlsx') xlsx = ExcelFile(pth) df = xlsx.parse('Sheet1', index_col=0, parse_dates=True) df2 = self.read_csv(self.csv1, index_col=0, parse_dates=True) df3 = xlsx.parse('Sheet2', skiprows=[1], index_col=0, parse_dates=True) # TODO add index to xlsx file tm.assert_frame_equal(df, df2, check_names=False) tm.assert_frame_equal(df3, df2, check_names=False) df4 = xlsx.parse('Sheet1', index_col=0, parse_dates=True, skipfooter=1) df5 = xlsx.parse('Sheet1', index_col=0, parse_dates=True, skip_footer=1) tm.assert_frame_equal(df4, df.ix[:-1]) tm.assert_frame_equal(df4, df5) def test_reader_closes_file(self): _skip_if_no_xlrd() _skip_if_no_openpyxl() pth = os.path.join(self.dirpath, 'test.xlsx') f = open(pth, 'rb') with ExcelFile(f) as xlsx: # parses okay xlsx.parse('Sheet1', index_col=0) self.assertTrue(f.closed) def test_reader_special_dtypes(self): _skip_if_no_xlrd() expected = DataFrame.from_items([ ("IntCol", [1, 2, -3, 4, 0]), ("FloatCol", [1.25, 2.25, 1.83, 1.92, 0.0000000005]), ("BoolCol", [True, False, True, True, False]), ("StrCol", [1, 2, 3, 4, 5]), # GH5394 - this is why convert_float isn't vectorized ("Str2Col", ["a", 3, "c", "d", "e"]), ("DateCol", [datetime(2013, 10, 30), datetime(2013, 10, 31), datetime(1905, 1, 1), datetime(2013, 12, 14), datetime(2015, 3, 14)]) ]) xlsx_path = os.path.join(self.dirpath, 'test_types.xlsx') xls_path = os.path.join(self.dirpath, 'test_types.xls') # should read in correctly and infer types for path in (xls_path, xlsx_path): actual = read_excel(path, 'Sheet1') tm.assert_frame_equal(actual, expected) # if not coercing number, then int comes in as float float_expected = expected.copy() float_expected["IntCol"] = float_expected["IntCol"].astype(float) float_expected.loc[1, 
"Str2Col"] = 3.0 for path in (xls_path, xlsx_path): actual = read_excel(path, 'Sheet1', convert_float=False) tm.assert_frame_equal(actual, float_expected) # check setting Index (assuming xls and xlsx are the same here) for icol, name in enumerate(expected.columns): actual = read_excel(xlsx_path, 'Sheet1', index_col=icol) actual2 = read_excel(xlsx_path, 'Sheet1', index_col=name) exp = expected.set_index(name) tm.assert_frame_equal(actual, exp) tm.assert_frame_equal(actual2, exp) # convert_float and converters should be different but both accepted expected["StrCol"] = expected["StrCol"].apply(str) actual = read_excel(xlsx_path, 'Sheet1', converters={"StrCol": str}) tm.assert_frame_equal(actual, expected) no_convert_float = float_expected.copy() no_convert_float["StrCol"] = no_convert_float["StrCol"].apply(str) actual = read_excel(xlsx_path, 'Sheet1', converters={"StrCol": str}, convert_float=False) tm.assert_frame_equal(actual, no_convert_float) class ExcelWriterBase(SharedItems): # Base class for test cases to run with different Excel writers. # To add a writer test, define the following: # 1. A check_skip function that skips your tests if your writer isn't # installed. # 2. Add a property ext, which is the file extension that your writer # writes to. (needs to start with '.' so it's a valid path) # 3. Add a property engine_name, which is the name of the writer class. # Test with MultiIndex and Hierarchical Rows as merged cells. 
merge_cells = True def setUp(self): self.check_skip() super(ExcelWriterBase, self).setUp() self.option_name = 'io.excel.%s.writer' % self.ext.strip('.') self.prev_engine = get_option(self.option_name) set_option(self.option_name, self.engine_name) def tearDown(self): set_option(self.option_name, self.prev_engine) def test_excel_sheet_by_name_raise(self): _skip_if_no_xlrd() import xlrd with ensure_clean(self.ext) as pth: gt = DataFrame(np.random.randn(10, 2)) gt.to_excel(pth) xl = ExcelFile(pth) df = xl.parse(0) tm.assert_frame_equal(gt, df) self.assertRaises(xlrd.XLRDError, xl.parse, '0') def test_excelwriter_contextmanager(self): _skip_if_no_xlrd() with ensure_clean(self.ext) as pth: with ExcelWriter(pth) as writer: self.frame.to_excel(writer, 'Data1') self.frame2.to_excel(writer, 'Data2') with ExcelFile(pth) as reader: found_df = reader.parse('Data1') found_df2 = reader.parse('Data2') tm.assert_frame_equal(found_df, self.frame) tm.assert_frame_equal(found_df2, self.frame2) def test_roundtrip(self): _skip_if_no_xlrd() with ensure_clean(self.ext) as path: self.frame['A'][:5] = nan self.frame.to_excel(path, 'test1') self.frame.to_excel(path, 'test1', cols=['A', 'B']) self.frame.to_excel(path, 'test1', header=False) self.frame.to_excel(path, 'test1', index=False) # test roundtrip self.frame.to_excel(path, 'test1') recons = read_excel(path, 'test1', index_col=0) tm.assert_frame_equal(self.frame, recons) self.frame.to_excel(path, 'test1', index=False) recons = read_excel(path, 'test1', index_col=None) recons.index = self.frame.index tm.assert_frame_equal(self.frame, recons) self.frame.to_excel(path, 'test1', na_rep='NA') recons = read_excel(path, 'test1', index_col=0, na_values=['NA']) tm.assert_frame_equal(self.frame, recons) # GH 3611 self.frame.to_excel(path, 'test1', na_rep='88') recons = read_excel(path, 'test1', index_col=0, na_values=['88']) tm.assert_frame_equal(self.frame, recons) self.frame.to_excel(path, 'test1', na_rep='88') recons = read_excel(path, 
'test1', index_col=0, na_values=[88, 88.0]) tm.assert_frame_equal(self.frame, recons) def test_mixed(self): _skip_if_no_xlrd() with ensure_clean(self.ext) as path: self.mixed_frame.to_excel(path, 'test1') reader = ExcelFile(path) recons = reader.parse('test1', index_col=0) tm.assert_frame_equal(self.mixed_frame, recons) def test_tsframe(self): _skip_if_no_xlrd() df = tm.makeTimeDataFrame()[:5] with ensure_clean(self.ext) as path: df.to_excel(path, 'test1') reader = ExcelFile(path) recons = reader.parse('test1') tm.assert_frame_equal(df, recons) def test_basics_with_nan(self): _skip_if_no_xlrd() with ensure_clean(self.ext) as path: self.frame['A'][:5] = nan self.frame.to_excel(path, 'test1') self.frame.to_excel(path, 'test1', cols=['A', 'B']) self.frame.to_excel(path, 'test1', header=False) self.frame.to_excel(path, 'test1', index=False) def test_int_types(self): _skip_if_no_xlrd() for np_type in (np.int8, np.int16, np.int32, np.int64): with ensure_clean(self.ext) as path: # Test np.int values read come back as int (rather than float # which is Excel's format). frame = DataFrame(np.random.randint(-10, 10, size=(10, 2)), dtype=np_type) frame.to_excel(path, 'test1') reader = ExcelFile(path) recons = reader.parse('test1') int_frame = frame.astype(np.int64) tm.assert_frame_equal(int_frame, recons) recons2 = read_excel(path, 'test1') tm.assert_frame_equal(int_frame, recons2) # test with convert_float=False comes back as float float_frame = frame.astype(float) recons = read_excel(path, 'test1', convert_float=False) tm.assert_frame_equal(recons, float_frame) def test_float_types(self): _skip_if_no_xlrd() for np_type in (np.float16, np.float32, np.float64): with ensure_clean(self.ext) as path: # Test np.float values read come back as float. 
frame = DataFrame(np.random.random_sample(10), dtype=np_type) frame.to_excel(path, 'test1') reader = ExcelFile(path) recons = reader.parse('test1').astype(np_type) tm.assert_frame_equal(frame, recons, check_dtype=False) def test_bool_types(self): _skip_if_no_xlrd() for np_type in (np.bool8, np.bool_): with ensure_clean(self.ext) as path: # Test np.bool values read come back as float. frame = (DataFrame([1, 0, True, False], dtype=np_type)) frame.to_excel(path, 'test1') reader = ExcelFile(path) recons = reader.parse('test1').astype(np_type) tm.assert_frame_equal(frame, recons) def test_sheets(self): _skip_if_no_xlrd() with ensure_clean(self.ext) as path: self.frame['A'][:5] = nan self.frame.to_excel(path, 'test1') self.frame.to_excel(path, 'test1', cols=['A', 'B']) self.frame.to_excel(path, 'test1', header=False) self.frame.to_excel(path, 'test1', index=False) # Test writing to separate sheets writer = ExcelWriter(path) self.frame.to_excel(writer, 'test1') self.tsframe.to_excel(writer, 'test2') writer.save() reader = ExcelFile(path) recons = reader.parse('test1', index_col=0) tm.assert_frame_equal(self.frame, recons) recons = reader.parse('test2', index_col=0) tm.assert_frame_equal(self.tsframe, recons) np.testing.assert_equal(2, len(reader.sheet_names)) np.testing.assert_equal('test1', reader.sheet_names[0]) np.testing.assert_equal('test2', reader.sheet_names[1]) def test_colaliases(self): _skip_if_no_xlrd() with ensure_clean(self.ext) as path: self.frame['A'][:5] = nan self.frame.to_excel(path, 'test1') self.frame.to_excel(path, 'test1', cols=['A', 'B']) self.frame.to_excel(path, 'test1', header=False) self.frame.to_excel(path, 'test1', index=False) # column aliases col_aliases = Index(['AA', 'X', 'Y', 'Z']) self.frame2.to_excel(path, 'test1', header=col_aliases) reader = ExcelFile(path) rs = reader.parse('test1', index_col=0) xp = self.frame2.copy() xp.columns = col_aliases tm.assert_frame_equal(xp, rs) def test_roundtrip_indexlabels(self): _skip_if_no_xlrd() with 
ensure_clean(self.ext) as path: self.frame['A'][:5] = nan self.frame.to_excel(path, 'test1') self.frame.to_excel(path, 'test1', cols=['A', 'B']) self.frame.to_excel(path, 'test1', header=False) self.frame.to_excel(path, 'test1', index=False) # test index_label frame = (DataFrame(np.random.randn(10, 2)) >= 0) frame.to_excel(path, 'test1', index_label=['test'], merge_cells=self.merge_cells) reader = ExcelFile(path) recons = reader.parse('test1', index_col=0, has_index_names=self.merge_cells ).astype(np.int64) frame.index.names = ['test'] self.assertEqual(frame.index.names, recons.index.names) frame = (DataFrame(np.random.randn(10, 2)) >= 0) frame.to_excel(path, 'test1', index_label=['test', 'dummy', 'dummy2'], merge_cells=self.merge_cells) reader = ExcelFile(path) recons = reader.parse('test1', index_col=0, has_index_names=self.merge_cells ).astype(np.int64) frame.index.names = ['test'] self.assertEqual(frame.index.names, recons.index.names) frame = (DataFrame(np.random.randn(10, 2)) >= 0) frame.to_excel(path, 'test1', index_label='test', merge_cells=self.merge_cells) reader = ExcelFile(path) recons = reader.parse('test1', index_col=0, has_index_names=self.merge_cells ).astype(np.int64) frame.index.names = ['test'] tm.assert_frame_equal(frame, recons.astype(bool)) with ensure_clean(self.ext) as path: self.frame.to_excel(path, 'test1', cols=['A', 'B', 'C', 'D'], index=False, merge_cells=self.merge_cells) # take 'A' and 'B' as indexes (same row as cols 'C', 'D') df = self.frame.copy() df = df.set_index(['A', 'B']) reader = ExcelFile(path) recons = reader.parse('test1', index_col=[0, 1]) tm.assert_frame_equal(df, recons, check_less_precise=True) def test_excel_roundtrip_indexname(self): _skip_if_no_xlrd() df = DataFrame(np.random.randn(10, 4)) df.index.name = 'foo' with ensure_clean(self.ext) as path: df.to_excel(path, merge_cells=self.merge_cells) xf = ExcelFile(path) result = xf.parse(xf.sheet_names[0], index_col=0, has_index_names=self.merge_cells) 
tm.assert_frame_equal(result, df) self.assertEqual(result.index.name, 'foo') def test_excel_roundtrip_datetime(self): _skip_if_no_xlrd() # datetime.date, not sure what to test here exactly tsf = self.tsframe.copy() with ensure_clean(self.ext) as path: tsf.index = [x.date() for x in self.tsframe.index] tsf.to_excel(path, 'test1', merge_cells=self.merge_cells) reader = ExcelFile(path) recons = reader.parse('test1') tm.assert_frame_equal(self.tsframe, recons) # GH4133 - excel output format strings def test_excel_date_datetime_format(self): _skip_if_no_xlrd() df = DataFrame([[date(2014, 1, 31), date(1999, 9, 24)], [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)]], index=['DATE', 'DATETIME'], columns=['X', 'Y']) df_expected = DataFrame([[datetime(2014, 1, 31), datetime(1999, 9, 24)], [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)]], index=['DATE', 'DATETIME'], columns=['X', 'Y']) with ensure_clean(self.ext) as filename1: with ensure_clean(self.ext) as filename2: writer1 = ExcelWriter(filename1) writer2 = ExcelWriter(filename2, date_format='DD.MM.YYYY', datetime_format='DD.MM.YYYY HH-MM-SS') df.to_excel(writer1, 'test1') df.to_excel(writer2, 'test1') writer1.close() writer2.close() reader1 = ExcelFile(filename1) reader2 = ExcelFile(filename2) rs1 = reader1.parse('test1', index_col=None) rs2 = reader2.parse('test1', index_col=None) tm.assert_frame_equal(rs1, rs2) # since the reader returns a datetime object for dates, we need # to use df_expected to check the result tm.assert_frame_equal(rs2, df_expected) def test_to_excel_periodindex(self): _skip_if_no_xlrd() frame = self.tsframe xp = frame.resample('M', kind='period') with ensure_clean(self.ext) as path: xp.to_excel(path, 'sht1') reader = ExcelFile(path) rs = reader.parse('sht1', index_col=0, parse_dates=True) tm.assert_frame_equal(xp, rs.to_period('M')) def test_to_excel_multiindex(self): _skip_if_no_xlrd() frame = self.frame arrays = np.arange(len(frame.index) * 2).reshape(2, 
-1) new_index = MultiIndex.from_arrays(arrays, names=['first', 'second']) frame.index = new_index with ensure_clean(self.ext) as path: frame.to_excel(path, 'test1', header=False) frame.to_excel(path, 'test1', cols=['A', 'B']) # round trip frame.to_excel(path, 'test1', merge_cells=self.merge_cells) reader = ExcelFile(path) df = reader.parse('test1', index_col=[0, 1], parse_dates=False, has_index_names=self.merge_cells) tm.assert_frame_equal(frame, df) self.assertEqual(frame.index.names, df.index.names) def test_to_excel_multiindex_dates(self): _skip_if_no_xlrd() # try multiindex with dates tsframe = self.tsframe.copy() new_index = [tsframe.index, np.arange(len(tsframe.index))] tsframe.index = MultiIndex.from_arrays(new_index) with ensure_clean(self.ext) as path: tsframe.index.names = ['time', 'foo'] tsframe.to_excel(path, 'test1', merge_cells=self.merge_cells) reader = ExcelFile(path) recons = reader.parse('test1', index_col=[0, 1], has_index_names=self.merge_cells) tm.assert_frame_equal(tsframe, recons) self.assertEquals(recons.index.names, ('time', 'foo')) def test_to_excel_multiindex_no_write_index(self): _skip_if_no_xlrd() # Test writing and re-reading a MI witout the index. GH 5616. # Initial non-MI frame. frame1 = pd.DataFrame({'a': [10, 20], 'b': [30, 40], 'c': [50, 60]}) # Add a MI. frame2 = frame1.copy() multi_index = pd.MultiIndex.from_tuples([(70, 80), (90, 100)]) frame2.index = multi_index with ensure_clean(self.ext) as path: # Write out to Excel without the index. frame2.to_excel(path, 'test1', index=False) # Read it back in. reader = ExcelFile(path) frame3 = reader.parse('test1') # Test that it is the same as the initial frame. 
tm.assert_frame_equal(frame1, frame3) def test_to_excel_float_format(self): _skip_if_no_xlrd() df = DataFrame([[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], index=['A', 'B'], columns=['X', 'Y', 'Z']) with ensure_clean(self.ext) as filename: df.to_excel(filename, 'test1', float_format='%.2f') reader = ExcelFile(filename) rs = reader.parse('test1', index_col=None) xp = DataFrame([[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]], index=['A', 'B'], columns=['X', 'Y', 'Z']) tm.assert_frame_equal(rs, xp) def test_to_excel_unicode_filename(self): _skip_if_no_xlrd() with ensure_clean(u('\u0192u.') + self.ext) as filename: try: f = open(filename, 'wb') except UnicodeEncodeError: raise nose.SkipTest('no unicode file names on this system') else: f.close() df = DataFrame([[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], index=['A', 'B'], columns=['X', 'Y', 'Z']) df.to_excel(filename, 'test1', float_format='%.2f') reader = ExcelFile(filename) rs = reader.parse('test1', index_col=None) xp = DataFrame([[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]], index=['A', 'B'], columns=['X', 'Y', 'Z']) tm.assert_frame_equal(rs, xp) # def test_to_excel_header_styling_xls(self): # import StringIO # s = StringIO( # """Date,ticker,type,value # 2001-01-01,x,close,12.2 # 2001-01-01,x,open ,12.1 # 2001-01-01,y,close,12.2 # 2001-01-01,y,open ,12.1 # 2001-02-01,x,close,12.2 # 2001-02-01,x,open ,12.1 # 2001-02-01,y,close,12.2 # 2001-02-01,y,open ,12.1 # 2001-03-01,x,close,12.2 # 2001-03-01,x,open ,12.1 # 2001-03-01,y,close,12.2 # 2001-03-01,y,open ,12.1""") # df = read_csv(s, parse_dates=["Date"]) # pdf = df.pivot_table(values="value", rows=["ticker"], # cols=["Date", "type"]) # try: # import xlwt # import xlrd # except ImportError: # raise nose.SkipTest # filename = '__tmp_to_excel_header_styling_xls__.xls' # pdf.to_excel(filename, 'test1') # wbk = xlrd.open_workbook(filename, # formatting_info=True) # self.assertEquals(["test1"], wbk.sheet_names()) # ws = 
wbk.sheet_by_name('test1') # self.assertEquals([(0, 1, 5, 7), (0, 1, 3, 5), (0, 1, 1, 3)], # ws.merged_cells) # for i in range(0, 2): # for j in range(0, 7): # xfx = ws.cell_xf_index(0, 0) # cell_xf = wbk.xf_list[xfx] # font = wbk.font_list # self.assertEquals(1, font[cell_xf.font_index].bold) # self.assertEquals(1, cell_xf.border.top_line_style) # self.assertEquals(1, cell_xf.border.right_line_style) # self.assertEquals(1, cell_xf.border.bottom_line_style) # self.assertEquals(1, cell_xf.border.left_line_style) # self.assertEquals(2, cell_xf.alignment.hor_align) # os.remove(filename) # def test_to_excel_header_styling_xlsx(self): # import StringIO # s = StringIO( # """Date,ticker,type,value # 2001-01-01,x,close,12.2 # 2001-01-01,x,open ,12.1 # 2001-01-01,y,close,12.2 # 2001-01-01,y,open ,12.1 # 2001-02-01,x,close,12.2 # 2001-02-01,x,open ,12.1 # 2001-02-01,y,close,12.2 # 2001-02-01,y,open ,12.1 # 2001-03-01,x,close,12.2 # 2001-03-01,x,open ,12.1 # 2001-03-01,y,close,12.2 # 2001-03-01,y,open ,12.1""") # df = read_csv(s, parse_dates=["Date"]) # pdf = df.pivot_table(values="value", rows=["ticker"], # cols=["Date", "type"]) # try: # import openpyxl # from openpyxl.cell import get_column_letter # except ImportError: # raise nose.SkipTest # if openpyxl.__version__ < '1.6.1': # raise nose.SkipTest # # test xlsx_styling # filename = '__tmp_to_excel_header_styling_xlsx__.xlsx' # pdf.to_excel(filename, 'test1') # wbk = openpyxl.load_workbook(filename) # self.assertEquals(["test1"], wbk.get_sheet_names()) # ws = wbk.get_sheet_by_name('test1') # xlsaddrs = ["%s2" % chr(i) for i in range(ord('A'), ord('H'))] # xlsaddrs += ["A%s" % i for i in range(1, 6)] # xlsaddrs += ["B1", "D1", "F1"] # for xlsaddr in xlsaddrs: # cell = ws.cell(xlsaddr) # self.assertTrue(cell.style.font.bold) # self.assertEquals(openpyxl.style.Border.BORDER_THIN, # cell.style.borders.top.border_style) # self.assertEquals(openpyxl.style.Border.BORDER_THIN, # cell.style.borders.right.border_style) # 
self.assertEquals(openpyxl.style.Border.BORDER_THIN, # cell.style.borders.bottom.border_style) # self.assertEquals(openpyxl.style.Border.BORDER_THIN, # cell.style.borders.left.border_style) # self.assertEquals(openpyxl.style.Alignment.HORIZONTAL_CENTER, # cell.style.alignment.horizontal) # mergedcells_addrs = ["C1", "E1", "G1"] # for maddr in mergedcells_addrs: # self.assertTrue(ws.cell(maddr).merged) # os.remove(filename) def test_excel_010_hemstring(self): _skip_if_no_xlrd() if self.merge_cells: raise nose.SkipTest('Skip tests for merged MI format.') from pandas.util.testing import makeCustomDataframe as mkdf # ensure limited functionality in 0.10 # override of #2370 until sorted out in 0.11 def roundtrip(df, header=True, parser_hdr=0): with ensure_clean(self.ext) as path: df.to_excel(path, header=header, merge_cells=self.merge_cells) xf = pd.ExcelFile(path) res = xf.parse(xf.sheet_names[0], header=parser_hdr) return res nrows = 5 ncols = 3 for i in range(1, 4): # row multindex upto nlevel=3 for j in range(1, 4): # col "" df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j) res = roundtrip(df) # shape self.assertEqual(res.shape, (nrows, ncols + i)) # no nans for r in range(len(res.index)): for c in range(len(res.columns)): self.assertTrue(res.ix[r, c] is not np.nan) for i in range(1, 4): # row multindex upto nlevel=3 for j in range(1, 4): # col "" df = mkdf(nrows, ncols, r_idx_nlevels=i, c_idx_nlevels=j) res = roundtrip(df, False) # shape self.assertEqual(res.shape, ( nrows - 1, ncols + i)) # first row taken as columns # no nans for r in range(len(res.index)): for c in range(len(res.columns)): self.assertTrue(res.ix[r, c] is not np.nan) res = roundtrip(DataFrame([0])) self.assertEqual(res.shape, (1, 1)) self.assertTrue(res.ix[0, 0] is not np.nan) res = roundtrip(DataFrame([0]), False, None) self.assertEqual(res.shape, (1, 2)) self.assertTrue(res.ix[0, 0] is not np.nan) def test_duplicated_columns(self): # Test for issue #5235. 
_skip_if_no_xlrd() with ensure_clean(self.ext) as path: write_frame = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) colnames = ['A', 'B', 'B'] write_frame.columns = colnames write_frame.to_excel(path, 'test1') read_frame = read_excel(path, 'test1') read_frame.columns = colnames tm.assert_frame_equal(write_frame, read_frame) def test_swapped_columns(self): # Test for issue #5427. _skip_if_no_xlrd() with ensure_clean(self.ext) as path: write_frame = DataFrame({'A': [1, 1, 1], 'B': [2, 2, 2]}) write_frame.to_excel(path, 'test1', cols=['B', 'A']) read_frame = read_excel(path, 'test1', header=0) tm.assert_series_equal(write_frame['A'], read_frame['A']) tm.assert_series_equal(write_frame['B'], read_frame['B']) class OpenpyxlTests(ExcelWriterBase, tm.TestCase): ext = '.xlsx' engine_name = 'openpyxl' check_skip = staticmethod(_skip_if_no_openpyxl) def test_to_excel_styleconverter(self): _skip_if_no_openpyxl() import openpyxl hstyle = {"font": {"bold": True}, "borders": {"top": "thin", "right": "thin", "bottom": "thin", "left": "thin"}, "alignment": {"horizontal": "center", "vertical": "top"}} xlsx_style = _OpenpyxlWriter._convert_to_style(hstyle) self.assertTrue(xlsx_style.font.bold) self.assertEquals(openpyxl.style.Border.BORDER_THIN, xlsx_style.borders.top.border_style) self.assertEquals(openpyxl.style.Border.BORDER_THIN, xlsx_style.borders.right.border_style) self.assertEquals(openpyxl.style.Border.BORDER_THIN, xlsx_style.borders.bottom.border_style) self.assertEquals(openpyxl.style.Border.BORDER_THIN, xlsx_style.borders.left.border_style) self.assertEquals(openpyxl.style.Alignment.HORIZONTAL_CENTER, xlsx_style.alignment.horizontal) self.assertEquals(openpyxl.style.Alignment.VERTICAL_TOP, xlsx_style.alignment.vertical) class XlwtTests(ExcelWriterBase, tm.TestCase): ext = '.xls' engine_name = 'xlwt' check_skip = staticmethod(_skip_if_no_xlwt) def test_to_excel_styleconverter(self): _skip_if_no_xlwt() import xlwt hstyle = {"font": {"bold": True}, "borders": {"top": "thin", 
"right": "thin", "bottom": "thin", "left": "thin"}, "alignment": {"horizontal": "center", "vertical": "top"}} xls_style = _XlwtWriter._convert_to_style(hstyle) self.assertTrue(xls_style.font.bold) self.assertEquals(xlwt.Borders.THIN, xls_style.borders.top) self.assertEquals(xlwt.Borders.THIN, xls_style.borders.right) self.assertEquals(xlwt.Borders.THIN, xls_style.borders.bottom) self.assertEquals(xlwt.Borders.THIN, xls_style.borders.left) self.assertEquals(xlwt.Alignment.HORZ_CENTER, xls_style.alignment.horz) self.assertEquals(xlwt.Alignment.VERT_TOP, xls_style.alignment.vert) class XlsxWriterTests(ExcelWriterBase, tm.TestCase): ext = '.xlsx' engine_name = 'xlsxwriter' check_skip = staticmethod(_skip_if_no_xlsxwriter) class OpenpyxlTests_NoMerge(ExcelWriterBase, tm.TestCase): ext = '.xlsx' engine_name = 'openpyxl' check_skip = staticmethod(_skip_if_no_openpyxl) # Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows. merge_cells = False class XlwtTests_NoMerge(ExcelWriterBase, tm.TestCase): ext = '.xls' engine_name = 'xlwt' check_skip = staticmethod(_skip_if_no_xlwt) # Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows. merge_cells = False class XlsxWriterTests_NoMerge(ExcelWriterBase, tm.TestCase): ext = '.xlsx' engine_name = 'xlsxwriter' check_skip = staticmethod(_skip_if_no_xlsxwriter) # Test < 0.13 non-merge behaviour for MultiIndex and Hierarchical Rows. 
merge_cells = False class ExcelWriterEngineTests(tm.TestCase): def test_ExcelWriter_dispatch(self): with tm.assertRaisesRegexp(ValueError, 'No engine'): ExcelWriter('nothing') try: import xlsxwriter writer_klass = _XlsxWriter except ImportError: _skip_if_no_openpyxl() writer_klass = _OpenpyxlWriter with ensure_clean('.xlsx') as path: writer = ExcelWriter(path) tm.assert_isinstance(writer, writer_klass) _skip_if_no_xlwt() with ensure_clean('.xls') as path: writer = ExcelWriter(path) tm.assert_isinstance(writer, _XlwtWriter) def test_register_writer(self): # some awkward mocking to test out dispatch and such actually works called_save = [] called_write_cells = [] class DummyClass(ExcelWriter): called_save = False called_write_cells = False supported_extensions = ['test', 'xlsx', 'xls'] engine = 'dummy' def save(self): called_save.append(True) def write_cells(self, *args, **kwargs): called_write_cells.append(True) def check_called(func): func() self.assert_(len(called_save) >= 1) self.assert_(len(called_write_cells) >= 1) del called_save[:] del called_write_cells[:] register_writer(DummyClass) writer = ExcelWriter('something.test') tm.assert_isinstance(writer, DummyClass) df = tm.makeCustomDataframe(1, 1) panel = tm.makePanel() func = lambda: df.to_excel('something.test') check_called(func) check_called(lambda: panel.to_excel('something.test')) val = get_option('io.excel.xlsx.writer') set_option('io.excel.xlsx.writer', 'dummy') check_called(lambda: df.to_excel('something.xlsx')) check_called(lambda: df.to_excel('something.xls', engine='dummy')) set_option('io.excel.xlsx.writer', val) if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
gpl-3.0
raghavrv/scikit-learn
examples/ensemble/plot_adaboost_regression.py
311
1529
""" ====================================== Decision Tree Regression with AdaBoost ====================================== A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D sinusoidal dataset with a small amount of Gaussian noise. 299 boosts (300 decision trees) is compared with a single decision tree regressor. As the number of boosts is increased the regressor can fit more detail. .. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997. """ print(__doc__) # Author: Noel Dawe <noel.dawe@gmail.com> # # License: BSD 3 clause # importing necessary libraries import numpy as np import matplotlib.pyplot as plt from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import AdaBoostRegressor # Create the dataset rng = np.random.RandomState(1) X = np.linspace(0, 6, 100)[:, np.newaxis] y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0]) # Fit regression model regr_1 = DecisionTreeRegressor(max_depth=4) regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4), n_estimators=300, random_state=rng) regr_1.fit(X, y) regr_2.fit(X, y) # Predict y_1 = regr_1.predict(X) y_2 = regr_2.predict(X) # Plot the results plt.figure() plt.scatter(X, y, c="k", label="training samples") plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2) plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2) plt.xlabel("data") plt.ylabel("target") plt.title("Boosted Decision Tree Regression") plt.legend() plt.show()
bsd-3-clause
kdebrab/pandas
pandas/tests/reshape/merge/test_merge_index_as_string.py
3
7108
import numpy as np import pytest from pandas import DataFrame from pandas.util import testing as tm from pandas.util.testing import assert_frame_equal @pytest.fixture def df1(): return DataFrame(dict( outer=[1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4], inner=[1, 2, 3, 1, 2, 3, 4, 1, 2, 1, 2], v1=np.linspace(0, 1, 11))) @pytest.fixture def df2(): return DataFrame(dict( outer=[1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3], inner=[1, 2, 2, 3, 3, 4, 2, 3, 1, 1, 2, 3], v2=np.linspace(10, 11, 12))) @pytest.fixture(params=[[], ['outer'], ['outer', 'inner']]) def left_df(request, df1): """ Construct left test DataFrame with specified levels (any of 'outer', 'inner', and 'v1')""" levels = request.param if levels: df1 = df1.set_index(levels) return df1 @pytest.fixture(params=[[], ['outer'], ['outer', 'inner']]) def right_df(request, df2): """ Construct right test DataFrame with specified levels (any of 'outer', 'inner', and 'v2')""" levels = request.param if levels: df2 = df2.set_index(levels) return df2 def compute_expected(df_left, df_right, on=None, left_on=None, right_on=None, how=None): """ Compute the expected merge result for the test case. This method computes the expected result of merging two DataFrames on a combination of their columns and index levels. It does so by explicitly dropping/resetting their named index levels, performing a merge on their columns, and then finally restoring the appropriate index in the result. 
Parameters ---------- df_left : DataFrame The left DataFrame (may have zero or more named index levels) df_right : DataFrame The right DataFrame (may have zero or more named index levels) on : list of str The on parameter to the merge operation left_on : list of str The left_on parameter to the merge operation right_on : list of str The right_on parameter to the merge operation how : str The how parameter to the merge operation Returns ------- DataFrame The expected merge result """ # Handle on param if specified if on is not None: left_on, right_on = on, on # Compute input named index levels left_levels = [n for n in df_left.index.names if n is not None] right_levels = [n for n in df_right.index.names if n is not None] # Compute output named index levels output_levels = [i for i in left_on if i in right_levels and i in left_levels] # Drop index levels that aren't involved in the merge drop_left = [n for n in left_levels if n not in left_on] if drop_left: df_left = df_left.reset_index(drop_left, drop=True) drop_right = [n for n in right_levels if n not in right_on] if drop_right: df_right = df_right.reset_index(drop_right, drop=True) # Convert remaining index levels to columns reset_left = [n for n in left_levels if n in left_on] if reset_left: df_left = df_left.reset_index(level=reset_left) reset_right = [n for n in right_levels if n in right_on] if reset_right: df_right = df_right.reset_index(level=reset_right) # Perform merge expected = df_left.merge(df_right, left_on=left_on, right_on=right_on, how=how) # Restore index levels if output_levels: expected = expected.set_index(output_levels) return expected @pytest.mark.parametrize('on,how', [(['outer'], 'inner'), (['inner'], 'left'), (['outer', 'inner'], 'right'), (['inner', 'outer'], 'outer')]) def test_merge_indexes_and_columns_on(left_df, right_df, on, how): # Construct expected result expected = compute_expected(left_df, right_df, on=on, how=how) # Perform merge result = left_df.merge(right_df, on=on, how=how) 
assert_frame_equal(result, expected, check_like=True) @pytest.mark.parametrize('left_on,right_on,how', [(['outer'], ['outer'], 'inner'), (['inner'], ['inner'], 'right'), (['outer', 'inner'], ['outer', 'inner'], 'left'), (['inner', 'outer'], ['inner', 'outer'], 'outer')]) def test_merge_indexes_and_columns_lefton_righton( left_df, right_df, left_on, right_on, how): # Construct expected result expected = compute_expected(left_df, right_df, left_on=left_on, right_on=right_on, how=how) # Perform merge result = left_df.merge(right_df, left_on=left_on, right_on=right_on, how=how) assert_frame_equal(result, expected, check_like=True) @pytest.mark.parametrize('left_index', ['inner', ['inner', 'outer']]) def test_join_indexes_and_columns_on(df1, df2, left_index, join_type): # Construct left_df left_df = df1.set_index(left_index) # Construct right_df right_df = df2.set_index(['outer', 'inner']) # Result expected = (left_df.reset_index() .join(right_df, on=['outer', 'inner'], how=join_type, lsuffix='_x', rsuffix='_y') .set_index(left_index)) # Perform join result = left_df.join(right_df, on=['outer', 'inner'], how=join_type, lsuffix='_x', rsuffix='_y') assert_frame_equal(result, expected, check_like=True) def test_merge_index_column_precedence(df1, df2): # Construct left_df with both an index and a column named 'outer'. # We make this 'outer' column equal to the 'inner' column so that we # can verify that the correct values are used by the merge operation left_df = df1.set_index('outer') left_df['outer'] = left_df['inner'] # Construct right_df with an index level named 'outer' right_df = df2.set_index('outer') # Construct expected result. 
# The 'outer' column from left_df is chosen and the resulting # frame has no index levels expected = (left_df.reset_index(level='outer', drop=True) .merge(right_df.reset_index(), on=['outer', 'inner'])) # Merge left_df and right_df on 'outer' and 'inner' # 'outer' for left_df should refer to the 'outer' column, not the # 'outer' index level and a FutureWarning should be raised with tm.assert_produces_warning(FutureWarning): result = left_df.merge(right_df, on=['outer', 'inner']) # Check results assert_frame_equal(result, expected) # Perform the same using the left_on and right_on parameters with tm.assert_produces_warning(FutureWarning): result = left_df.merge(right_df, left_on=['outer', 'inner'], right_on=['outer', 'inner']) assert_frame_equal(result, expected)
bsd-3-clause
zorroblue/scikit-learn
examples/ensemble/plot_random_forest_regression_multioutput.py
29
2685
""" ============================================================ Comparing random forests and the multi-output meta estimator ============================================================ An example to compare multi-output regression with random forest and the :ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator. This example illustrates the use of the :ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator to perform multi-output regression. A random forest regressor is used, which supports multi-output regression natively, so the results can be compared. The random forest regressor will only ever predict values within the range of observations or closer to zero for each of the targets. As a result the predictions are biased towards the centre of the circle. Using a single underlying feature the model learns both the x and y coordinate as output. """ print(__doc__) # Author: Tim Head <betatim@gmail.com> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split from sklearn.multioutput import MultiOutputRegressor # Create a random dataset rng = np.random.RandomState(1) X = np.sort(200 * rng.rand(600, 1) - 100, axis=0) y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T y += (0.5 - rng.rand(*y.shape)) X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=400, random_state=4) max_depth = 30 regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth, random_state=0)) regr_multirf.fit(X_train, y_train) regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2) regr_rf.fit(X_train, y_train) # Predict on new data y_multirf = regr_multirf.predict(X_test) y_rf = regr_rf.predict(X_test) # Plot the results plt.figure() s = 50 a = 0.4 plt.scatter(y_test[:, 0], y_test[:, 1], edgecolor='k', c="navy", s=s, marker="s", alpha=a, label="Data") plt.scatter(y_multirf[:, 0], 
y_multirf[:, 1], edgecolor='k', c="cornflowerblue", s=s, alpha=a, label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test)) plt.scatter(y_rf[:, 0], y_rf[:, 1], edgecolor='k', c="c", s=s, marker="^", alpha=a, label="RF score=%.2f" % regr_rf.score(X_test, y_test)) plt.xlim([-6, 6]) plt.ylim([-6, 6]) plt.xlabel("target 1") plt.ylabel("target 2") plt.title("Comparing random forests and the multi-output meta estimator") plt.legend() plt.show()
bsd-3-clause
INM-6/elephant
doc/conf.py
2
11596
# -*- coding: utf-8 -*- # # Elephant documentation build configuration file, created by # sphinx-quickstart on Wed Feb 5 17:11:26 2014. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys from datetime import date # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, '..') # -- General configuration ----------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.imgmath', 'sphinx.ext.viewcode', 'sphinx.ext.mathjax', 'sphinxcontrib.bibtex', 'matplotlib.sphinxext.plot_directive', 'numpydoc', 'nbsphinx', 'sphinx_tabs.tabs', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. 
project = u'Elephant'
authors = u'Elephant authors and contributors'
# Copyright year tracks the build date automatically.
copyright = u"2014-{this_year}, {authors}".format(this_year=date.today().year,
                                                 authors=authors)

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
root_dir = os.path.dirname(os.path.dirname(__file__))
with open(os.path.join(root_dir, 'elephant', 'VERSION')) as version_file:
    # The full version, including alpha/beta/rc tags.
    release = version_file.read().strip()

# The short X.Y version.
version = '.'.join(release.split('.')[:-1])

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
    '_build',
    '**.ipynb_checkpoints',
    'maintainers_guide.rst',  # should not be visible for users
]

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = [] # Only execute Jupyter notebooks that have no evaluated cells nbsphinx_execute = 'auto' # Kernel to use for execution nbsphinx_kernel_name = 'python3' # Cancel compile on errors in notebooks nbsphinx_allow_errors = False # Required to automatically create a summary page for each function listed in # the autosummary fields of each module. autosummary_generate = True # Set to False to not overwrite the custom _toctree/*.rst autosummary_generate_overwrite = True # -- Options for HTML output --------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' html_theme_options = { 'font_family': 'Arial', 'page_width': '1200px', # default is 940 'sidebar_width': '280px', # default is 220 } # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = 'images/elephant_logo_sidebar.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = 'images/elephant_favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
# html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. html_show_sphinx = False # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'elephantdoc' # Suppresses wrong numpy doc warnings # see here https://github.com/phn/pytpm/issues/3#issuecomment-12133978 numpydoc_show_class_members = False # A fix for Alabaster theme for no space between a citation reference # and citation text # https://github.com/sphinx-doc/sphinx/issues/6705#issuecomment-536197438 html4_writer = True # -- Options for LaTeX output -------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). 
# 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'elephant.tex', u'Elephant Documentation', authors, 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'elephant', u'Elephant Documentation', [authors], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Elephant', u'Elephant Documentation', authors, 'Elephant', 'Elephant is a package for the analysis of neurophysiology data.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for Epub output --------------------------------------------- # Bibliographic Dublin Core info. 
# Bibliographic Dublin Core metadata for the EPUB builder, reusing the
# project-level values defined above.
epub_title = project
epub_author = authors
epub_publisher = authors
epub_copyright = copyright

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
#epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

# configuration for intersphinx: refer to Viziphant
intersphinx_mapping = {
    'viziphant': ('https://viziphant.readthedocs.io/en/latest/', None),
    'numpy': ('https://numpy.org/doc/stable', None)
}

# Use a reliable MathJax source. The previous host, cdn.mathjax.org, was
# retired in 2017, which silently broke equation rendering; jsDelivr serves
# the same MathJax v2 bundle with the identical configuration.
mathjax_path = ('https://cdn.jsdelivr.net/npm/mathjax@2/MathJax.js'
                '?config=TeX-AMS-MML_HTMLorMML')


# Remove the copyright notice from docstrings:
def process_docstring_remove_copyright(app, what, name, obj, options, lines):
    """Strip the trailing ':copyright:' block from autodoc docstrings.

    Everything from the first line starting with ``:copyright:`` through
    the end of the docstring is removed. ``lines`` is mutated in place, as
    required by the 'autodoc-process-docstring' event (rebinding the list
    would have no effect).
    """
    copyright_line = None
    for i, line in enumerate(lines):
        if line.startswith(':copyright:'):
            copyright_line = i
            break
    # Compare to None explicitly: a notice on the very first line yields
    # index 0, which is falsy and was previously mistaken for "not found",
    # leaving the copyright text in the rendered docs.
    if copyright_line is not None:
        while len(lines) > copyright_line:
            lines.pop()


def setup(app):
    """Register this build's Sphinx event handlers."""
    app.connect('autodoc-process-docstring',
                process_docstring_remove_copyright)
bsd-3-clause
ammarkhann/FinalSeniorCode
lib/python2.7/site-packages/scipy/signal/ltisys.py
13
123258
""" ltisys -- a collection of classes and functions for modeling linear time invariant systems. """ from __future__ import division, print_function, absolute_import # # Author: Travis Oliphant 2001 # # Feb 2010: Warren Weckesser # Rewrote lsim2 and added impulse2. # Apr 2011: Jeffrey Armstrong <jeff@approximatrix.com> # Added dlsim, dstep, dimpulse, cont2discrete # Aug 2013: Juan Luis Cano # Rewrote abcd_normalize. # Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr # Added pole placement # Mar 2015: Clancy Rowley # Rewrote lsim # May 2015: Felix Berkenkamp # Split lti class into subclasses # Merged discrete systems and added dlti import warnings # np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7 # use scipy's qr until this is solved from scipy.linalg import qr as s_qr from scipy import integrate, interpolate, linalg from scipy.interpolate import interp1d from scipy._lib.six import xrange from .filter_design import (tf2zpk, zpk2tf, normalize, freqs, freqz, freqs_zpk, freqz_zpk) from .lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk, cont2discrete) import numpy import numpy as np from numpy import (real, atleast_1d, atleast_2d, squeeze, asarray, zeros, dot, transpose, ones, zeros_like, linspace, nan_to_num) import copy __all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace', 'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode', 'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse', 'dfreqresp', 'dbode'] class LinearTimeInvariant(object): def __new__(cls, *system, **kwargs): """Create a new object, don't allow direct instances.""" if cls is LinearTimeInvariant: raise NotImplementedError('The LinearTimeInvariant class is not ' 'meant to be used directly, use `lti` ' 'or `dlti` instead.') return super(LinearTimeInvariant, cls).__new__(cls) def __init__(self): """ Initialize the `lti` baseclass. The heavy lifting is done by the subclasses. 
""" super(LinearTimeInvariant, self).__init__() self.inputs = None self.outputs = None self._dt = None @property def dt(self): """Return the sampling time of the system, `None` for `lti` systems.""" return self._dt @property def _dt_dict(self): if self.dt is None: return {} else: return {'dt': self.dt} @property def num(self): """Numerator of the `TransferFunction` system.""" warnings.warn('Cross-class properties have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. Please use `sys.to_tf().num`instead.', DeprecationWarning) return self.to_tf().num @num.setter def num(self, num): warnings.warn('Cross-class setters have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. Please convert your system with `sys.to_tf()` ' 'before setting `num`.', DeprecationWarning) obj = self.to_tf() obj.num = num source_class = type(self) self._copy(source_class(obj)) @property def den(self): """Denominator of the `TransferFunction` system.""" warnings.warn('Cross-class properties have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. Please use `sys.to_tf().den`instead.', DeprecationWarning) return self.to_tf().den @den.setter def den(self, den): warnings.warn('Cross-class setters have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. Please convert your system with `sys.to_tf()` ' 'before setting `den`.', DeprecationWarning) obj = self.to_tf() obj.den = den source_class = type(self) self._copy(source_class(obj)) @property def zeros(self): """Zeros of the system.""" return self.to_zpk().zeros @zeros.setter def zeros(self, zeros): warnings.warn('Cross-class setters have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. 
Please convert your system with `sys.to_zpk()` ' 'before setting `zeros`.', DeprecationWarning) obj = self.to_zpk() obj.zeros = zeros source_class = type(self) self._copy(source_class(obj)) @property def poles(self): """Poles of the system.""" return self.to_zpk().poles @poles.setter def poles(self, poles): warnings.warn('Cross-class setters have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. Please convert your system with `sys.to_zpk()` ' 'before setting `poles`.', DeprecationWarning) obj = self.to_zpk() obj.poles = poles source_class = type(self) self._copy(source_class(obj)) @property def gain(self): """Gain of the `ZerosPolesGain` system.""" warnings.warn('Cross-class properties have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. Please use `sys.to_zpk().gain`instead.', DeprecationWarning) return self.to_zpk().gain @gain.setter def gain(self, gain): warnings.warn('Cross-class setters have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. Please convert your system with `sys.to_zpk()` ' 'before setting `gain`.', DeprecationWarning) obj = self.to_zpk() obj.gain = gain source_class = type(self) self._copy(source_class(obj)) @property def A(self): """State matrix of the `StateSpace` system.""" warnings.warn('Cross-class properties have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. Please use `sys.to_ss().A`instead.', DeprecationWarning) return self.to_ss().A @A.setter def A(self, A): warnings.warn('Cross-class setters have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. 
Please convert your system with `sys.to_ss()` ' 'before setting `A`.', DeprecationWarning) obj = self.to_ss() obj.A = A source_class = type(self) self._copy(source_class(obj)) @property def B(self): """Input matrix of the `StateSpace` system.""" warnings.warn('Cross-class properties have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. Please use `sys.to_ss().B`instead.', DeprecationWarning) return self.to_ss().B @B.setter def B(self, B): warnings.warn('Cross-class setters have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. Please convert your system with `sys.to_ss()` ' 'before setting `B`.', DeprecationWarning) obj = self.to_ss() obj.B = B source_class = type(self) self._copy(source_class(obj)) @property def C(self): """Output matrix of the `StateSpace` system.""" warnings.warn('Cross-class properties have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. Please use `sys.to_ss().C`instead.', DeprecationWarning) return self.to_ss().C @C.setter def C(self, C): warnings.warn('Cross-class setters have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. Please convert your system with `sys.to_ss()` ' 'before setting `C`.', DeprecationWarning) obj = self.to_ss() obj.C = C source_class = type(self) self._copy(source_class(obj)) @property def D(self): """Feedthrough matrix of the `StateSpace` system.""" warnings.warn('Cross-class properties have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. Please use `sys.to_ss().D`instead.', DeprecationWarning) return self.to_ss().D @D.setter def D(self, D): warnings.warn('Cross-class setters have been deprecated in scipy ' '0.18.0 and will be removed in a future version of ' 'scipy. 
Please convert your system with `sys.to_ss()` ' 'before setting `D`.', DeprecationWarning) obj = self.to_ss() obj.D = D source_class = type(self) self._copy(source_class(obj)) def _as_ss(self): """Convert to `StateSpace` system, without copying. Returns ------- sys: StateSpace The `StateSpace` system. If the class is already an instance of `StateSpace` then this instance is returned. """ if isinstance(self, StateSpace): return self else: return self.to_ss() def _as_zpk(self): """Convert to `ZerosPolesGain` system, without copying. Returns ------- sys: ZerosPolesGain The `ZerosPolesGain` system. If the class is already an instance of `ZerosPolesGain` then this instance is returned. """ if isinstance(self, ZerosPolesGain): return self else: return self.to_zpk() def _as_tf(self): """Convert to `TransferFunction` system, without copying. Returns ------- sys: ZerosPolesGain The `TransferFunction` system. If the class is already an instance of `TransferFunction` then this instance is returned. """ if isinstance(self, TransferFunction): return self else: return self.to_tf() class lti(LinearTimeInvariant): """ Continuous-time linear time invariant system base class. Parameters ---------- *system : arguments The `lti` class can be instantiated with either 2, 3 or 4 arguments. The following gives the number of arguments and the corresponding continuous-time subclass that is created: * 2: `TransferFunction`: (numerator, denominator) * 3: `ZerosPolesGain`: (zeros, poles, gain) * 4: `StateSpace`: (A, B, C, D) Each argument can be an array or a sequence. See Also -------- ZerosPolesGain, StateSpace, TransferFunction, dlti Notes ----- `lti` instances do not exist directly. Instead, `lti` creates an instance of one of its subclasses: `StateSpace`, `TransferFunction` or `ZerosPolesGain`. 
If (numerator, denominator) is passed in for ``*system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). Changing the value of properties that are not directly part of the current system representation (such as the `zeros` of a `StateSpace` system) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. Examples -------- >>> from scipy import signal >>> signal.lti(1, 2, 3, 4) StateSpaceContinuous( array([[1]]), array([[2]]), array([[3]]), array([[4]]), dt: None ) >>> signal.lti([1, 2], [3, 4], 5) ZerosPolesGainContinuous( array([1, 2]), array([3, 4]), 5, dt: None ) >>> signal.lti([3, 4], [1, 2]) TransferFunctionContinuous( array([ 3., 4.]), array([ 1., 2.]), dt: None ) """ def __new__(cls, *system): """Create an instance of the appropriate subclass.""" if cls is lti: N = len(system) if N == 2: return TransferFunctionContinuous.__new__( TransferFunctionContinuous, *system) elif N == 3: return ZerosPolesGainContinuous.__new__( ZerosPolesGainContinuous, *system) elif N == 4: return StateSpaceContinuous.__new__(StateSpaceContinuous, *system) else: raise ValueError("`system` needs to be an instance of `lti` " "or have 2, 3 or 4 arguments.") # __new__ was called from a subclass, let it call its own functions return super(lti, cls).__new__(cls) def __init__(self, *system): """ Initialize the `lti` baseclass. The heavy lifting is done by the subclasses. """ super(lti, self).__init__(*system) def impulse(self, X0=None, T=None, N=None): """ Return the impulse response of a continuous-time system. See `impulse` for details. """ return impulse(self, X0=X0, T=T, N=N) def step(self, X0=None, T=None, N=None): """ Return the step response of a continuous-time system. See `step` for details. 
""" return step(self, X0=X0, T=T, N=N) def output(self, U, T, X0=None): """ Return the response of a continuous-time system to input `U`. See `lsim` for details. """ return lsim(self, U, T, X0=X0) def bode(self, w=None, n=100): """ Calculate Bode magnitude and phase data of a continuous-time system. Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude [dB] and phase [deg]. See `bode` for details. Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> sys = signal.TransferFunction([1], [1, 1]) >>> w, mag, phase = sys.bode() >>> plt.figure() >>> plt.semilogx(w, mag) # Bode magnitude plot >>> plt.figure() >>> plt.semilogx(w, phase) # Bode phase plot >>> plt.show() """ return bode(self, w=w, n=n) def freqresp(self, w=None, n=10000): """ Calculate the frequency response of a continuous-time system. Returns a 2-tuple containing arrays of frequencies [rad/s] and complex magnitude. See `freqresp` for details. """ return freqresp(self, w=w, n=n) def to_discrete(self, dt, method='zoh', alpha=None): """Return a discretized version of the current system. Parameters: See `cont2discrete` for details. Returns ------- sys: instance of `dlti` """ raise NotImplementedError('to_discrete is not implemented for this ' 'system class.') class dlti(LinearTimeInvariant): """ Discrete-time linear time invariant system base class. Parameters ---------- *system: arguments The `dlti` class can be instantiated with either 2, 3 or 4 arguments. The following gives the number of arguments and the corresponding discrete-time subclass that is created: * 2: `TransferFunction`: (numerator, denominator) * 3: `ZerosPolesGain`: (zeros, poles, gain) * 4: `StateSpace`: (A, B, C, D) Each argument can be an array or a sequence. dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to ``True`` (unspecified sampling time). Must be specified as a keyword argument, for example, ``dt=0.1``. 
See Also -------- ZerosPolesGain, StateSpace, TransferFunction, lti Notes ----- `dlti` instances do not exist directly. Instead, `dlti` creates an instance of one of its subclasses: `StateSpace`, `TransferFunction` or `ZerosPolesGain`. Changing the value of properties that are not directly part of the current system representation (such as the `zeros` of a `StateSpace` system) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. If (numerator, denominator) is passed in for ``*system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``). .. versionadded:: 0.18.0 Examples -------- >>> from scipy import signal >>> signal.dlti(1, 2, 3, 4) StateSpaceDiscrete( array([[1]]), array([[2]]), array([[3]]), array([[4]]), dt: True ) >>> signal.dlti(1, 2, 3, 4, dt=0.1) StateSpaceDiscrete( array([[1]]), array([[2]]), array([[3]]), array([[4]]), dt: 0.1 ) >>> signal.dlti([1, 2], [3, 4], 5, dt=0.1) ZerosPolesGainDiscrete( array([1, 2]), array([3, 4]), 5, dt: 0.1 ) >>> signal.dlti([3, 4], [1, 2], dt=0.1) TransferFunctionDiscrete( array([ 3., 4.]), array([ 1., 2.]), dt: 0.1 ) """ def __new__(cls, *system, **kwargs): """Create an instance of the appropriate subclass.""" if cls is dlti: N = len(system) if N == 2: return TransferFunctionDiscrete.__new__( TransferFunctionDiscrete, *system, **kwargs) elif N == 3: return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete, *system, **kwargs) elif N == 4: return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system, **kwargs) else: raise ValueError("`system` needs to be an instance of `dlti` " "or have 2, 3 or 4 arguments.") # __new__ was called from a subclass, let it call its own functions return super(dlti, cls).__new__(cls) def __init__(self, *system, 
**kwargs): """ Initialize the `lti` baseclass. The heavy lifting is done by the subclasses. """ dt = kwargs.pop('dt', True) super(dlti, self).__init__(*system, **kwargs) self.dt = dt @property def dt(self): """Return the sampling time of the system.""" return self._dt @dt.setter def dt(self, dt): self._dt = dt def impulse(self, x0=None, t=None, n=None): """ Return the impulse response of the discrete-time `dlti` system. See `dimpulse` for details. """ return dimpulse(self, x0=x0, t=t, n=n) def step(self, x0=None, t=None, n=None): """ Return the step response of the discrete-time `dlti` system. See `dstep` for details. """ return dstep(self, x0=x0, t=t, n=n) def output(self, u, t, x0=None): """ Return the response of the discrete-time system to input `u`. See `dlsim` for details. """ return dlsim(self, u, t, x0=x0) def bode(self, w=None, n=100): """ Calculate Bode magnitude and phase data of a discrete-time system. Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude [dB] and phase [deg]. See `dbode` for details. Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt Transfer function: H(z) = 1 / (z^2 + 2z + 3) with sampling time 0.5s >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5) Equivalent: signal.dbode(sys) >>> w, mag, phase = sys.bode() >>> plt.figure() >>> plt.semilogx(w, mag) # Bode magnitude plot >>> plt.figure() >>> plt.semilogx(w, phase) # Bode phase plot >>> plt.show() """ return dbode(self, w=w, n=n) def freqresp(self, w=None, n=10000, whole=False): """ Calculate the frequency response of a discrete-time system. Returns a 2-tuple containing arrays of frequencies [rad/s] and complex magnitude. See `dfreqresp` for details. """ return dfreqresp(self, w=w, n=n, whole=whole) class TransferFunction(LinearTimeInvariant): r"""Linear Time Invariant system class in transfer function form. 
Represents the system as the continuous-time transfer function :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the discrete-time transfer function :math:`H(s)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where :math:`b` are elements of the numerator `num`, :math:`a` are elements of the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. `TransferFunction` systems inherit additional functionality from the `lti`, respectively the `dlti` classes, depending on which system representation is used. Parameters ---------- *system: arguments The `TransferFunction` class can be instantiated with 1 or 2 arguments. The following gives the number of input arguments and their interpretation: * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 2: array_like: (numerator, denominator) dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to `None` (continuous-time). Must be specified as a keyword argument, for example, ``dt=0.1``. See Also -------- ZerosPolesGain, StateSpace, lti, dlti tf2ss, tf2zpk, tf2sos Notes ----- Changing the value of properties that are not part of the `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` state-space matrices) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_ss()`` before accessing/changing the A, B, C, D system matrices. If (numerator, denominator) is passed in for ``*system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``) Examples -------- Construct the transfer function: .. 
math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1} >>> from scipy import signal >>> num = [1, 3, 3] >>> den = [1, 2, 1] >>> signal.TransferFunction(num, den) TransferFunctionContinuous( array([ 1., 3., 3.]), array([ 1., 2., 1.]), dt: None ) Contruct the transfer function with a sampling time of 0.1 seconds: .. math:: H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1} >>> signal.TransferFunction(num, den, dt=0.1) TransferFunctionDiscrete( array([ 1., 3., 3.]), array([ 1., 2., 1.]), dt: 0.1 ) """ def __new__(cls, *system, **kwargs): """Handle object conversion if input is an instance of lti.""" if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): return system[0].to_tf() # Choose whether to inherit from `lti` or from `dlti` if cls is TransferFunction: if kwargs.get('dt') is None: return TransferFunctionContinuous.__new__( TransferFunctionContinuous, *system, **kwargs) else: return TransferFunctionDiscrete.__new__( TransferFunctionDiscrete, *system, **kwargs) # No special conversion needed return super(TransferFunction, cls).__new__(cls) def __init__(self, *system, **kwargs): """Initialize the state space LTI system.""" # Conversion of lti instances is handled in __new__ if isinstance(system[0], LinearTimeInvariant): return # Remove system arguments, not needed by parents anymore super(TransferFunction, self).__init__(**kwargs) self._num = None self._den = None self.num, self.den = normalize(*system) def __repr__(self): """Return representation of the system's transfer function""" return '{0}(\n{1},\n{2},\ndt: {3}\n)'.format( self.__class__.__name__, repr(self.num), repr(self.den), repr(self.dt), ) @property def num(self): """Numerator of the `TransferFunction` system.""" return self._num @num.setter def num(self, num): self._num = atleast_1d(num) # Update dimensions if len(self.num.shape) > 1: self.outputs, self.inputs = self.num.shape else: self.outputs = 1 self.inputs = 1 @property def den(self): """Denominator of the `TransferFunction` system.""" return 
self._den @den.setter def den(self, den): self._den = atleast_1d(den) def _copy(self, system): """ Copy the parameters of another `TransferFunction` object Parameters ---------- system : `TransferFunction` The `StateSpace` system that is to be copied """ self.num = system.num self.den = system.den def to_tf(self): """ Return a copy of the current `TransferFunction` system. Returns ------- sys : instance of `TransferFunction` The current system (copy) """ return copy.deepcopy(self) def to_zpk(self): """ Convert system representation to `ZerosPolesGain`. Returns ------- sys : instance of `ZerosPolesGain` Zeros, poles, gain representation of the current system """ return ZerosPolesGain(*tf2zpk(self.num, self.den), **self._dt_dict) def to_ss(self): """ Convert system representation to `StateSpace`. Returns ------- sys : instance of `StateSpace` State space model of the current system """ return StateSpace(*tf2ss(self.num, self.den), **self._dt_dict) @staticmethod def _z_to_zinv(num, den): """Change a transfer function from the variable `z` to `z**-1`. Parameters ---------- num, den: 1d array_like Sequences representing the coefficients of the numerator and denominator polynomials, in order of descending degree of 'z'. That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``. Returns ------- num, den: 1d array_like Sequences representing the coefficients of the numerator and denominator polynomials, in order of ascending degree of 'z**-1'. That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``. """ diff = len(num) - len(den) if diff > 0: den = np.hstack((np.zeros(diff), den)) elif diff < 0: num = np.hstack((np.zeros(-diff), num)) return num, den @staticmethod def _zinv_to_z(num, den): """Change a transfer function from the variable `z` to `z**-1`. Parameters ---------- num, den: 1d array_like Sequences representing the coefficients of the numerator and denominator polynomials, in order of ascending degree of 'z**-1'. 
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``. Returns ------- num, den: 1d array_like Sequences representing the coefficients of the numerator and denominator polynomials, in order of descending degree of 'z'. That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``. """ diff = len(num) - len(den) if diff > 0: den = np.hstack((den, np.zeros(diff))) elif diff < 0: num = np.hstack((num, np.zeros(-diff))) return num, den class TransferFunctionContinuous(TransferFunction, lti): r""" Continuous-time Linear Time Invariant system in transfer function form. Represents the system as the transfer function :math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where :math:`b` are elements of the numerator `num`, :math:`a` are elements of the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. Continuous-time `TransferFunction` systems inherit additional functionality from the `lti` class. Parameters ---------- *system: arguments The `TransferFunction` class can be instantiated with 1 or 2 arguments. The following gives the number of input arguments and their interpretation: * 1: `lti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 2: array_like: (numerator, denominator) See Also -------- ZerosPolesGain, StateSpace, lti tf2ss, tf2zpk, tf2sos Notes ----- Changing the value of properties that are not part of the `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` state-space matrices) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_ss()`` before accessing/changing the A, B, C, D system matrices. If (numerator, denominator) is passed in for ``*system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``) Examples -------- Construct the transfer function: .. 
math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1} >>> from scipy import signal >>> num = [1, 3, 3] >>> den = [1, 2, 1] >>> signal.TransferFunction(num, den) TransferFunctionContinuous( array([ 1., 3., 3.]), array([ 1., 2., 1.]), dt: None ) """ def to_discrete(self, dt, method='zoh', alpha=None): """ Returns the discretized `TransferFunction` system. Parameters: See `cont2discrete` for details. Returns ------- sys: instance of `dlti` and `StateSpace` """ return TransferFunction(*cont2discrete((self.num, self.den), dt, method=method, alpha=alpha)[:-1], dt=dt) class TransferFunctionDiscrete(TransferFunction, dlti): r""" Discrete-time Linear Time Invariant system in transfer function form. Represents the system as the transfer function :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where :math:`b` are elements of the numerator `num`, :math:`a` are elements of the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``. Discrete-time `TransferFunction` systems inherit additional functionality from the `dlti` class. Parameters ---------- *system: arguments The `TransferFunction` class can be instantiated with 1 or 2 arguments. The following gives the number of input arguments and their interpretation: * 1: `dlti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 2: array_like: (numerator, denominator) dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to `True` (unspecified sampling time). Must be specified as a keyword argument, for example, ``dt=0.1``. See Also -------- ZerosPolesGain, StateSpace, dlti tf2ss, tf2zpk, tf2sos Notes ----- Changing the value of properties that are not part of the `TransferFunction` system representation (such as the `A`, `B`, `C`, `D` state-space matrices) is very inefficient and may lead to numerical inaccuracies. 
If (numerator, denominator) is passed in for ``*system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``). Examples -------- Construct the transfer function with a sampling time of 0.5 seconds: .. math:: H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1} >>> from scipy import signal >>> num = [1, 3, 3] >>> den = [1, 2, 1] >>> signal.TransferFunction(num, den, 0.5) TransferFunctionDiscrete( array([ 1., 3., 3.]), array([ 1., 2., 1.]), dt: 0.5 ) """ pass class ZerosPolesGain(LinearTimeInvariant): r""" Linear Time Invariant system class in zeros, poles, gain form. Represents the system as the continuous- or discrete-time transfer function :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`. `ZerosPolesGain` systems inherit additional functionality from the `lti`, respectively the `dlti` classes, depending on which system representation is used. Parameters ---------- *system : arguments The `ZerosPolesGain` class can be instantiated with 1 or 3 arguments. The following gives the number of input arguments and their interpretation: * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 3: array_like: (zeros, poles, gain) dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to `None` (continuous-time). Must be specified as a keyword argument, for example, ``dt=0.1``. See Also -------- TransferFunction, StateSpace, lti, dlti zpk2ss, zpk2tf, zpk2sos Notes ----- Changing the value of properties that are not part of the `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` state-space matrices) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. 
For example, call ``sys = sys.to_ss()`` before accessing/changing the A, B, C, D system matrices. Examples -------- >>> from scipy import signal Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4) >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) ZerosPolesGainContinuous( array([1, 2]), array([3, 4]), 5, dt: None ) Transfer function: H(z) = 5(z - 1)(z - 2) / (z - 3)(z - 4) >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1) ZerosPolesGainDiscrete( array([1, 2]), array([3, 4]), 5, dt: 0.1 ) """ def __new__(cls, *system, **kwargs): """Handle object conversion if input is an instance of `lti`""" if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): return system[0].to_zpk() # Choose whether to inherit from `lti` or from `dlti` if cls is ZerosPolesGain: if kwargs.get('dt') is None: return ZerosPolesGainContinuous.__new__( ZerosPolesGainContinuous, *system, **kwargs) else: return ZerosPolesGainDiscrete.__new__( ZerosPolesGainDiscrete, *system, **kwargs ) # No special conversion needed return super(ZerosPolesGain, cls).__new__(cls) def __init__(self, *system, **kwargs): """Initialize the zeros, poles, gain system.""" # Conversion of lti instances is handled in __new__ if isinstance(system[0], LinearTimeInvariant): return super(ZerosPolesGain, self).__init__(**kwargs) self._zeros = None self._poles = None self._gain = None self.zeros, self.poles, self.gain = system def __repr__(self): """Return representation of the `ZerosPolesGain` system.""" return '{0}(\n{1},\n{2},\n{3},\ndt: {4}\n)'.format( self.__class__.__name__, repr(self.zeros), repr(self.poles), repr(self.gain), repr(self.dt), ) @property def zeros(self): """Zeros of the `ZerosPolesGain` system.""" return self._zeros @zeros.setter def zeros(self, zeros): self._zeros = atleast_1d(zeros) # Update dimensions if len(self.zeros.shape) > 1: self.outputs, self.inputs = self.zeros.shape else: self.outputs = 1 self.inputs = 1 @property def poles(self): """Poles of the `ZerosPolesGain` system.""" return 
self._poles @poles.setter def poles(self, poles): self._poles = atleast_1d(poles) @property def gain(self): """Gain of the `ZerosPolesGain` system.""" return self._gain @gain.setter def gain(self, gain): self._gain = gain def _copy(self, system): """ Copy the parameters of another `ZerosPolesGain` system. Parameters ---------- system : instance of `ZerosPolesGain` The zeros, poles gain system that is to be copied """ self.poles = system.poles self.zeros = system.zeros self.gain = system.gain def to_tf(self): """ Convert system representation to `TransferFunction`. Returns ------- sys : instance of `TransferFunction` Transfer function of the current system """ return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain), **self._dt_dict) def to_zpk(self): """ Return a copy of the current 'ZerosPolesGain' system. Returns ------- sys : instance of `ZerosPolesGain` The current system (copy) """ return copy.deepcopy(self) def to_ss(self): """ Convert system representation to `StateSpace`. Returns ------- sys : instance of `StateSpace` State space model of the current system """ return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain), **self._dt_dict) class ZerosPolesGainContinuous(ZerosPolesGain, lti): r""" Continuous-time Linear Time Invariant system in zeros, poles, gain form. Represents the system as the continuous time transfer function :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`. Continuous-time `ZerosPolesGain` systems inherit additional functionality from the `lti` class. Parameters ---------- *system : arguments The `ZerosPolesGain` class can be instantiated with 1 or 3 arguments. 
The following gives the number of input arguments and their interpretation: * 1: `lti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 3: array_like: (zeros, poles, gain) See Also -------- TransferFunction, StateSpace, lti zpk2ss, zpk2tf, zpk2sos Notes ----- Changing the value of properties that are not part of the `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` state-space matrices) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_ss()`` before accessing/changing the A, B, C, D system matrices. Examples -------- >>> from scipy import signal Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4) >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) ZerosPolesGainContinuous( array([1, 2]), array([3, 4]), 5, dt: None ) """ def to_discrete(self, dt, method='zoh', alpha=None): """ Returns the discretized `ZerosPolesGain` system. Parameters: See `cont2discrete` for details. Returns ------- sys: instance of `dlti` and `ZerosPolesGain` """ return ZerosPolesGain( *cont2discrete((self.zeros, self.poles, self.gain), dt, method=method, alpha=alpha)[:-1], dt=dt) class ZerosPolesGainDiscrete(ZerosPolesGain, dlti): r""" Discrete-time Linear Time Invariant system in zeros, poles, gain form. Represents the system as the discrete-time transfer function :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`. Discrete-time `ZerosPolesGain` systems inherit additional functionality from the `dlti` class. Parameters ---------- *system : arguments The `ZerosPolesGain` class can be instantiated with 1 or 3 arguments. 
The following gives the number of input arguments and their interpretation: * 1: `dlti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 3: array_like: (zeros, poles, gain) dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to `True` (unspecified sampling time). Must be specified as a keyword argument, for example, ``dt=0.1``. See Also -------- TransferFunction, StateSpace, dlti zpk2ss, zpk2tf, zpk2sos Notes ----- Changing the value of properties that are not part of the `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D` state-space matrices) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_ss()`` before accessing/changing the A, B, C, D system matrices. Examples -------- >>> from scipy import signal Transfer function: H(s) = 5(s - 1)(s - 2) / (s - 3)(s - 4) >>> signal.ZerosPolesGain([1, 2], [3, 4], 5) ZerosPolesGainContinuous( array([1, 2]), array([3, 4]), 5, dt: None ) Transfer function: H(z) = 5(z - 1)(z - 2) / (z - 3)(z - 4) >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1) ZerosPolesGainDiscrete( array([1, 2]), array([3, 4]), 5, dt: 0.1 ) """ pass def _atleast_2d_or_none(arg): if arg is not None: return atleast_2d(arg) class StateSpace(LinearTimeInvariant): r""" Linear Time Invariant system in state-space form. Represents the system as the continuous-time, first order differential equation :math:`\dot{x} = A x + B u` or the discrete-time difference equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems inherit additional functionality from the `lti`, respectively the `dlti` classes, depending on which system representation is used. Parameters ---------- *system: arguments The `StateSpace` class can be instantiated with 1 or 3 arguments. 
The following gives the number of input arguments and their interpretation: * 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 4: array_like: (A, B, C, D) dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to `None` (continuous-time). Must be specified as a keyword argument, for example, ``dt=0.1``. See Also -------- TransferFunction, ZerosPolesGain, lti, dlti ss2zpk, ss2tf, zpk2sos Notes ----- Changing the value of properties that are not part of the `StateSpace` system representation (such as `zeros` or `poles`) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. For example, call ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. Examples -------- >>> from scipy import signal >>> a = np.array([[0, 1], [0, 0]]) >>> b = np.array([[0], [1]]) >>> c = np.array([[1, 0]]) >>> d = np.array([[0]]) >>> sys = signal.StateSpace(a, b, c, d) >>> print(sys) StateSpaceContinuous( array([[0, 1], [0, 0]]), array([[0], [1]]), array([[1, 0]]), array([[0]]), dt: None ) >>> sys.to_discrete(0.1) StateSpaceDiscrete( array([[ 1. , 0.1], [ 0. , 1. ]]), array([[ 0.005], [ 0.1 ]]), array([[1, 0]]), array([[0]]), dt: 0.1 ) >>> a = np.array([[1, 0.1], [0, 1]]) >>> b = np.array([[0.005], [0.1]]) >>> signal.StateSpace(a, b, c, d, dt=0.1) StateSpaceDiscrete( array([[ 1. , 0.1], [ 0. , 1. 
]]), array([[ 0.005], [ 0.1 ]]), array([[1, 0]]), array([[0]]), dt: 0.1 ) """ def __new__(cls, *system, **kwargs): """Create new StateSpace object and settle inheritance.""" # Handle object conversion if input is an instance of `lti` if len(system) == 1 and isinstance(system[0], LinearTimeInvariant): return system[0].to_ss() # Choose whether to inherit from `lti` or from `dlti` if cls is StateSpace: if kwargs.get('dt') is None: return StateSpaceContinuous.__new__(StateSpaceContinuous, *system, **kwargs) else: return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system, **kwargs) # No special conversion needed return super(StateSpace, cls).__new__(cls) def __init__(self, *system, **kwargs): """Initialize the state space lti/dlti system.""" # Conversion of lti instances is handled in __new__ if isinstance(system[0], LinearTimeInvariant): return # Remove system arguments, not needed by parents anymore super(StateSpace, self).__init__(**kwargs) self._A = None self._B = None self._C = None self._D = None self.A, self.B, self.C, self.D = abcd_normalize(*system) def __repr__(self): """Return representation of the `StateSpace` system.""" return '{0}(\n{1},\n{2},\n{3},\n{4},\ndt: {5}\n)'.format( self.__class__.__name__, repr(self.A), repr(self.B), repr(self.C), repr(self.D), repr(self.dt), ) @property def A(self): """State matrix of the `StateSpace` system.""" return self._A @A.setter def A(self, A): self._A = _atleast_2d_or_none(A) @property def B(self): """Input matrix of the `StateSpace` system.""" return self._B @B.setter def B(self, B): self._B = _atleast_2d_or_none(B) self.inputs = self.B.shape[-1] @property def C(self): """Output matrix of the `StateSpace` system.""" return self._C @C.setter def C(self, C): self._C = _atleast_2d_or_none(C) self.outputs = self.C.shape[0] @property def D(self): """Feedthrough matrix of the `StateSpace` system.""" return self._D @D.setter def D(self, D): self._D = _atleast_2d_or_none(D) def _copy(self, system): """ Copy the parameters 
of another `StateSpace` system. Parameters ---------- system : instance of `StateSpace` The state-space system that is to be copied """ self.A = system.A self.B = system.B self.C = system.C self.D = system.D def to_tf(self, **kwargs): """ Convert system representation to `TransferFunction`. Parameters ---------- kwargs : dict, optional Additional keywords passed to `ss2zpk` Returns ------- sys : instance of `TransferFunction` Transfer function of the current system """ return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D, **kwargs), **self._dt_dict) def to_zpk(self, **kwargs): """ Convert system representation to `ZerosPolesGain`. Parameters ---------- kwargs : dict, optional Additional keywords passed to `ss2zpk` Returns ------- sys : instance of `ZerosPolesGain` Zeros, poles, gain representation of the current system """ return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D, **kwargs), **self._dt_dict) def to_ss(self): """ Return a copy of the current `StateSpace` system. Returns ------- sys : instance of `StateSpace` The current system (copy) """ return copy.deepcopy(self) class StateSpaceContinuous(StateSpace, lti): r""" Continuous-time Linear Time Invariant system in state-space form. Represents the system as the continuous-time, first order differential equation :math:`\dot{x} = A x + B u`. Continuous-time `StateSpace` systems inherit additional functionality from the `lti` class. Parameters ---------- *system: arguments The `StateSpace` class can be instantiated with 1 or 3 arguments. The following gives the number of input arguments and their interpretation: * 1: `lti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 4: array_like: (A, B, C, D) See Also -------- TransferFunction, ZerosPolesGain, lti ss2zpk, ss2tf, zpk2sos Notes ----- Changing the value of properties that are not part of the `StateSpace` system representation (such as `zeros` or `poles`) is very inefficient and may lead to numerical inaccuracies. 
It is better to convert to the specific system representation first. For example, call ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. Examples -------- >>> from scipy import signal >>> a = np.array([[0, 1], [0, 0]]) >>> b = np.array([[0], [1]]) >>> c = np.array([[1, 0]]) >>> d = np.array([[0]]) >>> sys = signal.StateSpace(a, b, c, d) >>> print(sys) StateSpaceContinuous( array([[0, 1], [0, 0]]), array([[0], [1]]), array([[1, 0]]), array([[0]]), dt: None ) """ def to_discrete(self, dt, method='zoh', alpha=None): """ Returns the discretized `StateSpace` system. Parameters: See `cont2discrete` for details. Returns ------- sys: instance of `dlti` and `StateSpace` """ return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D), dt, method=method, alpha=alpha)[:-1], dt=dt) class StateSpaceDiscrete(StateSpace, dlti): r""" Discrete-time Linear Time Invariant system in state-space form. Represents the system as the discrete-time difference equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems inherit additional functionality from the `dlti` class. Parameters ---------- *system: arguments The `StateSpace` class can be instantiated with 1 or 3 arguments. The following gives the number of input arguments and their interpretation: * 1: `dlti` system: (`StateSpace`, `TransferFunction` or `ZerosPolesGain`) * 4: array_like: (A, B, C, D) dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to `True` (unspecified sampling time). Must be specified as a keyword argument, for example, ``dt=0.1``. See Also -------- TransferFunction, ZerosPolesGain, dlti ss2zpk, ss2tf, zpk2sos Notes ----- Changing the value of properties that are not part of the `StateSpace` system representation (such as `zeros` or `poles`) is very inefficient and may lead to numerical inaccuracies. It is better to convert to the specific system representation first. 
For example, call ``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain. Examples -------- >>> from scipy import signal >>> a = np.array([[1, 0.1], [0, 1]]) >>> b = np.array([[0.005], [0.1]]) >>> c = np.array([[1, 0]]) >>> d = np.array([[0]]) >>> signal.StateSpace(a, b, c, d, dt=0.1) StateSpaceDiscrete( array([[ 1. , 0.1], [ 0. , 1. ]]), array([[ 0.005], [ 0.1 ]]), array([[1, 0]]), array([[0]]), dt: 0.1 ) """ pass def lsim2(system, U=None, T=None, X0=None, **kwargs): """ Simulate output of a continuous-time linear system, by using the ODE solver `scipy.integrate.odeint`. Parameters ---------- system : an instance of the `lti` class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1: (instance of `lti`) * 2: (num, den) * 3: (zeros, poles, gain) * 4: (A, B, C, D) U : array_like (1D or 2D), optional An input array describing the input at each time T. Linear interpolation is used between given times. If there are multiple inputs, then each column of the rank-2 array represents an input. If U is not given, the input is assumed to be zero. T : array_like (1D or 2D), optional The time steps at which the input is defined and at which the output is desired. The default is 101 evenly spaced points on the interval [0,10.0]. X0 : array_like (1D), optional The initial condition of the state vector. If `X0` is not given, the initial conditions are assumed to be 0. kwargs : dict Additional keyword arguments are passed on to the function `odeint`. See the notes below for more details. Returns ------- T : 1D ndarray The time values for the output. yout : ndarray The response of the system. xout : ndarray The time-evolution of the state-vector. Notes ----- This function uses `scipy.integrate.odeint` to solve the system's differential equations. Additional keyword arguments given to `lsim2` are passed on to `odeint`. See the documentation for `scipy.integrate.odeint` for the full list of arguments. 
If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). """ if isinstance(system, lti): sys = system._as_ss() elif isinstance(system, dlti): raise AttributeError('lsim2 can only be used with continuous-time ' 'systems.') else: sys = lti(*system)._as_ss() if X0 is None: X0 = zeros(sys.B.shape[0], sys.A.dtype) if T is None: # XXX T should really be a required argument, but U was # changed from a required positional argument to a keyword, # and T is after U in the argument list. So we either: change # the API and move T in front of U; check here for T being # None and raise an exception; or assign a default value to T # here. This code implements the latter. T = linspace(0, 10.0, 101) T = atleast_1d(T) if len(T.shape) != 1: raise ValueError("T must be a rank-1 array.") if U is not None: U = atleast_1d(U) if len(U.shape) == 1: U = U.reshape(-1, 1) sU = U.shape if sU[0] != len(T): raise ValueError("U must have the same number of rows " "as elements in T.") if sU[1] != sys.inputs: raise ValueError("The number of inputs in U (%d) is not " "compatible with the number of system " "inputs (%d)" % (sU[1], sys.inputs)) # Create a callable that uses linear interpolation to # calculate the input at any time. 
ufunc = interpolate.interp1d(T, U, kind='linear', axis=0, bounds_error=False) def fprime(x, t, sys, ufunc): """The vector field of the linear system.""" return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t])))) xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs) yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U)) else: def fprime(x, t, sys): """The vector field of the linear system.""" return dot(sys.A, x) xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs) yout = dot(sys.C, transpose(xout)) return T, squeeze(transpose(yout)), xout def _cast_to_array_dtype(in1, in2): """Cast array to dtype of other array, while avoiding ComplexWarning. Those can be raised when casting complex to real. """ if numpy.issubdtype(in2.dtype, numpy.float): # dtype to cast to is not complex, so use .real in1 = in1.real.astype(in2.dtype) else: in1 = in1.astype(in2.dtype) return in1 def lsim(system, U, T, X0=None, interp=True): """ Simulate output of a continuous-time linear system. Parameters ---------- system : an instance of the LTI class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1: (instance of `lti`) * 2: (num, den) * 3: (zeros, poles, gain) * 4: (A, B, C, D) U : array_like An input array describing the input at each time `T` (interpolation is assumed between given times). If there are multiple inputs, then each column of the rank-2 array represents an input. If U = 0 or None, a zero input is used. T : array_like The time steps at which the input is defined and at which the output is desired. Must be nonnegative, increasing, and equally spaced. X0 : array_like, optional The initial conditions on the state vector (zero by default). interp : bool, optional Whether to use linear (True, the default) or zero-order-hold (False) interpolation for the input array. Returns ------- T : 1D ndarray Time values for the output. yout : 1D ndarray System response. 
xout : ndarray Time evolution of the state vector. Notes ----- If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). Examples -------- Simulate a double integrator y'' = u, with a constant input u = 1 >>> from scipy import signal >>> system = signal.lti([[0., 1.], [0., 0.]], [[0.], [1.]], [[1., 0.]], 0.) >>> t = np.linspace(0, 5) >>> u = np.ones_like(t) >>> tout, y, x = signal.lsim(system, u, t) >>> import matplotlib.pyplot as plt >>> plt.plot(t, y) """ if isinstance(system, lti): sys = system._as_ss() elif isinstance(system, dlti): raise AttributeError('lsim can only be used with continuous-time ' 'systems.') else: sys = lti(*system)._as_ss() T = atleast_1d(T) if len(T.shape) != 1: raise ValueError("T must be a rank-1 array.") A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D)) n_states = A.shape[0] n_inputs = B.shape[1] n_steps = T.size if X0 is None: X0 = zeros(n_states, sys.A.dtype) xout = zeros((n_steps, n_states), sys.A.dtype) if T[0] == 0: xout[0] = X0 elif T[0] > 0: # step forward to initial time, with zero input xout[0] = dot(X0, linalg.expm(transpose(A) * T[0])) else: raise ValueError("Initial time must be nonnegative") no_input = (U is None or (isinstance(U, (int, float)) and U == 0.) or not np.any(U)) if n_steps == 1: yout = squeeze(dot(xout, transpose(C))) if not no_input: yout += squeeze(dot(U, transpose(D))) return T, squeeze(yout), squeeze(xout) dt = T[1] - T[0] if not np.allclose((T[1:] - T[:-1]) / dt, 1.0): warnings.warn("Non-uniform timesteps are deprecated. 
Results may be " "slow and/or inaccurate.", DeprecationWarning) return lsim2(system, U, T, X0) if no_input: # Zero input: just use matrix exponential # take transpose because state is a row vector expAT_dt = linalg.expm(transpose(A) * dt) for i in xrange(1, n_steps): xout[i] = dot(xout[i-1], expAT_dt) yout = squeeze(dot(xout, transpose(C))) return T, squeeze(yout), squeeze(xout) # Nonzero input U = atleast_1d(U) if U.ndim == 1: U = U[:, np.newaxis] if U.shape[0] != n_steps: raise ValueError("U must have the same number of rows " "as elements in T.") if U.shape[1] != n_inputs: raise ValueError("System does not define that many inputs.") if not interp: # Zero-order hold # Algorithm: to integrate from time 0 to time dt, we solve # xdot = A x + B u, x(0) = x0 # udot = 0, u(0) = u0. # # Solution is # [ x(dt) ] [ A*dt B*dt ] [ x0 ] # [ u(dt) ] = exp [ 0 0 ] [ u0 ] M = np.vstack([np.hstack([A * dt, B * dt]), np.zeros((n_inputs, n_states + n_inputs))]) # transpose everything because the state and input are row vectors expMT = linalg.expm(transpose(M)) Ad = expMT[:n_states, :n_states] Bd = expMT[n_states:, :n_states] for i in xrange(1, n_steps): xout[i] = dot(xout[i-1], Ad) + dot(U[i-1], Bd) else: # Linear interpolation between steps # Algorithm: to integrate from time 0 to time dt, with linear # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve # xdot = A x + B u, x(0) = x0 # udot = (u1 - u0) / dt, u(0) = u0. 
        # --- continuation of `lsim` (definition begins earlier in the
        # file): linearly-interpolated-input branch. ---
        #
        # Solution is
        #   [ x(dt) ]       [ A*dt  B*dt  0 ] [  x0   ]
        #   [ u(dt) ] = exp [  0     0    I ] [  u0   ]
        #   [u1 - u0]       [  0     0    0 ] [u1 - u0]
        M = np.vstack([np.hstack([A * dt, B * dt,
                                  np.zeros((n_states, n_inputs))]),
                       np.hstack([np.zeros((n_inputs, n_states + n_inputs)),
                                  np.identity(n_inputs)]),
                       np.zeros((n_inputs, n_states + 2 * n_inputs))])
        # One matrix exponential yields the exact one-step update for the
        # piecewise-linear input (transposed because state/input are kept
        # as row vectors here).
        expMT = linalg.expm(transpose(M))
        Ad = expMT[:n_states, :n_states]
        Bd1 = expMT[n_states+n_inputs:, :n_states]
        Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
        for i in xrange(1, n_steps):
            xout[i] = (dot(xout[i-1], Ad) + dot(U[i-1], Bd0) +
                       dot(U[i], Bd1))
    # Output equation y = C x + D u evaluated at every sample.
    yout = (squeeze(dot(xout, transpose(C))) +
            squeeze(dot(U, transpose(D))))
    return T, squeeze(yout), squeeze(xout)


def _default_response_times(A, n):
    """Compute a reasonable set of time samples for the response time.

    This function is used by `impulse`, `impulse2`, `step` and `step2`
    to compute the response time when the `T` argument to the function
    is None.

    Parameters
    ----------
    A : array_like
        The system matrix, which is square.
    n : int
        The number of time samples to generate.

    Returns
    -------
    t : ndarray
        The 1-D array of length `n` of time samples at which the response
        is to be computed.
    """
    # Create a reasonable time interval.
    # TODO: This could use some more work.
    # For example, what is expected when the system is unstable?
    # Uses the slowest (smallest |real part|) eigenvalue as a time
    # constant and samples 7 of those time constants.
    vals = linalg.eigvals(A)
    r = min(abs(real(vals)))
    if r == 0.0:
        r = 1.0
    tc = 1.0 / r
    t = linspace(0.0, 7 * tc, n)
    return t


def impulse(system, X0=None, T=None, N=None):
    """Impulse response of continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)

    X0 : array_like, optional
        Initial state-vector.  Defaults to zero.
    T : array_like, optional
        Time points.  Computed if not given.
    N : int, optional
        The number of time points to compute (if `T` is not given).

    Returns
    -------
    T : ndarray
        A 1-D array of time points.
    yout : ndarray
        A 1-D array containing the impulse response of the system (except
        for singularities at zero).

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('impulse can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    # A unit impulse at t=0 is equivalent to starting from the initial
    # state B (plus any user-supplied X0) with zero input.
    if X0 is None:
        X = squeeze(sys.B)
    else:
        X = squeeze(sys.B + X0)
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)
    else:
        T = asarray(T)

    _, h, _ = lsim(sys, 0., T, X, interp=False)
    return T, h


def impulse2(system, X0=None, T=None, N=None, **kwargs):
    """
    Impulse response of a single-input, continuous-time linear system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)

    X0 : 1-D array_like, optional
        The initial condition of the state vector.  Default: 0 (the
        zero vector).
    T : 1-D array_like, optional
        The time steps at which the input is defined and at which the
        output is desired.  If `T` is not given, the function will
        generate a set of time samples automatically.
    N : int, optional
        Number of time points to compute.  Default: 100.
    kwargs : various types
        Additional keyword arguments are passed on to the function
        `scipy.signal.lsim2`, which in turn passes them on to
        `scipy.integrate.odeint`; see the latter's documentation for
        information about these arguments.

    Returns
    -------
    T : ndarray
        The time values for the output.
    yout : ndarray
        The output response of the system.

    See Also
    --------
    impulse, lsim2, integrate.odeint

    Notes
    -----
    The solution is generated by calling `scipy.signal.lsim2`, which uses
    the differential equation solver `scipy.integrate.odeint`.

    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.8.0

    Examples
    --------
    Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t)

    >>> from scipy import signal
    >>> system = ([1.0], [1.0, 2.0, 1.0])
    >>> t, y = signal.impulse2(system)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t, y)

    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('impulse2 can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    B = sys.B
    if B.shape[-1] != 1:
        raise ValueError("impulse2() requires a single-input system.")
    B = B.squeeze()
    if X0 is None:
        X0 = zeros_like(B)
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)

    # Move the impulse in the input to the initial conditions, and then
    # solve using lsim2().
    ic = B + X0
    Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs)
    return Tr, Yr


def step(system, X0=None, T=None, N=None):
    """Step response of continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)

    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int, optional
        Number of time points to compute if `T` is not given.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.

    See also
    --------
    scipy.signal.step2

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('step can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)
    else:
        T = asarray(T)
    # A step input is a constant-ones input vector fed to lsim.
    U = ones(T.shape, sys.A.dtype)
    vals = lsim(sys, U, T, X0=X0, interp=False)
    return vals[0], vals[1]


def step2(system, X0=None, T=None, N=None, **kwargs):
    """Step response of continuous-time system.

    This function is functionally the same as `scipy.signal.step`, but
    it uses the function `scipy.signal.lsim2` to compute the step
    response.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)

    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int, optional
        Number of time points to compute if `T` is not given.
    kwargs : various types
        Additional keyword arguments are passed on the function
        `scipy.signal.lsim2`, which in turn passes them on to
        `scipy.integrate.odeint`.  See the documentation for
        `scipy.integrate.odeint` for information about these arguments.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.

    See also
    --------
    scipy.signal.step

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    ..
    versionadded:: 0.8.0

    """
    if isinstance(system, lti):
        sys = system._as_ss()
    elif isinstance(system, dlti):
        raise AttributeError('step2 can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_ss()
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(sys.A, N)
    else:
        T = asarray(T)
    # Constant-ones input == unit step; delegate the integration to lsim2.
    U = ones(T.shape, sys.A.dtype)
    vals = lsim2(sys, U, T, X0=X0, **kwargs)
    return vals[0], vals[1]


def bode(system, w=None, n=100):
    """
    Calculate Bode magnitude and phase data of a continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)

    w : array_like, optional
        Array of frequencies (in rad/s).  Magnitude and phase data is
        calculated for every value in this array.  If not given a reasonable
        set will be calculated.
    n : int, optional
        Number of frequency points to compute if `w` is not given.  The `n`
        frequencies are logarithmically spaced in an interval chosen to
        include the influence of the poles and zeros of the system.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/s]
    mag : 1D ndarray
        Magnitude array [dB]
    phase : 1D ndarray
        Phase array [deg]

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    >>> sys = signal.TransferFunction([1], [1, 1])
    >>> w, mag, phase = signal.bode(sys)

    >>> plt.figure()
    >>> plt.semilogx(w, mag)    # Bode magnitude plot
    >>> plt.figure()
    >>> plt.semilogx(w, phase)  # Bode phase plot
    >>> plt.show()

    """
    # Complex response from freqresp, then convert to dB magnitude and
    # unwrapped phase in degrees.
    w, y = freqresp(system, w=w, n=n)

    mag = 20.0 * numpy.log10(abs(y))
    phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi

    return w, mag, phase


def freqresp(system, w=None, n=10000):
    """Calculate the frequency response of a continuous-time system.

    Parameters
    ----------
    system : an instance of the `lti` class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1 (instance of `lti`)
            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)

    w : array_like, optional
        Array of frequencies (in rad/s).  Magnitude and phase data is
        calculated for every value in this array.  If not given, a reasonable
        set will be calculated.
    n : int, optional
        Number of frequency points to compute if `w` is not given.  The `n`
        frequencies are logarithmically spaced in an interval chosen to
        include the influence of the poles and zeros of the system.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/s]
    H : 1D ndarray
        Array of complex magnitude values

    Notes
    -----
    If (num, den) is passed in for ``system``, coefficients for both the
    numerator and denominator should be specified in descending exponent
    order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).

    Examples
    --------
    Generating the Nyquist plot of a transfer function

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    Transfer function: H(s) = 5 / (s-1)^3

    >>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5])

    >>> w, H = signal.freqresp(s1)

    >>> plt.figure()
    >>> plt.plot(H.real, H.imag, "b")
    >>> plt.plot(H.real, -H.imag, "r")
    >>> plt.show()
    """
    if isinstance(system, lti):
        if isinstance(system, (TransferFunction, ZerosPolesGain)):
            sys = system
        else:
            sys = system._as_zpk()
    elif isinstance(system, dlti):
        raise AttributeError('freqresp can only be used with continuous-time '
                             'systems.')
    else:
        sys = lti(*system)._as_zpk()

    if sys.inputs != 1 or sys.outputs != 1:
        raise ValueError("freqresp() requires a SISO (single input, single "
                         "output) system.")

    # `worN` follows the freqs()/freqs_zpk() convention: an explicit array
    # of frequencies, or an integer count when `w` is not supplied.
    if w is not None:
        worN = w
    else:
        worN = n

    if isinstance(sys, TransferFunction):
        # In the call to freqs(), sys.num.ravel() is used because there are
        # cases where sys.num is a 2-D array with a single row.
        w, h = freqs(sys.num.ravel(), sys.den, worN=worN)

    elif isinstance(sys, ZerosPolesGain):
        w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN)

    return w, h


# This class will be used by place_poles to return its results
# see http://code.activestate.com/recipes/52308/
class Bunch:
    # Simple attribute container: keyword arguments become attributes.
    def __init__(self, **kwds):
        self.__dict__.update(kwds)


def _valid_inputs(A, B, poles, method, rtol, maxiter):
    """
    Check the poles come in complex conjugage pairs
    Check shapes of A, B and poles are compatible.
    Check the method chosen is compatible with provided poles
    Return update method to use and ordered poles
    """
    poles = np.asarray(poles)
    if poles.ndim > 1:
        raise ValueError("Poles must be a 1D array like.")
    # Will raise ValueError if poles do not come in complex conjugates pairs
    poles = _order_complex_poles(poles)
    if A.ndim > 2:
        raise ValueError("A must be a 2D array/matrix.")
    if B.ndim > 2:
        raise ValueError("B must be a 2D array/matrix")
    if A.shape[0] != A.shape[1]:
        raise ValueError("A must be square")
    if len(poles) > A.shape[0]:
        raise ValueError("maximum number of poles is %d but you asked for %d" %
                         (A.shape[0], len(poles)))
    if len(poles) < A.shape[0]:
        raise ValueError("number of poles is %d but you should provide %d" %
                         (len(poles), A.shape[0]))
    # A pole may not be repeated more than rank(B) times.
    r = np.linalg.matrix_rank(B)
    for p in poles:
        if sum(p == poles) > r:
            raise ValueError("at least one of the requested pole is repeated "
                             "more than rank(B) times")
    # Choose update method
    update_loop = _YT_loop
    if method not in ('KNV0','YT'):
        raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
    if method == "KNV0":
        update_loop = _KNV0_loop
        if not all(np.isreal(poles)):
            raise ValueError("Complex poles are not supported by KNV0")

    if maxiter < 1:
        raise ValueError("maxiter must be at least equal to 1")

    # We do not check rtol <= 0 as the user can use a negative rtol to
    # force maxiter iterations
    if rtol > 1:
        raise ValueError("rtol can not be greater than 1")

    return update_loop, poles


def _order_complex_poles(poles):
    """
    Check we have complex conjugates pairs and reorder P according to YT, ie
    real_poles, complex_i, conjugate complex_i, ....

    The lexicographic sort on the complex poles is added to help the user to
    compare sets of poles.
""" ordered_poles = np.sort(poles[np.isreal(poles)]) im_poles = [] for p in np.sort(poles[np.imag(poles) < 0]): if np.conj(p) in poles: im_poles.extend((p, np.conj(p))) ordered_poles = np.hstack((ordered_poles, im_poles)) if poles.shape[0] != len(ordered_poles): raise ValueError("Complex poles must come with their conjugates") return ordered_poles def _KNV0(B, ker_pole, transfer_matrix, j, poles): """ Algorithm "KNV0" Kautsky et Al. Robust pole assignment in linear state feedback, Int journal of Control 1985, vol 41 p 1129->1155 http://la.epfl.ch/files/content/sites/la/files/ users/105941/public/KautskyNicholsDooren """ # Remove xj form the base transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1) # If we QR this matrix in full mode Q=Q0|Q1 # then Q1 will be a single column orthogonnal to # Q0, that's what we are looking for ! # After merge of gh-4249 great speed improvements could be achieved # using QR updates instead of full QR in the line below # To debug with numpy qr uncomment the line below # Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete") Q, R = s_qr(transfer_matrix_not_j, mode="full") mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T) yj = np.dot(mat_ker_pj, Q[:, -1]) # If Q[:, -1] is "almost" orthogonal to ker_pole[j] its # projection into ker_pole[j] will yield a vector # close to 0. As we are looking for a vector in ker_pole[j] # simply stick with transfer_matrix[:, j] (unless someone provides me with # a better choice ?) 
if not np.allclose(yj, 0): xj = yj/np.linalg.norm(yj) transfer_matrix[:, j] = xj # KNV does not support complex poles, using YT technique the two lines # below seem to work 9 out of 10 times but it is not reliable enough: # transfer_matrix[:, j]=real(xj) # transfer_matrix[:, j+1]=imag(xj) # Add this at the beginning of this function if you wish to test # complex support: # if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])): # return # Problems arise when imag(xj)=>0 I have no idea on how to fix this def _YT_real(ker_pole, Q, transfer_matrix, i, j): """ Applies algorithm from YT section 6.1 page 19 related to real pairs """ # step 1 page 19 u = Q[:, -2, np.newaxis] v = Q[:, -1, np.newaxis] # step 2 page 19 m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) - np.dot(v, u.T)), ker_pole[j]) # step 3 page 19 um, sm, vm = np.linalg.svd(m) # mu1, mu2 two first columns of U => 2 first lines of U.T mu1, mu2 = um.T[:2, :, np.newaxis] # VM is V.T with numpy we want the first two lines of V.T nu1, nu2 = vm[:2, :, np.newaxis] # what follows is a rough python translation of the formulas # in section 6.2 page 20 (step 4) transfer_matrix_j_mo_transfer_matrix_j = np.vstack(( transfer_matrix[:, i, np.newaxis], transfer_matrix[:, j, np.newaxis])) if not np.allclose(sm[0], sm[1]): ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1) ker_pole_i_nu1 = np.dot(ker_pole[j], nu1) ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1)) else: ker_pole_ij = np.vstack(( np.hstack((ker_pole[i], np.zeros(ker_pole[i].shape))), np.hstack((np.zeros(ker_pole[j].shape), ker_pole[j])) )) mu_nu_matrix = np.vstack( (np.hstack((mu1, mu2)), np.hstack((nu1, nu2))) ) ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix) transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T), transfer_matrix_j_mo_transfer_matrix_j) if not np.allclose(transfer_matrix_ij, 0): transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij / np.linalg.norm(transfer_matrix_ij)) transfer_matrix[:, i] = 
transfer_matrix_ij[ :transfer_matrix[:, i].shape[0], 0 ] transfer_matrix[:, j] = transfer_matrix_ij[ transfer_matrix[:, i].shape[0]:, 0 ] else: # As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to # Vect{ker_pole_mu_nu} assign transfer_matrixi/transfer_matrix_j to # ker_pole_mu_nu and iterate. As we are looking for a vector in # Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help # (that's a guess, not a claim !) transfer_matrix[:, i] = ker_pole_mu_nu[ :transfer_matrix[:, i].shape[0], 0 ] transfer_matrix[:, j] = ker_pole_mu_nu[ transfer_matrix[:, i].shape[0]:, 0 ] def _YT_complex(ker_pole, Q, transfer_matrix, i, j): """ Applies algorithm from YT section 6.2 page 20 related to complex pairs """ # step 1 page 20 ur = np.sqrt(2)*Q[:, -2, np.newaxis] ui = np.sqrt(2)*Q[:, -1, np.newaxis] u = ur + 1j*ui # step 2 page 20 ker_pole_ij = ker_pole[i] m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) - np.dot(np.conj(u), u.T)), ker_pole_ij) # step 3 page 20 e_val, e_vec = np.linalg.eig(m) # sort eigenvalues according to their module e_val_idx = np.argsort(np.abs(e_val)) mu1 = e_vec[:, e_val_idx[-1], np.newaxis] mu2 = e_vec[:, e_val_idx[-2], np.newaxis] # what follows is a rough python translation of the formulas # in section 6.2 page 20 (step 4) # remember transfer_matrix_i has been split as # transfer_matrix[i]=real(transfer_matrix_i) and # transfer_matrix[j]=imag(transfer_matrix_i) transfer_matrix_j_mo_transfer_matrix_j = ( transfer_matrix[:, i, np.newaxis] + 1j*transfer_matrix[:, j, np.newaxis] ) if not np.allclose(np.abs(e_val[e_val_idx[-1]]), np.abs(e_val[e_val_idx[-2]])): ker_pole_mu = np.dot(ker_pole_ij, mu1) else: mu1_mu2_matrix = np.hstack((mu1, mu2)) ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix) transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)), transfer_matrix_j_mo_transfer_matrix_j) if not np.allclose(transfer_matrix_i_j, 0): transfer_matrix_i_j = (transfer_matrix_i_j / 
                               np.linalg.norm(transfer_matrix_i_j))
        transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
        transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
    else:
        # same idea as in YT_real
        transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
        transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])


def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
    """
    Algorithm "YT" Tits, Yang. Globally Convergent
    Algorithms for Robust Pole Assignment by State Feedback
    http://drum.lib.umd.edu/handle/1903/5598
    The poles P have to be sorted accordingly to section 6.2 page 20

    Returns ``(stop, cur_rtol, nb_try)``: whether convergence was
    reached, the last relative tolerance achieved and the number of
    iterations performed.  ``transfer_matrix`` is updated in place.
    """
    # The IEEE edition of the YT paper gives useful information on the
    # optimal update order for the real poles in order to minimize the number
    # of times we have to loop over all poles, see page 1442
    nb_real = poles[np.isreal(poles)].shape[0]
    # hnb => Half Nb Real
    hnb = nb_real // 2

    # Stick to the indices in the paper and then remove one to get numpy array
    # index it is a bit easier to link the code to the paper this way even if
    # it is not very clean. The paper is unclear about what should be done
    # when there is only one real pole => use KNV0 on this real pole seem to
    # work
    if nb_real > 0:
        # update the biggest real pole with the smallest one
        update_order = [[nb_real], [1]]
    else:
        update_order = [[],[]]

    r_comp = np.arange(nb_real+1, len(poles)+1, 2)
    # step 1.a
    r_p = np.arange(1, hnb+nb_real % 2)
    update_order[0].extend(2*r_p)
    update_order[1].extend(2*r_p+1)
    # step 1.b
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 1.c
    r_p = np.arange(1, hnb+1)
    update_order[0].extend(2*r_p-1)
    update_order[1].extend(2*r_p)
    # step 1.d
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 2.a
    r_j = np.arange(2, hnb+nb_real % 2)
    for j in r_j:
        for i in range(1, hnb+1):
            update_order[0].append(i)
            update_order[1].append(i+j)
    # step 2.b
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 2.c
    r_j = np.arange(2, hnb+nb_real % 2)
    for j in r_j:
        for i in range(hnb+1, nb_real+1):
            idx_1 = i+j
            if idx_1 > nb_real:
                idx_1 = i+j-nb_real
            update_order[0].append(i)
            update_order[1].append(idx_1)
    # step 2.d
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 3.a
    for i in range(1, hnb+1):
        update_order[0].append(i)
        update_order[1].append(i+hnb)
    # step 3.b
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)

    # Convert the 1-based paper indices to 0-based numpy indices.
    update_order = np.array(update_order).T-1
    stop = False
    nb_try = 0
    while nb_try < maxiter and not stop:
        det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
        for i, j in update_order:
            if i == j:
                # An (i, i) pair means "update this single real pole with
                # the rank-1 KNV step".
                assert i == 0, "i!=0 for KNV call in YT"
                assert np.isreal(poles[i]), "calling KNV on a complex pole"
                _KNV0(B, ker_pole, transfer_matrix, i, poles)
            else:
                transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
                                                    axis=1)
                # after merge of gh-4249 great speed improvements could be
                # achieved using QR updates instead of full QR in the line
                # below
                # to debug with numpy qr uncomment the line below
                # Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete")
                Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")

                if np.isreal(poles[i]):
                    assert np.isreal(poles[j]), "mixing real and complex " + \
                        "in YT_real" + str(poles)
                    _YT_real(ker_pole, Q, transfer_matrix, i, j)
                else:
                    assert ~np.isreal(poles[i]), "mixing real and complex " + \
                        "in YT_real" + str(poles)
                    _YT_complex(ker_pole, Q, transfer_matrix, i, j)

        det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
                                      np.abs(np.linalg.det(transfer_matrix))))
        cur_rtol = np.abs(
            (det_transfer_matrix -
             det_transfer_matrixb) /
            det_transfer_matrix)
        if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
            # Convergence test from YT page 21
            stop = True
        nb_try += 1
    return stop, cur_rtol, nb_try


def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
    """
    Loop over all poles one by one and apply KNV method 0 algorithm

    Returns ``(stop, cur_rtol, nb_try)`` exactly like `_YT_loop`;
    ``transfer_matrix`` is updated in place.
    """
    # This method is useful only because we need to be able to call
    # _KNV0 from YT without looping over all poles, otherwise it would
    # have been fine to mix _KNV0_loop and _KNV0 in a single function
    stop = False
    nb_try = 0
    while nb_try < maxiter and not stop:
        det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
        for j in range(B.shape[0]):
            _KNV0(B, ker_pole, transfer_matrix, j, poles)

        det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
                                      np.abs(np.linalg.det(transfer_matrix))))
        cur_rtol = np.abs((det_transfer_matrix -
                           det_transfer_matrixb) /
                          det_transfer_matrix)
        if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
            # Convergence test from YT page 21
            stop = True

        nb_try += 1
    return stop, cur_rtol, nb_try


def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
    """
    Compute K such that eigenvalues (A
    - dot(B, K))=poles.

    K is the gain matrix such as the plant described by the linear system
    ``AX+BU`` will have its closed-loop poles, i.e the eigenvalues ``A - B*K``,
    as close as possible to those asked for in poles.

    SISO, MISO and MIMO systems are supported.

    Parameters
    ----------
    A, B : ndarray
        State-space representation of linear system ``AX + BU``.
    poles : array_like
        Desired real poles and/or complex conjugates poles.
        Complex poles are only supported with ``method="YT"`` (default).
    method: {'YT', 'KNV0'}, optional
        Which method to choose to find the gain matrix K. One of:

            - 'YT': Yang Tits
            - 'KNV0': Kautsky, Nichols, Van Dooren update method 0

        See References and Notes for details on the algorithms.
    rtol: float, optional
        After each iteration the determinant of the eigenvectors of
        ``A - B*K`` is compared to its previous value, when the relative
        error between these two values becomes lower than `rtol` the algorithm
        stops.  Default is 1e-3.
    maxiter: int, optional
        Maximum number of iterations to compute the gain matrix.
        Default is 30.

    Returns
    -------
    full_state_feedback : Bunch object
        full_state_feedback is composed of:
            gain_matrix : 1-D ndarray
                The closed loop matrix K such as the eigenvalues of ``A-BK``
                are as close as possible to the requested poles.
            computed_poles : 1-D ndarray
                The poles corresponding to ``A-BK`` sorted as first the real
                poles in increasing order, then the complex congugates in
                lexicographic order.
            requested_poles : 1-D ndarray
                The poles the algorithm was asked to place sorted as above,
                they may differ from what was achieved.
            X : 2-D ndarray
                The transfer matrix such as ``X * diag(poles) =
                (A - B*K)*X`` (see Notes)
            rtol : float
                The relative tolerance achieved on ``det(X)`` (see Notes).
                `rtol` will be NaN if it is possible to solve the system
                ``diag(poles) = (A - B*K)``, or 0 when the optimization
                algorithms can't do anything i.e when ``B.shape[1] == 1``.
            nb_iter : int
                The number of iterations performed before converging.
                `nb_iter` will be NaN if it is possible to solve the system
                ``diag(poles) = (A - B*K)``, or 0 when the optimization
                algorithms can't do anything i.e when ``B.shape[1] == 1``.

    Notes
    -----
    The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et
    al. (KNV) paper [1]_.  KNV relies on rank-1 updates to find the transfer
    matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
    rank-2 updates.  This yields on average more robust solutions (see [2]_
    pp 21-22), furthermore the YT algorithm supports complex poles whereas
    KNV does not in its original version.  Only update method 0 proposed by
    KNV has been implemented here, hence the name ``'KNV0'``.

    KNV extended to complex poles is used in Matlab's ``place`` function, YT
    is distributed under a non-free licence by Slicot under the name
    ``robpole``.  It is unclear and undocumented how KNV0 has been extended
    to complex poles (Tits and Yang claim on page 14 of their paper that
    their method can not be used to extend KNV to complex poles), therefore
    only YT supports them in this implementation.

    As the solution to the problem of pole placement is not unique for MIMO
    systems, both methods start with a tentative transfer matrix which is
    altered in various way to increase its determinant.  Both methods have
    been proven to converge to a stable solution, however depending on the
    way the initial transfer matrix is chosen they will converge to different
    solutions and therefore there is absolutely no guarantee that using
    ``'KNV0'`` will yield results similar to Matlab's or any other
    implementation of these algorithms.

    Using the default method ``'YT'`` should be fine in most cases;
    ``'KNV0'`` is only provided because it is needed by ``'YT'`` in some
    specific cases.  Furthermore ``'YT'`` gives on average more robust
    results than ``'KNV0'`` when ``abs(det(X))`` is used as a robustness
    indicator.

    [2]_ is available as a technical report on the following URL:
    http://drum.lib.umd.edu/handle/1903/5598

    References
    ----------
    .. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
           in linear state feedback", International Journal of Control, Vol. 41
           pp. 1129-1155, 1985.
    .. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
           pole assignment by state feedback, IEEE Transactions on Automatic
           Control, Vol. 41, pp. 1432-1452, 1996.

    Examples
    --------
    A simple example demonstrating real pole placement using both KNV and YT
    algorithms.  This is example number 1 from section 4 of the reference KNV
    publication ([1]_):

    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt

    >>> A = np.array([[ 1.380,  -0.2077,  6.715, -5.676  ],
    ...               [-0.5814, -4.290,   0,      0.6750 ],
    ...               [ 1.067,   4.273,  -6.654,  5.893  ],
    ...               [ 0.0480,  4.273,   1.343,  -2.104 ]])
    >>> B = np.array([[ 0,      5.679 ],
    ...               [ 1.136,  1.136 ],
    ...               [ 0,      0,    ],
    ...               [-3.146,  0     ]])
    >>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])

    Now compute K with KNV method 0, with the default YT method and with the
    YT method while forcing 100 iterations of the algorithm and print some
    results after each call.

    >>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
    >>> fsf1.gain_matrix
    array([[ 0.20071427, -0.96665799,  0.24066128, -0.10279785],
           [ 0.50587268,  0.57779091,  0.51795763, -0.41991442]])

    >>> fsf2 = signal.place_poles(A, B, P)  # uses YT method
    >>> fsf2.computed_poles
    array([-8.6659, -5.0566, -0.5   , -0.2   ])

    >>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
    >>> fsf3.X
    array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j,  0.74823657+0.j],
           [-0.04977751+0.j, -0.80872954+0.j,  0.13566234+0.j, -0.29322906+0.j],
           [-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
           [ 0.22267347+0.j,  0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])

    The absolute value of the determinant of X is a good indicator to check
    the robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at
    maximizing it.  Below a comparison of the robustness of the results
    above:

    >>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
    True
    >>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
    True

    Now a simple example for complex poles:

    >>> A = np.array([[ 0,  7/3.,  0,   0   ],
    ...               [ 0,   0,    0,  7/9. ],
    ...               [ 0,   0,    0,   0   ],
    ...               [ 0,   0,    0,   0   ]])
    >>> B = np.array([[ 0,  0 ],
    ...               [ 0,  0 ],
    ...               [ 1,  0 ],
    ...               [ 0,  1 ]])
    >>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
    >>> fsf = signal.place_poles(A, B, P, method='YT')

    We can plot the desired and computed poles in the complex plane:

    >>> t = np.linspace(0, 2*np.pi, 401)
    >>> plt.plot(np.cos(t), np.sin(t), 'k--')  # unit circle
    >>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
    ...          'wo', label='Desired')
    >>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
    ...          label='Placed')

    >>> plt.grid()
    >>> plt.axis('image')
    >>> plt.axis([-1.1, 1.1, -1.1, 1.1])
    >>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)

    """
    # Move away all the inputs checking, it only adds noise to the code
    update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)

    # The current value of the relative tolerance we achieved
    cur_rtol = 0
    # The number of iterations needed before converging
    nb_iter = 0

    # Step A: QR decomposition of B page 1132 KN
    # to debug with numpy qr uncomment the line below
    # u, z = np.linalg.qr(B, mode="complete")
    u, z = s_qr(B, mode="full")
    rankB = np.linalg.matrix_rank(B)
    u0 = u[:, :rankB]
    u1 = u[:, rankB:]
    z = z[:rankB, :]

    # If we can use the identity matrix as X the solution is obvious
    if B.shape[0] == rankB:
        # if B is square and full rank there is only one solution
        # such as (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0])
        # i.e K=inv(B)*(diag(P)-A)
        # if B has as many lines as its rank (but not square) there are many
        # solutions and we can choose one using least squares
        # => use lstsq in both cases.
        # In both cases the transfer matrix X will be eye(A.shape[0]) and I
        # can hardly think of a better one so there is nothing to optimize
        #
        # for complex poles we use the following trick
        #
        # |a -b| has for eigenvalues a+b and a-b
        # |b  a|
        #
        # |a+bi  0| has the obvious eigenvalues a+bi and a-bi
        # |0  a-bi|
        #
        # e.g solving the first one in R gives the solution
        # for the second one in C
        diag_poles = np.zeros(A.shape)
        idx = 0
        while idx < poles.shape[0]:
            p = poles[idx]
            diag_poles[idx, idx] = np.real(p)
            if ~np.isreal(p):
                # Embed the conjugate pair as a real 2x2 rotation block.
                diag_poles[idx, idx+1] = -np.imag(p)
                diag_poles[idx+1, idx+1] = np.real(p)
                diag_poles[idx+1, idx] = np.imag(p)
                idx += 1  # skip next one
            idx += 1
        gain_matrix = np.linalg.lstsq(B, diag_poles-A)[0]
        transfer_matrix = np.eye(A.shape[0])
        cur_rtol = np.nan
        nb_iter = np.nan
    else:
        # step A (p1144 KNV) and begining of step F: decompose
        # dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
        # in the same loop
        ker_pole = []

        # flag to skip the conjugate of a complex pole
        skip_conjugate = False
        # select orthonormal base ker_pole for each Pole and vectors for
        # transfer_matrix
        for j in range(B.shape[0]):
            if skip_conjugate:
                skip_conjugate = False
                continue
            pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T

            # after QR Q=Q0|Q1
            # only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix.
            # Q1 is orthogonnal to Q0 and will be multiplied by the zeros in
            # R when using mode "complete". In default mode Q1 and the zeros
            # in R are not computed

            # To debug with numpy qr uncomment the line below
            # Q, _ = np.linalg.qr(pole_space_j, mode="complete")
            Q, _ = s_qr(pole_space_j, mode="full")

            ker_pole_j = Q[:, pole_space_j.shape[1]:]

            # We want to select one vector in ker_pole_j to build the transfer
            # matrix, however qr returns sometimes vectors with zeros on the
            # same line for each pole and this yields very long convergence
            # times.
            # Or some other times a set of vectors, one with zero imaginary
            # part and one (or several) with imaginary parts.  After trying
            # many ways to select the best possible one (eg ditch vectors
            # with zero imaginary part for complex poles) I ended up summing
            # all vectors in ker_pole_j, this solves 100% of the problems and
            # is a valid choice for transfer_matrix.
            # This way for complex poles we are sure to have a non zero
            # imaginary part that way, and the problem of lines full of zeros
            # in transfer_matrix is solved too as when a vector from
            # ker_pole_j has a zero the other one(s) when
            # ker_pole_j.shape[1]>1) for sure won't have a zero there.

            transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
            transfer_matrix_j = (transfer_matrix_j /
                                 np.linalg.norm(transfer_matrix_j))
            if ~np.isreal(poles[j]):  # complex pole
                transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
                                               np.imag(transfer_matrix_j)])
                ker_pole.extend([ker_pole_j, ker_pole_j])

                # Skip next pole as it is the conjugate
                skip_conjugate = True
            else:  # real pole, nothing to do
                ker_pole.append(ker_pole_j)

            if j == 0:
                transfer_matrix = transfer_matrix_j
            else:
                transfer_matrix = np.hstack((transfer_matrix,
                                             transfer_matrix_j))

        if rankB > 1:  # otherwise there is nothing we can optimize
            stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
                                                  poles, B, maxiter, rtol)
            if not stop and rtol > 0:
                # if rtol<=0 the user has probably done that on purpose,
                # don't annoy him
                err_msg = (
                    "Convergence was not reached after maxiter iterations.\n"
                    "You asked for a relative tolerance of %f we got %f" %
                    (rtol, cur_rtol)
                    )
                warnings.warn(err_msg)

        # reconstruct transfer_matrix to match complex conjugate pairs,
        # ie transfer_matrix_j/transfer_matrix_j+1 are
        # Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after
        transfer_matrix = transfer_matrix.astype(complex)
        idx = 0
        while idx < poles.shape[0]-1:
            if ~np.isreal(poles[idx]):
                rel = transfer_matrix[:, idx].copy()
                img = transfer_matrix[:, idx+1]
                # rel will be an array referencing a column of transfer_matrix
                # if we don't copy() it will changer after the next line and
                # and the line after will not yield the correct value
                transfer_matrix[:, idx] = rel-1j*img
                transfer_matrix[:, idx+1] = rel+1j*img
                idx += 1  # skip next one
            idx += 1

        try:
            # Solve X * diag(poles) = (A + B*K) * X for K (KNV formulation).
            m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
                                                          transfer_matrix.T)).T
            gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
        except np.linalg.LinAlgError:
            raise ValueError("The poles you've chosen can't be placed. "
                             "Check the controllability matrix and try "
                             "another set of poles")

    # Beware: Kautsky solves A+BK but the usual form is A-BK
    gain_matrix = -gain_matrix
    # K still contains complex with ~=0j imaginary parts, get rid of them
    gain_matrix = np.real(gain_matrix)

    full_state_feedback = Bunch()
    full_state_feedback.gain_matrix = gain_matrix
    full_state_feedback.computed_poles = _order_complex_poles(
        np.linalg.eig(A - np.dot(B, gain_matrix))[0]
        )
    full_state_feedback.requested_poles = poles
    full_state_feedback.X = transfer_matrix
    full_state_feedback.rtol = cur_rtol
    full_state_feedback.nb_iter = nb_iter

    return full_state_feedback


def dlsim(system, u, t=None, x0=None):
    """
    Simulate output of a discrete-time linear system.

    Parameters
    ----------
    system : tuple of array_like or instance of `dlti`
        A tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

            * 1: (instance of `dlti`)
            * 3: (num, den, dt)
            * 4: (zeros, poles, gain, dt)
            * 5: (A, B, C, D, dt)

    u : array_like
        An input array describing the input at each time `t` (interpolation is
        assumed between given times).  If there are multiple inputs, then each
        column of the rank-2 array represents an input.
    t : array_like, optional
        The time steps at which the input is defined.  If `t` is given, it
        must be the same length as `u`, and the final value in `t` determines
        the number of steps returned in the output.
    x0 : array_like, optional
        The initial conditions on the state vector (zero by default).

    Returns
    -------
    tout : ndarray
        Time values for the output, as a 1-D array.
yout : ndarray System response, as a 1-D array. xout : ndarray, optional Time-evolution of the state-vector. Only generated if the input is a `StateSpace` system. See Also -------- lsim, dstep, dimpulse, cont2discrete Examples -------- A simple integrator transfer function with a discrete time step of 1.0 could be implemented as: >>> from scipy import signal >>> tf = ([1.0,], [1.0, -1.0], 1.0) >>> t_in = [0.0, 1.0, 2.0, 3.0] >>> u = np.asarray([0.0, 0.0, 1.0, 1.0]) >>> t_out, y = signal.dlsim(tf, u, t=t_in) >>> y.T array([[ 0., 0., 0., 1.]]) """ # Convert system to dlti-StateSpace if isinstance(system, lti): raise AttributeError('dlsim can only be used with discrete-time dlti ' 'systems.') elif not isinstance(system, dlti): system = dlti(*system[:-1], dt=system[-1]) # Condition needed to ensure output remains compatible is_ss_input = isinstance(system, StateSpace) system = system._as_ss() u = np.atleast_1d(u) if u.ndim == 1: u = np.atleast_2d(u).T if t is None: out_samples = len(u) stoptime = (out_samples - 1) * system.dt else: stoptime = t[-1] out_samples = int(np.floor(stoptime / system.dt)) + 1 # Pre-build output arrays xout = np.zeros((out_samples, system.A.shape[0])) yout = np.zeros((out_samples, system.C.shape[0])) tout = np.linspace(0.0, stoptime, num=out_samples) # Check initial condition if x0 is None: xout[0, :] = np.zeros((system.A.shape[1],)) else: xout[0, :] = np.asarray(x0) # Pre-interpolate inputs into the desired time steps if t is None: u_dt = u else: if len(u.shape) == 1: u = u[:, np.newaxis] u_dt_interp = interp1d(t, u.transpose(), copy=False, bounds_error=True) u_dt = u_dt_interp(tout).transpose() # Simulate the system for i in range(0, out_samples - 1): xout[i+1, :] = (np.dot(system.A, xout[i, :]) + np.dot(system.B, u_dt[i, :])) yout[i, :] = (np.dot(system.C, xout[i, :]) + np.dot(system.D, u_dt[i, :])) # Last point yout[out_samples-1, :] = (np.dot(system.C, xout[out_samples-1, :]) + np.dot(system.D, u_dt[out_samples-1, :])) if is_ss_input: 
return tout, yout, xout else: return tout, yout def dimpulse(system, x0=None, t=None, n=None): """ Impulse response of discrete-time system. Parameters ---------- system : tuple of array_like or instance of `dlti` A tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1: (instance of `dlti`) * 3: (num, den, dt) * 4: (zeros, poles, gain, dt) * 5: (A, B, C, D, dt) x0 : array_like, optional Initial state-vector. Defaults to zero. t : array_like, optional Time points. Computed if not given. n : int, optional The number of time points to compute (if `t` is not given). Returns ------- tout : ndarray Time values for the output, as a 1-D array. yout : ndarray Impulse response of system. Each element of the tuple represents the output of the system based on an impulse in each input. See Also -------- impulse, dstep, dlsim, cont2discrete """ # Convert system to dlti-StateSpace if isinstance(system, dlti): system = system._as_ss() elif isinstance(system, lti): raise AttributeError('dimpulse can only be used with discrete-time ' 'dlti systems.') else: system = dlti(*system[:-1], dt=system[-1])._as_ss() # Default to 100 samples if unspecified if n is None: n = 100 # If time is not specified, use the number of samples # and system dt if t is None: t = np.linspace(0, n * system.dt, n, endpoint=False) else: t = np.asarray(t) # For each input, implement a step change yout = None for i in range(0, system.inputs): u = np.zeros((t.shape[0], system.inputs)) u[0, i] = 1.0 one_output = dlsim(system, u, t=t, x0=x0) if yout is None: yout = (one_output[1],) else: yout = yout + (one_output[1],) tout = one_output[0] return tout, yout def dstep(system, x0=None, t=None, n=None): """ Step response of discrete-time system. Parameters ---------- system : tuple of array_like A tuple describing the system. 
The following gives the number of elements in the tuple and the interpretation: * 1: (instance of `dlti`) * 3: (num, den, dt) * 4: (zeros, poles, gain, dt) * 5: (A, B, C, D, dt) x0 : array_like, optional Initial state-vector. Defaults to zero. t : array_like, optional Time points. Computed if not given. n : int, optional The number of time points to compute (if `t` is not given). Returns ------- tout : ndarray Output time points, as a 1-D array. yout : ndarray Step response of system. Each element of the tuple represents the output of the system based on a step response to each input. See Also -------- step, dimpulse, dlsim, cont2discrete """ # Convert system to dlti-StateSpace if isinstance(system, dlti): system = system._as_ss() elif isinstance(system, lti): raise AttributeError('dstep can only be used with discrete-time dlti ' 'systems.') else: system = dlti(*system[:-1], dt=system[-1])._as_ss() # Default to 100 samples if unspecified if n is None: n = 100 # If time is not specified, use the number of samples # and system dt if t is None: t = np.linspace(0, n * system.dt, n, endpoint=False) else: t = np.asarray(t) # For each input, implement a step change yout = None for i in range(0, system.inputs): u = np.zeros((t.shape[0], system.inputs)) u[:, i] = np.ones((t.shape[0],)) one_output = dlsim(system, u, t=t, x0=x0) if yout is None: yout = (one_output[1],) else: yout = yout + (one_output[1],) tout = one_output[0] return tout, yout def dfreqresp(system, w=None, n=10000, whole=False): """ Calculate the frequency response of a discrete-time system. Parameters ---------- system : an instance of the `dlti` class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1 (instance of `dlti`) * 2 (numerator, denominator, dt) * 3 (zeros, poles, gain, dt) * 4 (A, B, C, D, dt) w : array_like, optional Array of frequencies (in radians/sample). Magnitude and phase data is calculated for every value in this array. 
If not given a reasonable set will be calculated. n : int, optional Number of frequency points to compute if `w` is not given. The `n` frequencies are logarithmically spaced in an interval chosen to include the influence of the poles and zeros of the system. whole : bool, optional Normally, if 'w' is not given, frequencies are computed from 0 to the Nyquist frequency, pi radians/sample (upper-half of unit-circle). If `whole` is True, compute frequencies from 0 to 2*pi radians/sample. Returns ------- w : 1D ndarray Frequency array [radians/sample] H : 1D ndarray Array of complex magnitude values Notes ----- If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``). .. versionadded:: 0.18.0 Examples -------- Generating the Nyquist plot of a transfer function >>> from scipy import signal >>> import matplotlib.pyplot as plt Transfer function: H(z) = 1 / (z^2 + 2z + 3) >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05) >>> w, H = signal.dfreqresp(sys) >>> plt.figure() >>> plt.plot(H.real, H.imag, "b") >>> plt.plot(H.real, -H.imag, "r") >>> plt.show() """ if isinstance(system, dlti): if isinstance(system, (TransferFunction, ZerosPolesGain)): sys = system else: sys = system._as_zpk() elif isinstance(system, lti): raise AttributeError('dfreqresp can only be used with discrete-time ' 'systems.') else: sys = dlti(*system[:-1], dt=system[-1])._as_zpk() if sys.inputs != 1 or sys.outputs != 1: raise ValueError("dfreqresp requires a SISO (single input, single " "output) system.") if w is not None: worN = w else: worN = n if isinstance(sys, TransferFunction): # Convert numerator and denominator from polynomials in the variable # 'z' to polynomials in the variable 'z^-1', as freqz expects. 
num, den = TransferFunction._z_to_zinv(sys.num.ravel(), sys.den) w, h = freqz(num, den, worN=worN, whole=whole) elif isinstance(system, ZerosPolesGain): w, h = freqz_zpk(sys.zeros, sys.poles, sys.gain, worN=worN, whole=whole) return w, h def dbode(system, w=None, n=100): """ Calculate Bode magnitude and phase data of a discrete-time system. Parameters ---------- system : an instance of the LTI class or a tuple describing the system. The following gives the number of elements in the tuple and the interpretation: * 1 (instance of `dlti`) * 2 (num, den, dt) * 3 (zeros, poles, gain, dt) * 4 (A, B, C, D, dt) w : array_like, optional Array of frequencies (in radians/sample). Magnitude and phase data is calculated for every value in this array. If not given a reasonable set will be calculated. n : int, optional Number of frequency points to compute if `w` is not given. The `n` frequencies are logarithmically spaced in an interval chosen to include the influence of the poles and zeros of the system. Returns ------- w : 1D ndarray Frequency array [rad/time_unit] mag : 1D ndarray Magnitude array [dB] phase : 1D ndarray Phase array [deg] Notes ----- If (num, den) is passed in for ``system``, coefficients for both the numerator and denominator should be specified in descending exponent order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``). .. versionadded:: 0.18.0 Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt Transfer function: H(z) = 1 / (z^2 + 2z + 3) >>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05) Equivalent: sys.bode() >>> w, mag, phase = signal.dbode(sys) >>> plt.figure() >>> plt.semilogx(w, mag) # Bode magnitude plot >>> plt.figure() >>> plt.semilogx(w, phase) # Bode phase plot >>> plt.show() """ w, y = dfreqresp(system, w=w, n=n) if isinstance(system, dlti): dt = system.dt else: dt = system[-1] mag = 20.0 * numpy.log10(abs(y)) phase = numpy.rad2deg(numpy.unwrap(numpy.angle(y))) return w / dt, mag, phase
mit
sunzhxjs/JobGIS
lib/python2.7/site-packages/pandas/tests/test_series.py
9
288883
# coding=utf-8 # pylint: disable-msg=E1101,W0612 import re import sys from datetime import datetime, timedelta import operator import string from inspect import getargspec from itertools import product, starmap from distutils.version import LooseVersion import warnings import random import nose from numpy import nan, inf import numpy as np import numpy.ma as ma import pandas as pd from pandas import (Index, Series, DataFrame, isnull, notnull, bdate_range, date_range, period_range, timedelta_range, _np_version_under1p8) from pandas.core.index import MultiIndex from pandas.core.indexing import IndexingError from pandas.tseries.period import PeriodIndex from pandas.tseries.index import Timestamp, DatetimeIndex from pandas.tseries.tdi import Timedelta, TimedeltaIndex import pandas.core.common as com import pandas.core.config as cf import pandas.lib as lib import pandas.core.datetools as datetools import pandas.core.nanops as nanops from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long from pandas import compat from pandas.util.testing import (assert_series_equal, assert_almost_equal, assert_frame_equal, assert_index_equal, ensure_clean) import pandas.util.testing as tm #------------------------------------------------------------------------------ # Series test cases JOIN_TYPES = ['inner', 'outer', 'left', 'right'] class CheckNameIntegration(object): _multiprocess_can_split_ = True def test_scalarop_preserve_name(self): result = self.ts * 2 self.assertEqual(result.name, self.ts.name) def test_copy_name(self): result = self.ts.copy() self.assertEqual(result.name, self.ts.name) def test_copy_index_name_checking(self): # don't want to be able to modify the index stored elsewhere after # making a copy self.ts.index.name = None self.assertIsNone(self.ts.index.name) self.assertIs(self.ts, self.ts) cp = self.ts.copy() cp.index.name = 'foo' com.pprint_thing(self.ts.index.name) self.assertIsNone(self.ts.index.name) def test_append_preserve_name(self): 
result = self.ts[:5].append(self.ts[5:]) self.assertEqual(result.name, self.ts.name) def test_dt_namespace_accessor(self): # GH 7207 # test .dt namespace accessor ok_for_base = ['year','month','day','hour','minute','second','weekofyear','week','dayofweek','weekday','dayofyear','quarter','freq','days_in_month','daysinmonth'] ok_for_period = ok_for_base + ['qyear'] ok_for_period_methods = ['strftime'] ok_for_dt = ok_for_base + ['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end', 'tz'] ok_for_dt_methods = ['to_period','to_pydatetime','tz_localize','tz_convert', 'normalize', 'strftime'] ok_for_td = ['days','seconds','microseconds','nanoseconds'] ok_for_td_methods = ['components','to_pytimedelta','total_seconds'] def get_expected(s, name): result = getattr(Index(s._values),prop) if isinstance(result, np.ndarray): if com.is_integer_dtype(result): result = result.astype('int64') elif not com.is_list_like(result): return result return Series(result,index=s.index) def compare(s, name): a = getattr(s.dt,prop) b = get_expected(s,prop) if not (com.is_list_like(a) and com.is_list_like(b)): self.assertEqual(a,b) else: tm.assert_series_equal(a,b) # datetimeindex for s in [Series(date_range('20130101',periods=5)), Series(date_range('20130101',periods=5,freq='s')), Series(date_range('20130101 00:00:00',periods=5,freq='ms'))]: for prop in ok_for_dt: # we test freq below if prop != 'freq': compare(s, prop) for prop in ok_for_dt_methods: getattr(s.dt, prop) result = s.dt.to_pydatetime() self.assertIsInstance(result,np.ndarray) self.assertTrue(result.dtype == object) result = s.dt.tz_localize('US/Eastern') expected = Series(DatetimeIndex(s.values).tz_localize('US/Eastern'),index=s.index) tm.assert_series_equal(result, expected) tz_result = result.dt.tz self.assertEqual(str(tz_result), 'US/Eastern') freq_result = s.dt.freq self.assertEqual(freq_result, DatetimeIndex(s.values, 
freq='infer').freq) # let's localize, then convert result = s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern') expected = Series(DatetimeIndex(s.values).tz_localize('UTC').tz_convert('US/Eastern'),index=s.index) tm.assert_series_equal(result, expected) # datetimeindex with tz s = Series(date_range('20130101',periods=5,tz='US/Eastern')) for prop in ok_for_dt: # we test freq below if prop != 'freq': compare(s, prop) for prop in ok_for_dt_methods: getattr(s.dt,prop) result = s.dt.to_pydatetime() self.assertIsInstance(result,np.ndarray) self.assertTrue(result.dtype == object) result = s.dt.tz_convert('CET') expected = Series(s._values.tz_convert('CET'),index=s.index) tm.assert_series_equal(result, expected) tz_result = result.dt.tz self.assertEqual(str(tz_result), 'CET') freq_result = s.dt.freq self.assertEqual(freq_result, DatetimeIndex(s.values, freq='infer').freq) # timedeltaindex for s in [Series(timedelta_range('1 day',periods=5),index=list('abcde')), Series(timedelta_range('1 day 01:23:45',periods=5,freq='s')), Series(timedelta_range('2 days 01:23:45.012345',periods=5,freq='ms'))]: for prop in ok_for_td: # we test freq below if prop != 'freq': compare(s, prop) for prop in ok_for_td_methods: getattr(s.dt, prop) result = s.dt.components self.assertIsInstance(result,DataFrame) tm.assert_index_equal(result.index,s.index) result = s.dt.to_pytimedelta() self.assertIsInstance(result,np.ndarray) self.assertTrue(result.dtype == object) result = s.dt.total_seconds() self.assertIsInstance(result,pd.Series) self.assertTrue(result.dtype == 'float64') freq_result = s.dt.freq self.assertEqual(freq_result, TimedeltaIndex(s.values, freq='infer').freq) # both index = date_range('20130101',periods=3,freq='D') s = Series(date_range('20140204',periods=3,freq='s'),index=index) tm.assert_series_equal(s.dt.year,Series(np.array([2014,2014,2014],dtype='int64'),index=index)) tm.assert_series_equal(s.dt.month,Series(np.array([2,2,2],dtype='int64'),index=index)) 
tm.assert_series_equal(s.dt.second,Series(np.array([0,1,2],dtype='int64'),index=index)) tm.assert_series_equal(s.dt.normalize(), pd.Series([s[0]] * 3, index=index)) # periodindex for s in [Series(period_range('20130101',periods=5,freq='D'))]: for prop in ok_for_period: # we test freq below if prop != 'freq': compare(s, prop) for prop in ok_for_period_methods: getattr(s.dt, prop) freq_result = s.dt.freq self.assertEqual(freq_result, PeriodIndex(s.values).freq) # test limited display api def get_dir(s): results = [ r for r in s.dt.__dir__() if not r.startswith('_') ] return list(sorted(set(results))) s = Series(date_range('20130101',periods=5,freq='D')) results = get_dir(s) tm.assert_almost_equal(results,list(sorted(set(ok_for_dt + ok_for_dt_methods)))) s = Series(period_range('20130101',periods=5,freq='D').asobject) results = get_dir(s) tm.assert_almost_equal(results, list(sorted(set(ok_for_period + ok_for_period_methods)))) # 11295 # ambiguous time error on the conversions s = Series(pd.date_range('2015-01-01', '2016-01-01', freq='T')) s = s.dt.tz_localize('UTC').dt.tz_convert('America/Chicago') results = get_dir(s) tm.assert_almost_equal(results, list(sorted(set(ok_for_dt + ok_for_dt_methods)))) expected = Series(pd.date_range('2015-01-01', '2016-01-01', freq='T', tz='UTC').tz_convert('America/Chicago')) tm.assert_series_equal(s, expected) # no setting allowed s = Series(date_range('20130101',periods=5,freq='D')) with tm.assertRaisesRegexp(ValueError, "modifications"): s.dt.hour = 5 # trying to set a copy with pd.option_context('chained_assignment','raise'): def f(): s.dt.hour[0] = 5 self.assertRaises(com.SettingWithCopyError, f) def test_dt_accessor_no_new_attributes(self): # https://github.com/pydata/pandas/issues/10673 s = Series(date_range('20130101',periods=5,freq='D')) with tm.assertRaisesRegexp(AttributeError, "You cannot add any new attribute"): s.dt.xlabel = "a" def test_strftime(self): # GH 10086 s = Series(date_range('20130101', periods=5)) result = 
s.dt.strftime('%Y/%m/%d') expected = Series(['2013/01/01', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05']) tm.assert_series_equal(result, expected) s = Series(date_range('2015-02-03 11:22:33.4567', periods=5)) result = s.dt.strftime('%Y/%m/%d %H-%M-%S') expected = Series(['2015/02/03 11-22-33', '2015/02/04 11-22-33', '2015/02/05 11-22-33', '2015/02/06 11-22-33', '2015/02/07 11-22-33']) tm.assert_series_equal(result, expected) s = Series(period_range('20130101', periods=5)) result = s.dt.strftime('%Y/%m/%d') expected = Series(['2013/01/01', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05']) tm.assert_series_equal(result, expected) s = Series(period_range('2015-02-03 11:22:33.4567', periods=5, freq='s')) result = s.dt.strftime('%Y/%m/%d %H-%M-%S') expected = Series(['2015/02/03 11-22-33', '2015/02/03 11-22-34', '2015/02/03 11-22-35', '2015/02/03 11-22-36', '2015/02/03 11-22-37']) tm.assert_series_equal(result, expected) s = Series(date_range('20130101', periods=5)) s.iloc[0] = pd.NaT result = s.dt.strftime('%Y/%m/%d') expected = Series(['NaT', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05']) tm.assert_series_equal(result, expected) datetime_index = date_range('20150301', periods=5) result = datetime_index.strftime("%Y/%m/%d") expected = np.array(['2015/03/01', '2015/03/02', '2015/03/03', '2015/03/04', '2015/03/05'], dtype=object) self.assert_numpy_array_equal(result, expected) period_index = period_range('20150301', periods=5) result = period_index.strftime("%Y/%m/%d") expected = np.array(['2015/03/01', '2015/03/02', '2015/03/03', '2015/03/04', '2015/03/05'], dtype=object) self.assert_numpy_array_equal(result, expected) s = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14, 32, 1)]) result = s.dt.strftime('%Y-%m-%d %H:%M:%S') expected = Series(["2013-01-01 02:32:59", "2013-01-02 14:32:01"]) tm.assert_series_equal(result, expected) s = Series(period_range('20130101', periods=4, freq='H')) result = s.dt.strftime('%Y/%m/%d 
%H:%M:%S') expected = Series(["2013/01/01 00:00:00", "2013/01/01 01:00:00", "2013/01/01 02:00:00", "2013/01/01 03:00:00"]) s = Series(period_range('20130101', periods=4, freq='L')) result = s.dt.strftime('%Y/%m/%d %H:%M:%S.%l') expected = Series(["2013/01/01 00:00:00.000", "2013/01/01 00:00:00.001", "2013/01/01 00:00:00.002", "2013/01/01 00:00:00.003"]) tm.assert_series_equal(result, expected) def test_valid_dt_with_missing_values(self): from datetime import date, time # GH 8689 s = Series(date_range('20130101',periods=5,freq='D')) s.iloc[2] = pd.NaT for attr in ['microsecond','nanosecond','second','minute','hour','day']: expected = getattr(s.dt,attr).copy() expected.iloc[2] = np.nan result = getattr(s.dt,attr) tm.assert_series_equal(result, expected) result = s.dt.date expected = Series([date(2013,1,1),date(2013,1,2),np.nan,date(2013,1,4),date(2013,1,5)],dtype='object') tm.assert_series_equal(result, expected) result = s.dt.time expected = Series([time(0),time(0),np.nan,time(0),time(0)],dtype='object') tm.assert_series_equal(result, expected) def test_dt_accessor_api(self): # GH 9322 from pandas.tseries.common import (CombinedDatetimelikeProperties, DatetimeProperties) self.assertIs(Series.dt, CombinedDatetimelikeProperties) s = Series(date_range('2000-01-01', periods=3)) self.assertIsInstance(s.dt, DatetimeProperties) for s in [Series(np.arange(5)), Series(list('abcde')), Series(np.random.randn(5))]: with tm.assertRaisesRegexp(AttributeError, "only use .dt accessor"): s.dt self.assertFalse(hasattr(s, 'dt')) def test_tab_completion(self): # GH 9910 s = Series(list('abcd')) # Series of str values should have .str but not .dt/.cat in __dir__ self.assertTrue('str' in dir(s)) self.assertTrue('dt' not in dir(s)) self.assertTrue('cat' not in dir(s)) # similiarly for .dt s = Series(date_range('1/1/2015', periods=5)) self.assertTrue('dt' in dir(s)) self.assertTrue('str' not in dir(s)) self.assertTrue('cat' not in dir(s)) # similiarly for .cat, but with the twist that str 
and dt should be there # if the categories are of that type # first cat and str s = Series(list('abbcd'), dtype="category") self.assertTrue('cat' in dir(s)) self.assertTrue('str' in dir(s)) # as it is a string categorical self.assertTrue('dt' not in dir(s)) # similar to cat and str s = Series(date_range('1/1/2015', periods=5)).astype("category") self.assertTrue('cat' in dir(s)) self.assertTrue('str' not in dir(s)) self.assertTrue('dt' in dir(s)) # as it is a datetime categorical def test_binop_maybe_preserve_name(self): # names match, preserve result = self.ts * self.ts self.assertEqual(result.name, self.ts.name) result = self.ts.mul(self.ts) self.assertEqual(result.name, self.ts.name) result = self.ts * self.ts[:-2] self.assertEqual(result.name, self.ts.name) # names don't match, don't preserve cp = self.ts.copy() cp.name = 'something else' result = self.ts + cp self.assertIsNone(result.name) result = self.ts.add(cp) self.assertIsNone(result.name) ops = ['add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow'] ops = ops + ['r' + op for op in ops] for op in ops: # names match, preserve s = self.ts.copy() result = getattr(s, op)(s) self.assertEqual(result.name, self.ts.name) # names don't match, don't preserve cp = self.ts.copy() cp.name = 'changed' result = getattr(s, op)(cp) self.assertIsNone(result.name) def test_combine_first_name(self): result = self.ts.combine_first(self.ts[:5]) self.assertEqual(result.name, self.ts.name) def test_combine_first_dt64(self): from pandas.tseries.tools import to_datetime s0 = to_datetime(Series(["2010", np.NaN])) s1 = to_datetime(Series([np.NaN, "2011"])) rs = s0.combine_first(s1) xp = to_datetime(Series(['2010', '2011'])) assert_series_equal(rs, xp) s0 = to_datetime(Series(["2010", np.NaN])) s1 = Series([np.NaN, "2011"]) rs = s0.combine_first(s1) xp = Series([datetime(2010, 1, 1), '2011']) assert_series_equal(rs, xp) def test_get(self): # GH 6383 s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45, 51, 39, 
55, 43, 54, 52, 51, 54])) result = s.get(25, 0) expected = 0 self.assertEqual(result,expected) s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45, 51, 39, 55, 43, 54, 52, 51, 54]), index=pd.Float64Index([25.0, 36.0, 49.0, 64.0, 81.0, 100.0, 121.0, 144.0, 169.0, 196.0, 1225.0, 1296.0, 1369.0, 1444.0, 1521.0, 1600.0, 1681.0, 1764.0, 1849.0, 1936.0], dtype='object')) result = s.get(25, 0) expected = 43 self.assertEqual(result,expected) # GH 7407 # with a boolean accessor df = pd.DataFrame({'i':[0]*3, 'b':[False]*3}) vc = df.i.value_counts() result = vc.get(99,default='Missing') self.assertEqual(result,'Missing') vc = df.b.value_counts() result = vc.get(False,default='Missing') self.assertEqual(result,3) result = vc.get(True,default='Missing') self.assertEqual(result,'Missing') def test_delitem(self): # GH 5542 # should delete the item inplace s = Series(lrange(5)) del s[0] expected = Series(lrange(1,5),index=lrange(1,5)) assert_series_equal(s, expected) del s[1] expected = Series(lrange(2,5),index=lrange(2,5)) assert_series_equal(s, expected) # empty s = Series() def f(): del s[0] self.assertRaises(KeyError, f) # only 1 left, del, add, del s = Series(1) del s[0] assert_series_equal(s, Series(dtype='int64', index=Index([], dtype='int64'))) s[0] = 1 assert_series_equal(s, Series(1)) del s[0] assert_series_equal(s, Series(dtype='int64', index=Index([], dtype='int64'))) # Index(dtype=object) s = Series(1, index=['a']) del s['a'] assert_series_equal(s, Series(dtype='int64', index=Index([], dtype='object'))) s['a'] = 1 assert_series_equal(s, Series(1, index=['a'])) del s['a'] assert_series_equal(s, Series(dtype='int64', index=Index([], dtype='object'))) def test_getitem_preserve_name(self): result = self.ts[self.ts > 0] self.assertEqual(result.name, self.ts.name) result = self.ts[[0, 2, 4]] self.assertEqual(result.name, self.ts.name) result = self.ts[5:10] self.assertEqual(result.name, self.ts.name) def test_getitem_setitem_ellipsis(self): s = 
        # --- continuation of a test whose opening lines fall outside this
        # chunk; NOTE(review): the leading "s =" binding appears to have been
        # lost with the preceding chunk -- tokens kept exactly as found.
        Series(np.random.randn(10))
        np.fix(s)
        result = s[...]
        assert_series_equal(result, s)
        s[...] = 5
        self.assertTrue((result == 5).all())

    def test_getitem_negative_out_of_bounds(self):
        # A negative position past the start of a length-10 Series must
        # raise IndexError for both reads and writes.
        s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
        self.assertRaises(IndexError, s.__getitem__, -11)
        self.assertRaises(IndexError, s.__setitem__, -11, 'foo')

    def test_multilevel_name_print(self):
        # repr() of a MultiIndex-ed Series shows the level names and the
        # "Name: ..., dtype: ..." footer.
        # NOTE(review): the expected strings below look whitespace-mangled
        # (alignment spaces collapsed) -- verify against the actual repr.
        index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                                   ['one', 'two', 'three']],
                           labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                                   [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                           names=['first', 'second'])
        s = Series(lrange(0, len(index)), index=index, name='sth')
        expected = ["first second", "foo one 0",
                    " two 1", " three 2",
                    "bar one 3", " two 4",
                    "baz two 5", " three 6",
                    "qux one 7", " two 8",
                    " three 9", "Name: sth, dtype: int64"]
        expected = "\n".join(expected)
        self.assertEqual(repr(s), expected)

    def test_multilevel_preserve_name(self):
        # Selecting a level-0 label keeps the Series' name on the result,
        # via both [] and .ix.
        index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                                   ['one', 'two', 'three']],
                           labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                                   [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                           names=['first', 'second'])
        s = Series(np.random.randn(len(index)), index=index, name='sth')
        result = s['foo']
        result2 = s.ix['foo']
        self.assertEqual(result.name, s.name)
        self.assertEqual(result2.name, s.name)

    def test_name_printing(self):
        # The "Name:" footer appears iff the Series has a name, on both
        # the small-repr and the truncated big-repr code paths.
        # test small series
        s = Series([0, 1, 2])
        s.name = "test"
        self.assertIn("Name: test", repr(s))
        s.name = None
        self.assertNotIn("Name:", repr(s))
        # test big series (diff code path)
        s = Series(lrange(0, 1000))
        s.name = "test"
        self.assertIn("Name: test", repr(s))
        s.name = None
        self.assertNotIn("Name:", repr(s))
        s = Series(index=date_range('20010101', '20020101'), name='test')
        self.assertIn("Name: test", repr(s))

    def test_pickle_preserve_name(self):
        # A pickle round-trip keeps the name attribute.
        unpickled = self._pickle_roundtrip_name(self.ts)
        self.assertEqual(unpickled.name, self.ts.name)

    def _pickle_roundtrip_name(self, obj):
        # Helper: pickle to a temporary path and read it back.
        with ensure_clean() as path:
            obj.to_pickle(path)
            unpickled = pd.read_pickle(path)
            return unpickled

    def test_argsort_preserve_name(self):
        result = self.ts.argsort()
        self.assertEqual(result.name, self.ts.name)

    def test_sort_index_name(self):
        result = self.ts.sort_index(ascending=False)
        self.assertEqual(result.name, self.ts.name)

    def test_to_sparse_pass_name(self):
        result = self.ts.to_sparse()
        self.assertEqual(result.name, self.ts.name)


class TestNanops(tm.TestCase):
    # Tests for NaN-aware reductions/comparisons (pandas.core.nanops).

    _multiprocess_can_split_ = True

    def test_comparisons(self):
        # nangt propagates NaN positions into an object-dtype result.
        left = np.random.randn(10)
        right = np.random.randn(10)
        left[:3] = np.nan
        result = nanops.nangt(left, right)
        expected = (left > right).astype('O')
        expected[:3] = np.nan
        assert_almost_equal(result, expected)
        s = Series(['a', 'b', 'c'])
        s2 = Series([False, True, False])
        # it works!
        s == s2
        s2 == s

    def test_sum_zero(self):
        # Summing an empty array/Series/DataFrame row yields 0, not NaN.
        arr = np.array([])
        self.assertEqual(nanops.nansum(arr), 0)
        arr = np.empty((10, 0))
        self.assertTrue((nanops.nansum(arr, axis=1) == 0).all())
        # GH #844
        s = Series([], index=[])
        self.assertEqual(s.sum(), 0)
        df = DataFrame(np.empty((10, 0)))
        self.assertTrue((df.sum(1) == 0).all())

    def test_nansum_buglet(self):
        s = Series([1.0, np.nan], index=[0, 1])
        result = np.nansum(s)
        assert_almost_equal(result, 1)

    def test_overflow(self):
        # GH 6915
        # overflowing on the smaller int dtypes
        for dtype in ['int32', 'int64']:
            v = np.arange(5000000, dtype=dtype)
            s = Series(v)
            # no bottleneck
            result = s.sum(skipna=False)
            self.assertEqual(int(result), v.sum(dtype='int64'))
            result = s.min(skipna=False)
            self.assertEqual(int(result), 0)
            result = s.max(skipna=False)
            self.assertEqual(int(result), v[-1])
            # use bottleneck if available
            result = s.sum()
            self.assertEqual(int(result), v.sum(dtype='int64'))
            result = s.min()
            self.assertEqual(int(result), 0)
            result = s.max()
            self.assertEqual(int(result), v[-1])
        for dtype in ['float32', 'float64']:
            v = np.arange(5000000, dtype=dtype)
            s = Series(v)
            # no bottleneck
            result = s.sum(skipna=False)
            self.assertEqual(result, v.sum(dtype=dtype))
            result = s.min(skipna=False)
            self.assertTrue(np.allclose(float(result), 0.0))
            result = s.max(skipna=False)
            self.assertTrue(np.allclose(float(result), v[-1]))
            # use bottleneck if available
            result = s.sum()
            self.assertEqual(result, v.sum(dtype=dtype))
            result = s.min()
            self.assertTrue(np.allclose(float(result), 0.0))
            result = s.max()
            self.assertTrue(np.allclose(float(result), v[-1]))


class SafeForSparse(object):
    pass


# Shared time-series fixture; copied per-test in TestSeries.setUp.
_ts = tm.makeTimeSeries()


class TestSeries(tm.TestCase, CheckNameIntegration):
    # Core Series behaviour tests: construction, indexing, setitem, repr.

    _multiprocess_can_split_ = True

    def setUp(self):
        # Fresh fixtures per test: a named time series, a string-indexed
        # series, an object-dtype series, and an empty series.
        import warnings
        self.ts = _ts.copy()
        self.ts.name = 'ts'
        self.series = tm.makeStringSeries()
        self.series.name = 'series'
        self.objSeries = tm.makeObjectSeries()
        self.objSeries.name = 'objects'
        self.empty = Series([], index=[])

    def test_scalar_conversion(self):
        # Pass in scalar is disabled
        scalar = Series(0.5)
        self.assertNotIsInstance(scalar, float)
        # coercion
        self.assertEqual(float(Series([1.])), 1.0)
        self.assertEqual(int(Series([1.])), 1)
        self.assertEqual(long(Series([1.])), 1)

    def test_astype(self):
        # astype changes the dtype but preserves the name.
        s = Series(np.random.randn(5), name='foo')
        for dtype in ['float32', 'float64', 'int64', 'int32']:
            astyped = s.astype(dtype)
            self.assertEqual(astyped.dtype, dtype)
            self.assertEqual(astyped.name, s.name)

    def test_TimeSeries_deprecation(self):
        # deprecation TimeSeries, #10890
        with tm.assert_produces_warning(FutureWarning):
            pd.TimeSeries(1, index=date_range('20130101', periods=3))

    def test_constructor(self):
        # Recognize TimeSeries
        with tm.assert_produces_warning(FutureWarning):
            self.assertTrue(self.ts.is_time_series)
        self.assertTrue(self.ts.index.is_all_dates)
        # Pass in Series
        derived = Series(self.ts)
        with tm.assert_produces_warning(FutureWarning):
            self.assertTrue(derived.is_time_series)
        self.assertTrue(derived.index.is_all_dates)
        self.assertTrue(tm.equalContents(derived.index, self.ts.index))
        # Ensure new index is not created
        self.assertEqual(id(self.ts.index), id(derived.index))
        # Mixed type Series
        mixed = Series(['hello', np.NaN], index=[0, 1])
        self.assertEqual(mixed.dtype, np.object_)
        self.assertIs(mixed[1], np.NaN)
        with tm.assert_produces_warning(FutureWarning):
            self.assertFalse(self.empty.is_time_series)
        self.assertFalse(self.empty.index.is_all_dates)
        with tm.assert_produces_warning(FutureWarning):
            self.assertFalse(Series({}).is_time_series)
        self.assertFalse(Series({}).index.is_all_dates)
        # 2-D input is rejected.
        self.assertRaises(Exception, Series, np.random.randn(3, 3),
                          index=np.arange(3))
        mixed.name = 'Series'
        rs = Series(mixed).name
        xp = 'Series'
        self.assertEqual(rs, xp)
        # raise on MultiIndex GH4187
        m = MultiIndex.from_arrays([[1, 2], [3, 4]])
        self.assertRaises(NotImplementedError, Series, m)

    def test_constructor_empty(self):
        empty = Series()
        empty2 = Series([])
        assert_series_equal(empty, empty2, check_index_type=False)
        empty = Series(index=lrange(10))
        empty2 = Series(np.nan, index=lrange(10))
        assert_series_equal(empty, empty2)

    def test_constructor_series(self):
        # Constructing from a Series with a new index reindexes/aligns.
        index1 = ['d', 'b', 'a', 'c']
        index2 = sorted(index1)
        s1 = Series([4, 7, -5, 3], index=index1)
        s2 = Series(s1, index=index2)
        assert_series_equal(s2, s1.sort_index())

    def test_constructor_iterator(self):
        expected = Series(list(range(10)), dtype='int64')
        result = Series(range(10), dtype='int64')
        assert_series_equal(result, expected)

    def test_constructor_generator(self):
        gen = (i for i in range(10))
        result = Series(gen)
        exp = Series(lrange(10))
        assert_series_equal(result, exp)
        gen = (i for i in range(10))
        result = Series(gen, index=lrange(10, 20))
        exp.index = lrange(10, 20)
        assert_series_equal(result, exp)

    def test_constructor_map(self):
        # GH8909
        m = map(lambda x: x, range(10))
        result = Series(m)
        exp = Series(lrange(10))
        assert_series_equal(result, exp)
        m = map(lambda x: x, range(10))
        result = Series(m, index=lrange(10, 20))
        exp.index = lrange(10, 20)
        assert_series_equal(result, exp)

    def test_constructor_categorical(self):
        cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'],
                             fastpath=True)
        res = Series(cat)
        self.assertTrue(res.values.equals(cat))

    def test_constructor_maskedarray(self):
        # Masked entries become NaN/NaT; a masked int/bool array upcasts to
        # float/object until every entry is unmasked.
        data = ma.masked_all((3,), dtype=float)
        result = Series(data)
        expected = Series([nan, nan, nan])
        assert_series_equal(result, expected)
        data[0] = 0.0
        data[2] = 2.0
        index = ['a', 'b', 'c']
        result = Series(data, index=index)
        expected = Series([0.0, nan, 2.0], index=index)
        assert_series_equal(result, expected)
        data[1] = 1.0
        result = Series(data, index=index)
        expected = Series([0.0, 1.0, 2.0], index=index)
        assert_series_equal(result, expected)
        data = ma.masked_all((3,), dtype=int)
        result = Series(data)
        expected = Series([nan, nan, nan], dtype=float)
        assert_series_equal(result, expected)
        data[0] = 0
        data[2] = 2
        index = ['a', 'b', 'c']
        result = Series(data, index=index)
        expected = Series([0, nan, 2], index=index, dtype=float)
        assert_series_equal(result, expected)
        data[1] = 1
        result = Series(data, index=index)
        expected = Series([0, 1, 2], index=index, dtype=int)
        assert_series_equal(result, expected)
        data = ma.masked_all((3,), dtype=bool)
        result = Series(data)
        expected = Series([nan, nan, nan], dtype=object)
        assert_series_equal(result, expected)
        data[0] = True
        data[2] = False
        index = ['a', 'b', 'c']
        result = Series(data, index=index)
        expected = Series([True, nan, False], index=index, dtype=object)
        assert_series_equal(result, expected)
        data[1] = True
        result = Series(data, index=index)
        expected = Series([True, True, False], index=index, dtype=bool)
        assert_series_equal(result, expected)
        from pandas import tslib
        # Masked datetime64: masked slots become iNaT (NaT).
        data = ma.masked_all((3,), dtype='M8[ns]')
        result = Series(data)
        expected = Series([tslib.iNaT, tslib.iNaT, tslib.iNaT],
                          dtype='M8[ns]')
        assert_series_equal(result, expected)
        data[0] = datetime(2001, 1, 1)
        data[2] = datetime(2001, 1, 3)
        index = ['a', 'b', 'c']
        result = Series(data, index=index)
        expected = Series([datetime(2001, 1, 1), tslib.iNaT,
                           datetime(2001, 1, 3)], index=index,
                          dtype='M8[ns]')
        assert_series_equal(result, expected)
        data[1] = datetime(2001, 1, 2)
        result = Series(data, index=index)
        expected = Series([datetime(2001, 1, 1), datetime(2001, 1, 2),
                           datetime(2001, 1, 3)], index=index,
                          dtype='M8[ns]')
assert_series_equal(result, expected) def test_constructor_default_index(self): s = Series([0, 1, 2]) assert_almost_equal(s.index, np.arange(3)) def test_constructor_corner(self): df = tm.makeTimeDataFrame() objs = [df, df] s = Series(objs, index=[0, 1]) tm.assertIsInstance(s, Series) def test_constructor_sanitize(self): s = Series(np.array([1., 1., 8.]), dtype='i8') self.assertEqual(s.dtype, np.dtype('i8')) s = Series(np.array([1., 1., np.nan]), copy=True, dtype='i8') self.assertEqual(s.dtype, np.dtype('f8')) def test_constructor_pass_none(self): s = Series(None, index=lrange(5)) self.assertEqual(s.dtype, np.float64) s = Series(None, index=lrange(5), dtype=object) self.assertEqual(s.dtype, np.object_) # GH 7431 # inference on the index s = Series(index=np.array([None])) expected = Series(index=Index([None])) assert_series_equal(s,expected) def test_constructor_cast(self): self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float) def test_constructor_dtype_nocast(self): # 1572 s = Series([1, 2, 3]) s2 = Series(s, dtype=np.int64) s2[1] = 5 self.assertEqual(s[1], 5) def test_constructor_datelike_coercion(self): # GH 9477 # incorrectly infering on dateimelike looking when object dtype is specified s = Series([Timestamp('20130101'),'NOV'],dtype=object) self.assertEqual(s.iloc[0],Timestamp('20130101')) self.assertEqual(s.iloc[1],'NOV') self.assertTrue(s.dtype == object) # the dtype was being reset on the slicing and re-inferred to datetime even # thought the blocks are mixed belly = '216 3T19'.split() wing1 = '2T15 4H19'.split() wing2 = '416 4T20'.split() mat = pd.to_datetime('2016-01-22 2019-09-07'.split()) df = pd.DataFrame({'wing1':wing1, 'wing2':wing2, 'mat':mat}, index=belly) result = df.loc['3T19'] self.assertTrue(result.dtype == object) result = df.loc['216'] self.assertTrue(result.dtype == object) def test_constructor_dtype_datetime64(self): import pandas.tslib as tslib s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5)) 
self.assertTrue(isnull(s).all()) # in theory this should be all nulls, but since # we are not specifying a dtype is ambiguous s = Series(tslib.iNaT, index=lrange(5)) self.assertFalse(isnull(s).all()) s = Series(nan, dtype='M8[ns]', index=lrange(5)) self.assertTrue(isnull(s).all()) s = Series([datetime(2001, 1, 2, 0, 0), tslib.iNaT], dtype='M8[ns]') self.assertTrue(isnull(s[1])) self.assertEqual(s.dtype, 'M8[ns]') s = Series([datetime(2001, 1, 2, 0, 0), nan], dtype='M8[ns]') self.assertTrue(isnull(s[1])) self.assertEqual(s.dtype, 'M8[ns]') # GH3416 dates = [ np.datetime64(datetime(2013, 1, 1)), np.datetime64(datetime(2013, 1, 2)), np.datetime64(datetime(2013, 1, 3)), ] s = Series(dates) self.assertEqual(s.dtype, 'M8[ns]') s.ix[0] = np.nan self.assertEqual(s.dtype, 'M8[ns]') # invalid astypes for t in ['s', 'D', 'us', 'ms']: self.assertRaises(TypeError, s.astype, 'M8[%s]' % t) # GH3414 related self.assertRaises(TypeError, lambda x: Series( Series(dates).astype('int') / 1000000, dtype='M8[ms]')) self.assertRaises( TypeError, lambda x: Series(dates, dtype='datetime64')) # invalid dates can be help as object result = Series([datetime(2,1,1)]) self.assertEqual(result[0], datetime(2,1,1,0,0)) result = Series([datetime(3000,1,1)]) self.assertEqual(result[0], datetime(3000,1,1,0,0)) # don't mix types result = Series([ Timestamp('20130101'), 1],index=['a','b']) self.assertEqual(result['a'], Timestamp('20130101')) self.assertEqual(result['b'], 1) # GH6529 # coerce datetime64 non-ns properly dates = date_range('01-Jan-2015', '01-Dec-2015', freq='M') values2 = dates.view(np.ndarray).astype('datetime64[ns]') expected = Series(values2, dates) for dtype in ['s', 'D', 'ms', 'us', 'ns']: values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype)) result = Series(values1, dates) assert_series_equal(result,expected) # leave datetime.date alone dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object) series1 = Series(dates2, dates) 
self.assert_numpy_array_equal(series1.values,dates2) self.assertEqual(series1.dtype,object) # these will correctly infer a datetime s = Series([None, pd.NaT, '2013-08-05 15:30:00.000001']) self.assertEqual(s.dtype,'datetime64[ns]') s = Series([np.nan, pd.NaT, '2013-08-05 15:30:00.000001']) self.assertEqual(s.dtype,'datetime64[ns]') s = Series([pd.NaT, None, '2013-08-05 15:30:00.000001']) self.assertEqual(s.dtype,'datetime64[ns]') s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001']) self.assertEqual(s.dtype,'datetime64[ns]') # tz-aware (UTC and other tz's) # GH 8411 dr = date_range('20130101',periods=3) self.assertTrue(Series(dr).iloc[0].tz is None) dr = date_range('20130101',periods=3,tz='UTC') self.assertTrue(str(Series(dr).iloc[0].tz) == 'UTC') dr = date_range('20130101',periods=3,tz='US/Eastern') self.assertTrue(str(Series(dr).iloc[0].tz) == 'US/Eastern') # non-convertible s = Series([1479596223000, -1479590, pd.NaT]) self.assertTrue(s.dtype == 'object') self.assertTrue(s[2] is pd.NaT) self.assertTrue('NaT' in str(s)) # if we passed a NaT it remains s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT]) self.assertTrue(s.dtype == 'object') self.assertTrue(s[2] is pd.NaT) self.assertTrue('NaT' in str(s)) # if we passed a nan it remains s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan]) self.assertTrue(s.dtype == 'object') self.assertTrue(s[2] is np.nan) self.assertTrue('NaN' in str(s)) def test_constructor_with_datetime_tz(self): # 8260 # support datetime64 with tz dr = date_range('20130101',periods=3,tz='US/Eastern') s = Series(dr) self.assertTrue(s.dtype.name == 'datetime64[ns, US/Eastern]') self.assertTrue(s.dtype == 'datetime64[ns, US/Eastern]') self.assertTrue(com.is_datetime64tz_dtype(s.dtype)) self.assertTrue('datetime64[ns, US/Eastern]' in str(s)) # export result = s.values self.assertIsInstance(result, np.ndarray) self.assertTrue(result.dtype == 'datetime64[ns]') 
self.assertTrue(dr.equals(pd.DatetimeIndex(result).tz_localize('UTC').tz_convert(tz=s.dt.tz))) # indexing result = s.iloc[0] self.assertEqual(result,Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern', offset='D')) result = s[0] self.assertEqual(result,Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern', offset='D')) result = s[Series([True,True,False],index=s.index)] assert_series_equal(result,s[0:2]) result = s.iloc[0:1] assert_series_equal(result,Series(dr[0:1])) # concat result = pd.concat([s.iloc[0:1],s.iloc[1:]]) assert_series_equal(result,s) # astype result = s.astype(object) expected = Series(DatetimeIndex(s._values).asobject) assert_series_equal(result, expected) result = Series(s.values).dt.tz_localize('UTC').dt.tz_convert(s.dt.tz) assert_series_equal(result, s) # astype - datetime64[ns, tz] result = Series(s.values).astype('datetime64[ns, US/Eastern]') assert_series_equal(result, s) result = Series(s.values).astype(s.dtype) assert_series_equal(result, s) result = s.astype('datetime64[ns, CET]') expected = Series(date_range('20130101 06:00:00',periods=3,tz='CET')) assert_series_equal(result, expected) # short str self.assertTrue('datetime64[ns, US/Eastern]' in str(s)) # formatting with NaT result = s.shift() self.assertTrue('datetime64[ns, US/Eastern]' in str(result)) self.assertTrue('NaT' in str(result)) # long str t = Series(date_range('20130101',periods=1000,tz='US/Eastern')) self.assertTrue('datetime64[ns, US/Eastern]' in str(t)) result = pd.DatetimeIndex(s,freq='infer') tm.assert_index_equal(result, dr) # inference s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')]) self.assertTrue(s.dtype == 'datetime64[ns, US/Pacific]') self.assertTrue(lib.infer_dtype(s) == 'datetime64') s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Eastern')]) self.assertTrue(s.dtype == 'object') 
self.assertTrue(lib.infer_dtype(s) == 'datetime') def test_constructor_periodindex(self): # GH7932 # converting a PeriodIndex when put in a Series pi = period_range('20130101',periods=5,freq='D') s = Series(pi) expected = Series(pi.asobject) assert_series_equal(s, expected) def test_constructor_dict(self): d = {'a': 0., 'b': 1., 'c': 2.} result = Series(d, index=['b', 'c', 'd', 'a']) expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a']) assert_series_equal(result, expected) pidx = tm.makePeriodIndex(100) d = {pidx[0]: 0, pidx[1]: 1} result = Series(d, index=pidx) expected = Series(np.nan, pidx) expected.ix[0] = 0 expected.ix[1] = 1 assert_series_equal(result, expected) def test_constructor_dict_multiindex(self): check = lambda result, expected: tm.assert_series_equal( result, expected, check_dtype=True, check_index_type=True, check_series_type=True) d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.} _d = sorted(d.items()) ser = Series(d) expected = Series([x[1] for x in _d], index=MultiIndex.from_tuples([x[0] for x in _d])) check(ser, expected) d['z'] = 111. 
        _d.insert(0, ('z', d['z']))
        ser = Series(d)
        expected = Series(
            [x[1] for x in _d],
            index=Index([x[0] for x in _d], tupleize_cols=False))
        ser = ser.reindex(index=expected.index)
        check(ser, expected)

    def test_constructor_subclass_dict(self):
        # dict subclasses construct the same Series as a plain dict.
        data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
        series = Series(data)
        refseries = Series(dict(compat.iteritems(data)))
        assert_series_equal(refseries, series)

    def test_constructor_dict_datetime64_index(self):
        # GH 9456
        dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03',
                        '1990-03-15']
        values = [42544017.198965244, 1234565, 40512335.181958228, -1]

        def create_data(constructor):
            # Build the same dict keyed by each datetime-like flavour.
            return dict(zip((constructor(x) for x in dates_as_str),
                            values))

        data_datetime64 = create_data(np.datetime64)
        data_datetime = create_data(
            lambda x: datetime.strptime(x, '%Y-%m-%d'))
        data_Timestamp = create_data(Timestamp)
        expected = Series(values, (Timestamp(x) for x in dates_as_str))
        result_datetime64 = Series(data_datetime64)
        result_datetime = Series(data_datetime)
        result_Timestamp = Series(data_Timestamp)
        assert_series_equal(result_datetime64, expected)
        assert_series_equal(result_datetime, expected)
        assert_series_equal(result_Timestamp, expected)

    def test_orderedDict_ctor(self):
        # GH3283 -- OrderedDict insertion order is preserved.
        import pandas
        import random
        data = OrderedDict([('col%s' % i, random.random())
                            for i in range(12)])
        s = pandas.Series(data)
        self.assertTrue(all(s.values == list(data.values())))

    def test_orderedDict_subclass_ctor(self):
        # GH3283
        import pandas
        import random

        class A(OrderedDict):
            pass

        data = A([('col%s' % i, random.random()) for i in range(12)])
        s = pandas.Series(data)
        self.assertTrue(all(s.values == list(data.values())))

    def test_constructor_list_of_tuples(self):
        data = [(1, 1), (2, 2), (2, 3)]
        s = Series(data)
        self.assertEqual(list(s), data)

    def test_constructor_tuple_of_tuples(self):
        data = ((1, 1), (2, 2), (2, 3))
        s = Series(data)
        self.assertEqual(tuple(s), data)

    def test_constructor_set(self):
        # Sets are unordered, so constructing from one must raise.
        values = set([1, 2, 3, 4, 5])
        self.assertRaises(TypeError, Series, values)
        values = frozenset(values)
        self.assertRaises(TypeError, Series, values)

    def test_fromDict(self):
        # dict construction sorts keys and infers dtype across values.
        data = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
        series = Series(data)
        self.assertTrue(tm.is_sorted(series.index))
        data = {'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()}
        series = Series(data)
        self.assertEqual(series.dtype, np.object_)
        data = {'a': 0, 'b': '1', 'c': '2', 'd': '3'}
        series = Series(data)
        self.assertEqual(series.dtype, np.object_)
        data = {'a': '0', 'b': '1'}
        series = Series(data, dtype=float)
        self.assertEqual(series.dtype, np.float64)

    def test_setindex(self):
        # wrong type
        series = self.series.copy()
        self.assertRaises(TypeError, setattr, series, 'index', None)
        # wrong length
        series = self.series.copy()
        self.assertRaises(Exception, setattr, series, 'index',
                          np.arange(len(series) - 1))
        # works
        series = self.series.copy()
        series.index = np.arange(len(series))
        tm.assertIsInstance(series.index, Index)

    def test_array_finalize(self):
        pass

    def test_pop(self):
        # GH 6600
        df = DataFrame({
            'A': 0,
            'B': np.arange(5, dtype='int64'),
            'C': 0,
        })
        k = df.iloc[4]
        result = k.pop('B')
        self.assertEqual(result, 4)
        expected = Series([0, 0], index=['A', 'C'], name=4)
        assert_series_equal(k, expected)

    def test_not_hashable(self):
        s_empty = Series()
        s = Series([1])
        self.assertRaises(TypeError, hash, s_empty)
        self.assertRaises(TypeError, hash, s)

    def test_fromValue(self):
        # Broadcasting a scalar over an index infers dtype from the scalar.
        nans = Series(np.NaN, index=self.ts.index)
        self.assertEqual(nans.dtype, np.float_)
        self.assertEqual(len(nans), len(self.ts))
        strings = Series('foo', index=self.ts.index)
        self.assertEqual(strings.dtype, np.object_)
        self.assertEqual(len(strings), len(self.ts))
        d = datetime.now()
        dates = Series(d, index=self.ts.index)
        self.assertEqual(dates.dtype, 'M8[ns]')
        self.assertEqual(len(dates), len(self.ts))

    def test_contains(self):
        tm.assert_contains_all(self.ts.index, self.ts)

    def test_pickle(self):
        unp_series = self._pickle_roundtrip(self.series)
        unp_ts = self._pickle_roundtrip(self.ts)
        assert_series_equal(unp_series, self.series)
        assert_series_equal(unp_ts, self.ts)

    def _pickle_roundtrip(self, obj):
        # Helper: pickle to a temporary path and read it back.
        with ensure_clean() as path:
            obj.to_pickle(path)
            unpickled = pd.read_pickle(path)
            return unpickled

    def test_getitem_get(self):
        # [] and .get agree for present labels; .get returns None for
        # missing ones while [] raises.
        idx1 = self.series.index[5]
        idx2 = self.objSeries.index[5]
        self.assertEqual(self.series[idx1], self.series.get(idx1))
        self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
        self.assertEqual(self.series[idx1], self.series[5])
        self.assertEqual(self.objSeries[idx2], self.objSeries[5])
        self.assertEqual(
            self.series.get(-1), self.series.get(self.series.index[-1]))
        self.assertEqual(self.series[5],
                         self.series.get(self.series.index[5]))
        # missing
        d = self.ts.index[0] - datetools.bday
        self.assertRaises(KeyError, self.ts.__getitem__, d)
        # None
        # GH 5652
        for s in [Series(), Series(index=list('abc'))]:
            result = s.get(None)
            self.assertIsNone(result)

    def test_iget(self):
        s = Series(np.random.randn(10), index=lrange(0, 20, 2))
        # 10711, deprecated
        with tm.assert_produces_warning(FutureWarning):
            s.iget(1)
        # 10711, deprecated
        with tm.assert_produces_warning(FutureWarning):
            s.irow(1)
        # 10711, deprecated
        with tm.assert_produces_warning(FutureWarning):
            s.iget_value(1)
        for i in range(len(s)):
            result = s.iloc[i]
            exp = s[s.index[i]]
            assert_almost_equal(result, exp)
        # pass a slice
        result = s.iloc[slice(1, 3)]
        expected = s.ix[2:4]
        assert_series_equal(result, expected)
        # test slice is a view
        result[:] = 0
        self.assertTrue((s[1:3] == 0).all())
        # list of integers
        result = s.iloc[[0, 2, 3, 4, 5]]
        expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
        assert_series_equal(result, expected)

    def test_iget_nonunique(self):
        s = Series([0, 1, 2], index=[0, 1, 0])
        self.assertEqual(s.iloc[2], 2)

    def test_getitem_regression(self):
        s = Series(lrange(5), index=lrange(5))
        result = s[lrange(5)]
        assert_series_equal(result, s)

    def test_getitem_setitem_slice_bug(self):
        # Out-of-range slice bounds clip rather than raise.
        s = Series(lrange(10), lrange(10))
        result = s[-12:]
        assert_series_equal(result, s)
        result = s[-7:]
        assert_series_equal(result, s[3:])
result = s[:-12] assert_series_equal(result, s[:0]) s = Series(lrange(10), lrange(10)) s[-12:] = 0 self.assertTrue((s == 0).all()) s[:-12] = 5 self.assertTrue((s == 0).all()) def test_getitem_int64(self): idx = np.int64(5) self.assertEqual(self.ts[idx], self.ts[5]) def test_getitem_fancy(self): slice1 = self.series[[1, 2, 3]] slice2 = self.objSeries[[1, 2, 3]] self.assertEqual(self.series.index[2], slice1.index[1]) self.assertEqual(self.objSeries.index[2], slice2.index[1]) self.assertEqual(self.series[2], slice1[1]) self.assertEqual(self.objSeries[2], slice2[1]) def test_getitem_boolean(self): s = self.series mask = s > s.median() # passing list is OK result = s[list(mask)] expected = s[mask] assert_series_equal(result, expected) self.assert_numpy_array_equal(result.index, s.index[mask]) def test_getitem_boolean_empty(self): s = Series([], dtype=np.int64) s.index.name = 'index_name' s = s[s.isnull()] self.assertEqual(s.index.name, 'index_name') self.assertEqual(s.dtype, np.int64) # GH5877 # indexing with empty series s = Series(['A', 'B']) expected = Series(np.nan,index=['C'],dtype=object) result = s[Series(['C'], dtype=object)] assert_series_equal(result, expected) s = Series(['A', 'B']) expected = Series(dtype=object, index=Index([], dtype='int64')) result = s[Series([], dtype=object)] assert_series_equal(result, expected) # invalid because of the boolean indexer # that's empty or not-aligned def f(): s[Series([], dtype=bool)] self.assertRaises(IndexingError, f) def f(): s[Series([True], dtype=bool)] self.assertRaises(IndexingError, f) def test_getitem_generator(self): gen = (x > 0 for x in self.series) result = self.series[gen] result2 = self.series[iter(self.series > 0)] expected = self.series[self.series > 0] assert_series_equal(result, expected) assert_series_equal(result2, expected) def test_getitem_boolean_object(self): # using column from DataFrame s = self.series mask = s > s.median() omask = mask.astype(object) # getitem result = s[omask] expected = 
s[mask] assert_series_equal(result, expected) # setitem s2 = s.copy() cop = s.copy() cop[omask] = 5 s2[mask] = 5 assert_series_equal(cop, s2) # nans raise exception omask[5:10] = np.nan self.assertRaises(Exception, s.__getitem__, omask) self.assertRaises(Exception, s.__setitem__, omask, 5) def test_getitem_setitem_boolean_corner(self): ts = self.ts mask_shifted = ts.shift(1, freq=datetools.bday) > ts.median() # these used to raise...?? self.assertRaises(Exception, ts.__getitem__, mask_shifted) self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1) #ts[mask_shifted] #ts[mask_shifted] = 1 self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted) self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1) #ts.ix[mask_shifted] #ts.ix[mask_shifted] = 2 def test_getitem_setitem_slice_integers(self): s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16]) result = s[:4] expected = s.reindex([2, 4, 6, 8]) assert_series_equal(result, expected) s[:4] = 0 self.assertTrue((s[:4] == 0).all()) self.assertTrue(not (s[4:] == 0).any()) def test_getitem_out_of_bounds(self): # don't segfault, GH #495 self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts)) # GH #917 s = Series([]) self.assertRaises(IndexError, s.__getitem__, -1) def test_getitem_setitem_integers(self): # caused bug without test s = Series([1, 2, 3], ['a', 'b', 'c']) self.assertEqual(s.ix[0], s['a']) s.ix[0] = 5 self.assertAlmostEqual(s['a'], 5) def test_getitem_box_float64(self): value = self.ts[5] tm.assertIsInstance(value, np.float64) def test_getitem_ambiguous_keyerror(self): s = Series(lrange(10), index=lrange(0, 20, 2)) self.assertRaises(KeyError, s.__getitem__, 1) self.assertRaises(KeyError, s.ix.__getitem__, 1) def test_getitem_unordered_dup(self): obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b']) self.assertTrue(np.isscalar(obj['c'])) self.assertEqual(obj['c'], 0) def test_getitem_dups_with_missing(self): # breaks reindex, so need to use .ix internally # GH 4246 s = 
Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah']) expected = s.ix[['foo', 'bar', 'bah', 'bam']] result = s[['foo', 'bar', 'bah', 'bam']] assert_series_equal(result, expected) def test_getitem_dups(self): s = Series(range(5),index=['A','A','B','C','C'],dtype=np.int64) expected = Series([3,4],index=['C','C'],dtype=np.int64) result = s['C'] assert_series_equal(result, expected) def test_getitem_dataframe(self): rng = list(range(10)) s = pd.Series(10, index=rng) df = pd.DataFrame(rng, index=rng) self.assertRaises(TypeError, s.__getitem__, df>5) def test_setitem_ambiguous_keyerror(self): s = Series(lrange(10), index=lrange(0, 20, 2)) # equivalent of an append s2 = s.copy() s2[1] = 5 expected = s.append(Series([5],index=[1])) assert_series_equal(s2,expected) s2 = s.copy() s2.ix[1] = 5 expected = s.append(Series([5],index=[1])) assert_series_equal(s2,expected) def test_setitem_float_labels(self): # note labels are floats s = Series(['a', 'b', 'c'], index=[0, 0.5, 1]) tmp = s.copy() s.ix[1] = 'zoo' tmp.iloc[2] = 'zoo' assert_series_equal(s, tmp) def test_slice(self): numSlice = self.series[10:20] numSliceEnd = self.series[-10:] objSlice = self.objSeries[10:20] self.assertNotIn(self.series.index[9], numSlice.index) self.assertNotIn(self.objSeries.index[9], objSlice.index) self.assertEqual(len(numSlice), len(numSlice.index)) self.assertEqual(self.series[numSlice.index[0]], numSlice[numSlice.index[0]]) self.assertEqual(numSlice.index[1], self.series.index[11]) self.assertTrue(tm.equalContents(numSliceEnd, np.array(self.series)[-10:])) # test return view sl = self.series[10:20] sl[:] = 0 self.assertTrue((self.series[10:20] == 0).all()) def test_slice_can_reorder_not_uniquely_indexed(self): s = Series(1, index=['a', 'a', 'b', 'b', 'c']) result = s[::-1] # it works! 
def test_slice_float_get_set(self):
    # Float slice bounds against the default integer-like ts index are
    # rejected with TypeError for both get and set.
    self.assertRaises(TypeError, lambda : self.ts[4.0:10.0])
    def f():
        self.ts[4.0:10.0] = 0
    self.assertRaises(TypeError, f)

    self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
    self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)

def test_slice_floats2(self):
    # Float slice bounds on a float index are label-based.
    s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))

    self.assertEqual(len(s.ix[12.0:]), 8)
    self.assertEqual(len(s.ix[12.5:]), 7)

    # same checks with one label nudged off the regular grid
    i = np.arange(10, 20, dtype=float)
    i[2] = 12.2
    s.index = i
    self.assertEqual(len(s.ix[12.0:]), 8)
    self.assertEqual(len(s.ix[12.5:]), 7)

def test_slice_float64(self):
    # Label-based float slicing includes the end label (positions 5..15
    # inclusive -> iloc[5:16]) for Series and DataFrame, via [] and .loc.
    values = np.arange(10., 50., 2)
    index = Index(values)

    start, end = values[[5, 15]]

    s = Series(np.random.randn(20), index=index)

    result = s[start:end]
    expected = s.iloc[5:16]
    assert_series_equal(result, expected)

    result = s.loc[start:end]
    assert_series_equal(result, expected)

    df = DataFrame(np.random.randn(20, 3), index=index)

    result = df[start:end]
    expected = df.iloc[5:16]
    tm.assert_frame_equal(result, expected)

    result = df.loc[start:end]
    tm.assert_frame_equal(result, expected)

def test_setitem(self):
    # scalar/list/positional assignment of NaN, then boolean-mask repair
    self.ts[self.ts.index[5]] = np.NaN
    self.ts[[1, 2, 17]] = np.NaN

    self.ts[6] = np.NaN
    self.assertTrue(np.isnan(self.ts[6]))
    self.assertTrue(np.isnan(self.ts[2]))
    self.ts[np.isnan(self.ts)] = 5
    self.assertFalse(np.isnan(self.ts[2]))

    # caught this bug when writing tests
    series = Series(tm.makeIntIndex(20).astype(float),
                    index=tm.makeIntIndex(20))

    series[::2] = 0
    self.assertTrue((series[::2] == 0).all())

    # set item that's not contained
    s = self.series.copy()
    s['foobar'] = 1
    app = Series([1], index=['foobar'], name='series')
    expected = self.series.append(app)
    assert_series_equal(s, expected)

    # Test for issue #10193
    key = pd.Timestamp('2012-01-01')
    series = pd.Series()
    series[key] = 47
    expected = pd.Series(47, [key])
    assert_series_equal(series, expected)

    series = pd.Series([], pd.DatetimeIndex([], freq='D'))
    series[key] = 47
    expected = pd.Series(47,
pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)

def test_setitem_dtypes(self):
    # change dtypes
    # GH 4463: setting NaN into an int Series upcasts to float, into a
    # bool Series upcasts to object/float as shown by the expecteds below
    expected = Series([np.nan,2,3])

    s = Series([1,2,3])
    s.iloc[0] = np.nan
    assert_series_equal(s,expected)

    s = Series([1,2,3])
    s.loc[0] = np.nan
    assert_series_equal(s,expected)

    s = Series([1,2,3])
    s[0] = np.nan
    assert_series_equal(s,expected)

    s = Series([False])
    s.loc[0] = np.nan
    assert_series_equal(s,Series([np.nan]))

    s = Series([False,True])
    s.loc[0] = np.nan
    assert_series_equal(s,Series([np.nan,1.0]))

def test_set_value(self):
    # set_value mutates in place and returns the same object for an
    # existing label
    idx = self.ts.index[10]
    res = self.ts.set_value(idx, 0)
    self.assertIs(res, self.ts)
    self.assertEqual(self.ts[idx], 0)

    # equiv: a new label enlarges the Series, same as .loc assignment below
    s = self.series.copy()
    res = s.set_value('foobar', 0)
    self.assertIs(res, s)
    self.assertEqual(res.index[-1], 'foobar')
    self.assertEqual(res['foobar'], 0)

    s = self.series.copy()
    s.loc['foobar'] = 0
    self.assertEqual(s.index[-1], 'foobar')
    self.assertEqual(s['foobar'], 0)

def test_setslice(self):
    # slicing preserves index length and uniqueness
    sl = self.ts[5:20]
    self.assertEqual(len(sl), len(sl.index))
    self.assertTrue(sl.index.is_unique)

def test_basic_getitem_setitem_corner(self):
    # invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
    with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
        self.ts[:, 2]
    with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
        self.ts[:, 2] = 2

    # weird lists.
[slice(0, 5)] will work but not two slices result = self.ts[[slice(None, 5)]] expected = self.ts[:5] assert_series_equal(result, expected) # OK self.assertRaises(Exception, self.ts.__getitem__, [5, slice(None, None)]) self.assertRaises(Exception, self.ts.__setitem__, [5, slice(None, None)], 2) def test_reshape_non_2d(self): # GH 4554 x = Series(np.random.random(201), name='x') self.assertTrue(x.reshape(x.shape,) is x) # GH 2719 a = Series([1, 2, 3, 4]) result = a.reshape(2, 2) expected = a.values.reshape(2, 2) tm.assert_numpy_array_equal(result, expected) self.assertTrue(type(result) is type(expected)) def test_reshape_2d_return_array(self): x = Series(np.random.random(201), name='x') result = x.reshape((-1, 1)) self.assertNotIsInstance(result, Series) result2 = np.reshape(x, (-1, 1)) self.assertNotIsInstance(result2, Series) result = x[:, None] expected = x.reshape((-1, 1)) assert_almost_equal(result, expected) def test_basic_getitem_with_labels(self): indices = self.ts.index[[5, 10, 15]] result = self.ts[indices] expected = self.ts.reindex(indices) assert_series_equal(result, expected) result = self.ts[indices[0]:indices[2]] expected = self.ts.ix[indices[0]:indices[2]] assert_series_equal(result, expected) # integer indexes, be careful s = Series(np.random.randn(10), index=lrange(0, 20, 2)) inds = [0, 2, 5, 7, 8] arr_inds = np.array([0, 2, 5, 7, 8]) result = s[inds] expected = s.reindex(inds) assert_series_equal(result, expected) result = s[arr_inds] expected = s.reindex(arr_inds) assert_series_equal(result, expected) def test_basic_setitem_with_labels(self): indices = self.ts.index[[5, 10, 15]] cp = self.ts.copy() exp = self.ts.copy() cp[indices] = 0 exp.ix[indices] = 0 assert_series_equal(cp, exp) cp = self.ts.copy() exp = self.ts.copy() cp[indices[0]:indices[2]] = 0 exp.ix[indices[0]:indices[2]] = 0 assert_series_equal(cp, exp) # integer indexes, be careful s = Series(np.random.randn(10), index=lrange(0, 20, 2)) inds = [0, 4, 6] arr_inds = np.array([0, 4, 6]) 
cp = s.copy() exp = s.copy() s[inds] = 0 s.ix[inds] = 0 assert_series_equal(cp, exp) cp = s.copy() exp = s.copy() s[arr_inds] = 0 s.ix[arr_inds] = 0 assert_series_equal(cp, exp) inds_notfound = [0, 4, 5, 6] arr_inds_notfound = np.array([0, 4, 5, 6]) self.assertRaises(Exception, s.__setitem__, inds_notfound, 0) self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0) def test_ix_getitem(self): inds = self.series.index[[3, 4, 7]] assert_series_equal(self.series.ix[inds], self.series.reindex(inds)) assert_series_equal(self.series.ix[5::2], self.series[5::2]) # slice with indices d1, d2 = self.ts.index[[5, 15]] result = self.ts.ix[d1:d2] expected = self.ts.truncate(d1, d2) assert_series_equal(result, expected) # boolean mask = self.series > self.series.median() assert_series_equal(self.series.ix[mask], self.series[mask]) # ask for index value self.assertEqual(self.ts.ix[d1], self.ts[d1]) self.assertEqual(self.ts.ix[d2], self.ts[d2]) def test_ix_getitem_not_monotonic(self): d1, d2 = self.ts.index[[5, 15]] ts2 = self.ts[::2][[1, 2, 0]] self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2)) self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0) def test_ix_getitem_setitem_integer_slice_keyerrors(self): s = Series(np.random.randn(10), index=lrange(0, 20, 2)) # this is OK cp = s.copy() cp.ix[4:10] = 0 self.assertTrue((cp.ix[4:10] == 0).all()) # so is this cp = s.copy() cp.ix[3:11] = 0 self.assertTrue((cp.ix[3:11] == 0).values.all()) result = s.ix[4:10] result2 = s.ix[3:11] expected = s.reindex([4, 6, 8, 10]) assert_series_equal(result, expected) assert_series_equal(result2, expected) # non-monotonic, raise KeyError s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]] self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11)) self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0) def test_ix_getitem_iterator(self): idx = iter(self.series.index[:10]) result = self.series.ix[idx] assert_series_equal(result, self.series[:10]) def test_where(self): 
s = Series(np.random.randn(5)) cond = s > 0 rs = s.where(cond).dropna() rs2 = s[cond] assert_series_equal(rs, rs2) rs = s.where(cond, -s) assert_series_equal(rs, s.abs()) rs = s.where(cond) assert(s.shape == rs.shape) assert(rs is not s) # test alignment cond = Series([True,False,False,True,False],index=s.index) s2 = -(s.abs()) expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index) rs = s2.where(cond[:3]) assert_series_equal(rs, expected) expected = s2.abs() expected.ix[0] = s2[0] rs = s2.where(cond[:3], -s2) assert_series_equal(rs, expected) self.assertRaises(ValueError, s.where, 1) self.assertRaises(ValueError, s.where, cond[:3].values, -s) # GH 2745 s = Series([1, 2]) s[[True, False]] = [0, 1] expected = Series([0, 2]) assert_series_equal(s, expected) # failures self.assertRaises( ValueError, s.__setitem__, tuple([[[True, False]]]), [0, 2, 3]) self.assertRaises( ValueError, s.__setitem__, tuple([[[True, False]]]), []) # unsafe dtype changes for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64]: s = Series(np.arange(10), dtype=dtype) mask = s < 5 s[mask] = lrange(2, 7) expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype) assert_series_equal(s, expected) self.assertEqual(s.dtype, expected.dtype) # these are allowed operations, but are upcasted for dtype in [np.int64, np.float64]: s = Series(np.arange(10), dtype=dtype) mask = s < 5 values = [2.5, 3.5, 4.5, 5.5, 6.5] s[mask] = values expected = Series(values + lrange(5, 10), dtype='float64') assert_series_equal(s, expected) self.assertEqual(s.dtype, expected.dtype) # GH 9731 s = Series(np.arange(10), dtype='int64') mask = s > 5 values = [2.5, 3.5, 4.5, 5.5] s[mask] = values expected = Series(lrange(6) + values, dtype='float64') assert_series_equal(s, expected) # can't do these as we are forced to change the itemsize of the input # to something we cannot for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]: s = Series(np.arange(10), dtype=dtype) mask = s < 
5 values = [2.5, 3.5, 4.5, 5.5, 6.5] self.assertRaises(Exception, s.__setitem__, tuple(mask), values) # GH3235 s = Series(np.arange(10), dtype='int64') mask = s < 5 s[mask] = lrange(2, 7) expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64') assert_series_equal(s, expected) self.assertEqual(s.dtype, expected.dtype) s = Series(np.arange(10), dtype='int64') mask = s > 5 s[mask] = [0] * 4 expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64') assert_series_equal(s, expected) s = Series(np.arange(10)) mask = s > 5 def f(): s[mask] = [5,4,3,2,1] self.assertRaises(ValueError, f) def f(): s[mask] = [0] * 5 self.assertRaises(ValueError, f) # dtype changes s = Series([1,2,3,4]) result = s.where(s>2,np.nan) expected = Series([np.nan,np.nan,3,4]) assert_series_equal(result, expected) # GH 4667 # setting with None changes dtype s = Series(range(10)).astype(float) s[8] = None result = s[8] self.assertTrue(isnull(result)) s = Series(range(10)).astype(float) s[s > 8] = None result = s[isnull(s)] expected = Series(np.nan,index=[9]) assert_series_equal(result, expected) def test_where_setitem_invalid(self): # GH 2702 # make sure correct exceptions are raised on invalid list assignment # slice s = Series(list('abc')) def f(): s[0:3] = list(range(27)) self.assertRaises(ValueError, f) s[0:3] = list(range(3)) expected = Series([0,1,2]) assert_series_equal(s.astype(np.int64), expected, ) # slice with step s = Series(list('abcdef')) def f(): s[0:4:2] = list(range(27)) self.assertRaises(ValueError, f) s = Series(list('abcdef')) s[0:4:2] = list(range(2)) expected = Series([0,'b',1,'d','e','f']) assert_series_equal(s, expected) # neg slices s = Series(list('abcdef')) def f(): s[:-1] = list(range(27)) self.assertRaises(ValueError, f) s[-3:-1] = list(range(2)) expected = Series(['a','b','c',0,1,'f']) assert_series_equal(s, expected) # list s = Series(list('abc')) def f(): s[[0,1,2]] = list(range(27)) self.assertRaises(ValueError, f) s = Series(list('abc')) def f(): s[[0,1,2]] 
= list(range(2)) self.assertRaises(ValueError, f) # scalar s = Series(list('abc')) s[0] = list(range(10)) expected = Series([list(range(10)),'b','c']) assert_series_equal(s, expected) def test_where_broadcast(self): # Test a variety of differently sized series for size in range(2, 6): # Test a variety of boolean indices for selection in [np.resize([True, False, False, False, False], size), # First element should be set # Set alternating elements] np.resize([True, False], size), np.resize([False], size)]: # No element should be set # Test a variety of different numbers as content for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]: # Test numpy arrays, lists and tuples as the input to be # broadcast for arr in [np.array([item]), [item], (item,)]: data = np.arange(size, dtype=float) s = Series(data) s[selection] = arr # Construct the expected series by taking the source # data or item based on the selection expected = Series([item if use_item else data[i] for i, use_item in enumerate(selection)]) assert_series_equal(s, expected) s = Series(data) result = s.where(~selection, arr) assert_series_equal(result, expected) def test_where_inplace(self): s = Series(np.random.randn(5)) cond = s > 0 rs = s.copy() rs.where(cond, inplace=True) assert_series_equal(rs.dropna(), s[cond]) assert_series_equal(rs, s.where(cond)) rs = s.copy() rs.where(cond, -s, inplace=True) assert_series_equal(rs, s.where(cond, -s)) def test_where_dups(self): # GH 4550 # where crashes with dups in index s1 = Series(list(range(3))) s2 = Series(list(range(3))) comb = pd.concat([s1,s2]) result = comb.where(comb < 2) expected = Series([0,1,np.nan,0,1,np.nan],index=[0,1,2,0,1,2]) assert_series_equal(result, expected) # GH 4548 # inplace updating not working with dups comb[comb<1] = 5 expected = Series([5,1,2,5,1,2],index=[0,1,2,0,1,2]) assert_series_equal(comb, expected) comb[comb<2] += 10 expected = Series([5,11,2,5,11,2],index=[0,1,2,0,1,2]) assert_series_equal(comb, expected) def 
test_where_datetime(self): s = Series(date_range('20130102', periods=2)) expected = Series([10, 10], dtype='datetime64[ns]') mask = np.array([False, False]) rs = s.where(mask, [10, 10]) assert_series_equal(rs, expected) rs = s.where(mask, 10) assert_series_equal(rs, expected) rs = s.where(mask, 10.0) assert_series_equal(rs, expected) rs = s.where(mask, [10.0, 10.0]) assert_series_equal(rs, expected) rs = s.where(mask, [10.0, np.nan]) expected = Series([10, None], dtype='datetime64[ns]') assert_series_equal(rs, expected) def test_where_timedelta(self): s = Series([1, 2], dtype='timedelta64[ns]') expected = Series([10, 10], dtype='timedelta64[ns]') mask = np.array([False, False]) rs = s.where(mask, [10, 10]) assert_series_equal(rs, expected) rs = s.where(mask, 10) assert_series_equal(rs, expected) rs = s.where(mask, 10.0) assert_series_equal(rs, expected) rs = s.where(mask, [10.0, 10.0]) assert_series_equal(rs, expected) rs = s.where(mask, [10.0, np.nan]) expected = Series([10, None], dtype='timedelta64[ns]') assert_series_equal(rs, expected) def test_mask(self): # compare with tested results in test_where s = Series(np.random.randn(5)) cond = s > 0 rs = s.where(~cond, np.nan) assert_series_equal(rs, s.mask(cond)) rs = s.where(~cond) rs2 = s.mask(cond) assert_series_equal(rs, rs2) rs = s.where(~cond, -s) rs2 = s.mask(cond, -s) assert_series_equal(rs, rs2) cond = Series([True, False, False, True, False], index=s.index) s2 = -(s.abs()) rs = s2.where(~cond[:3]) rs2 = s2.mask(cond[:3]) assert_series_equal(rs, rs2) rs = s2.where(~cond[:3], -s2) rs2 = s2.mask(cond[:3], -s2) assert_series_equal(rs, rs2) self.assertRaises(ValueError, s.mask, 1) self.assertRaises(ValueError, s.mask, cond[:3].values, -s) # dtype changes s = Series([1,2,3,4]) result = s.mask(s>2, np.nan) expected = Series([1, 2, np.nan, np.nan]) assert_series_equal(result, expected) def test_mask_broadcast(self): # GH 8801 # copied from test_where_broadcast for size in range(2, 6): for selection in 
[np.resize([True, False, False, False, False], size), # First element should be set # Set alternating elements] np.resize([True, False], size), np.resize([False], size)]: # No element should be set for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]: for arr in [np.array([item]), [item], (item,)]: data = np.arange(size, dtype=float) s = Series(data) result = s.mask(selection, arr) expected = Series([item if use_item else data[i] for i, use_item in enumerate(selection)]) assert_series_equal(result, expected) def test_mask_inplace(self): s = Series(np.random.randn(5)) cond = s > 0 rs = s.copy() rs.mask(cond, inplace=True) assert_series_equal(rs.dropna(), s[~cond]) assert_series_equal(rs, s.mask(cond)) rs = s.copy() rs.mask(cond, -s, inplace=True) assert_series_equal(rs, s.mask(cond, -s)) def test_drop(self): # unique s = Series([1,2],index=['one','two']) expected = Series([1],index=['one']) result = s.drop(['two']) assert_series_equal(result,expected) result = s.drop('two', axis='rows') assert_series_equal(result,expected) # non-unique # GH 5248 s = Series([1,1,2],index=['one','two','one']) expected = Series([1,2],index=['one','one']) result = s.drop(['two'], axis=0) assert_series_equal(result,expected) result = s.drop('two') assert_series_equal(result,expected) expected = Series([1],index=['two']) result = s.drop(['one']) assert_series_equal(result,expected) result = s.drop('one') assert_series_equal(result,expected) # single string/tuple-like s = Series(range(3),index=list('abc')) self.assertRaises(ValueError, s.drop, 'bc') self.assertRaises(ValueError, s.drop, ('a',)) # errors='ignore' s = Series(range(3),index=list('abc')) result = s.drop('bc', errors='ignore') assert_series_equal(result, s) result = s.drop(['a', 'd'], errors='ignore') expected = s.ix[1:] assert_series_equal(result, expected) # bad axis self.assertRaises(ValueError, s.drop, 'one', axis='columns') # GH 8522 s = Series([2,3], index=[True, False]) 
self.assertTrue(s.index.is_object()) result = s.drop(True) expected = Series([3],index=[False]) assert_series_equal(result,expected) def test_ix_setitem(self): inds = self.series.index[[3, 4, 7]] result = self.series.copy() result.ix[inds] = 5 expected = self.series.copy() expected[[3, 4, 7]] = 5 assert_series_equal(result, expected) result.ix[5:10] = 10 expected[5:10] = 10 assert_series_equal(result, expected) # set slice with indices d1, d2 = self.series.index[[5, 15]] result.ix[d1:d2] = 6 expected[5:16] = 6 # because it's inclusive assert_series_equal(result, expected) # set index value self.series.ix[d1] = 4 self.series.ix[d2] = 6 self.assertEqual(self.series[d1], 4) self.assertEqual(self.series[d2], 6) def test_where_numeric_with_string(self): # GH 9280 s = pd.Series([1, 2, 3]) w = s.where(s>1, 'X') self.assertFalse(com.is_integer(w[0])) self.assertTrue(com.is_integer(w[1])) self.assertTrue(com.is_integer(w[2])) self.assertTrue(isinstance(w[0], str)) self.assertTrue(w.dtype == 'object') w = s.where(s>1, ['X', 'Y', 'Z']) self.assertFalse(com.is_integer(w[0])) self.assertTrue(com.is_integer(w[1])) self.assertTrue(com.is_integer(w[2])) self.assertTrue(isinstance(w[0], str)) self.assertTrue(w.dtype == 'object') w = s.where(s>1, np.array(['X', 'Y', 'Z'])) self.assertFalse(com.is_integer(w[0])) self.assertTrue(com.is_integer(w[1])) self.assertTrue(com.is_integer(w[2])) self.assertTrue(isinstance(w[0], str)) self.assertTrue(w.dtype == 'object') def test_setitem_boolean(self): mask = self.series > self.series.median() # similiar indexed series result = self.series.copy() result[mask] = self.series * 2 expected = self.series * 2 assert_series_equal(result[mask], expected[mask]) # needs alignment result = self.series.copy() result[mask] = (self.series * 2)[0:5] expected = (self.series * 2)[0:5].reindex_like(self.series) expected[-mask] = self.series[mask] assert_series_equal(result[mask], expected[mask]) def test_ix_setitem_boolean(self): mask = self.series > 
self.series.median() result = self.series.copy() result.ix[mask] = 0 expected = self.series expected[mask] = 0 assert_series_equal(result, expected) def test_ix_setitem_corner(self): inds = list(self.series.index[[5, 8, 12]]) self.series.ix[inds] = 5 self.assertRaises(Exception, self.series.ix.__setitem__, inds + ['foo'], 5) def test_get_set_boolean_different_order(self): ordered = self.series.sort_values() # setting copy = self.series.copy() copy[ordered > 0] = 0 expected = self.series.copy() expected[expected > 0] = 0 assert_series_equal(copy, expected) # getting sel = self.series[ordered > 0] exp = self.series[self.series > 0] assert_series_equal(sel, exp) def test_repr(self): str(self.ts) str(self.series) str(self.series.astype(int)) str(self.objSeries) str(Series(tm.randn(1000), index=np.arange(1000))) str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1))) # empty str(self.empty) # with NaNs self.series[5:7] = np.NaN str(self.series) # with Nones ots = self.ts.astype('O') ots[::2] = None repr(ots) # various names for name in ['', 1, 1.2, 'foo', u('\u03B1\u03B2\u03B3'), 'loooooooooooooooooooooooooooooooooooooooooooooooooooong', ('foo', 'bar', 'baz'), (1, 2), ('foo', 1, 2.3), (u('\u03B1'), u('\u03B2'), u('\u03B3')), (u('\u03B1'), 'bar')]: self.series.name = name repr(self.series) biggie = Series(tm.randn(1000), index=np.arange(1000), name=('foo', 'bar', 'baz')) repr(biggie) # 0 as name ser = Series(np.random.randn(100), name=0) rep_str = repr(ser) self.assertIn("Name: 0", rep_str) # tidy repr ser = Series(np.random.randn(1001), name=0) rep_str = repr(ser) self.assertIn("Name: 0", rep_str) ser = Series(["a\n\r\tb"], name=["a\n\r\td"], index=["a\n\r\tf"]) self.assertFalse("\t" in repr(ser)) self.assertFalse("\r" in repr(ser)) self.assertFalse("a\n" in repr(ser)) # with empty series (#4651) s = Series([], dtype=np.int64, name='foo') self.assertEqual(repr(s), 'Series([], Name: foo, dtype: int64)') s = Series([], dtype=np.int64, name=None) 
self.assertEqual(repr(s), 'Series([], dtype: int64)')

def test_tidy_repr(self):
    # repr of a long, named, unicode-valued Series must not raise
    a = Series([u("\u05d0")] * 1000)
    a.name = 'title1'
    repr(a)  # should not raise exception

def test_repr_bool_fails(self):
    # repr of a Series of DataFrames must not write anything to stderr
    s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])

    import sys

    buf = StringIO()
    tmp = sys.stderr
    sys.stderr = buf
    try:
        # it works (with no Cython exception barf)!
        repr(s)
    finally:
        sys.stderr = tmp
    self.assertEqual(buf.getvalue(), '')

def test_repr_name_iterable_indexable(self):
    # repr must cope with names that are iterable and/or indexable
    s = Series([1, 2, 3], name=np.int64(3))

    # it works!
    repr(s)

    s.name = (u("\u05d0"),) * 2
    repr(s)

def test_repr_should_return_str(self):
    # http://docs.python.org/py3k/reference/datamodel.html#object.__repr__
    # http://docs.python.org/reference/datamodel.html#object.__repr__
    # ...The return value must be a string object.
    # (str on py2.x, str (unicode) on py3)

    data = [8, 5, 3, 5]
    index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
    df = Series(data, index=index1)
    # BUG FIX: the original asserted type(df.__repr__() == str) -- the type
    # of a *bool*, which is always truthy, so nothing was actually checked.
    # Assert the type of the repr result itself.
    self.assertTrue(type(df.__repr__()) == str)  # both py2 / 3

def test_repr_max_rows(self):
    # GH 6863: repr with unlimited max_rows must not raise
    with pd.option_context('max_rows', None):
        str(Series(range(1001)))  # should not raise exception

def test_unicode_string_with_unicode(self):
    # str()/text conversion of unicode contents must not raise
    df = Series([u("\u05d0")], name=u("\u05d1"))
    if compat.PY3:
        str(df)
    else:
        compat.text_type(df)

def test_bytestring_with_unicode(self):
    # bytes()/str() conversion of unicode contents must not raise
    df = Series([u("\u05d0")], name=u("\u05d1"))
    if compat.PY3:
        bytes(df)
    else:
        str(df)

def test_timeseries_repr_object_dtype(self):
    # repr of datetime-object index and of a non-monotonic datetime sample
    index = Index([datetime(2000, 1, 1) + timedelta(i)
                   for i in range(1000)], dtype=object)
    ts = Series(np.random.randn(len(index)), index)
    repr(ts)

    ts = tm.makeTimeSeries(1000)
    # a regular-frequency time series advertises its freq in the last line
    self.assertTrue(repr(ts).splitlines()[-1].startswith('Freq:'))

    ts2 = ts.ix[np.random.randint(0, len(ts) - 1, 400)]
    repr(ts2).splitlines()[-1]

def test_timeseries_periodindex(self):
    # GH2891: a PeriodIndex-ed Series keeps its freq through pickling
    from pandas import period_range
    prng = period_range('1/1/2011', '1/1/2012', freq='M')
    ts = Series(np.random.randn(len(prng)), prng)
    new_ts = self.round_trip_pickle(ts)
self.assertEqual(new_ts.index.freq, 'M') def test_iter(self): for i, val in enumerate(self.series): self.assertEqual(val, self.series[i]) for i, val in enumerate(self.ts): self.assertEqual(val, self.ts[i]) def test_keys(self): # HACK: By doing this in two stages, we avoid 2to3 wrapping the call # to .keys() in a list() getkeys = self.ts.keys self.assertIs(getkeys(), self.ts.index) def test_values(self): self.assert_numpy_array_equal(self.ts, self.ts.values) def test_iteritems(self): for idx, val in compat.iteritems(self.series): self.assertEqual(val, self.series[idx]) for idx, val in compat.iteritems(self.ts): self.assertEqual(val, self.ts[idx]) # assert is lazy (genrators don't define reverse, lists do) self.assertFalse(hasattr(self.series.iteritems(), 'reverse')) def test_sum(self): self._check_stat_op('sum', np.sum, check_allna=True) def test_sum_inf(self): import pandas.core.nanops as nanops s = Series(np.random.randn(10)) s2 = s.copy() s[5:8] = np.inf s2[5:8] = np.nan self.assertTrue(np.isinf(s.sum())) arr = np.random.randn(100, 100).astype('f4') arr[:, 2] = np.inf with cf.option_context("mode.use_inf_as_null", True): assert_almost_equal(s.sum(), s2.sum()) res = nanops.nansum(arr, axis=1) self.assertTrue(np.isinf(res).all()) def test_mean(self): self._check_stat_op('mean', np.mean) def test_median(self): self._check_stat_op('median', np.median) # test with integers, test failure int_ts = Series(np.ones(10, dtype=int), index=lrange(10)) self.assertAlmostEqual(np.median(int_ts), int_ts.median()) def test_mode(self): s = Series([12, 12, 11, 10, 19, 11]) exp = Series([11, 12]) assert_series_equal(s.mode(), exp) assert_series_equal(Series([1, 2, 3]).mode(), Series([], dtype='int64')) lst = [5] * 20 + [1] * 10 + [6] * 25 np.random.shuffle(lst) s = Series(lst) assert_series_equal(s.mode(), Series([6])) s = Series([5] * 10) assert_series_equal(s.mode(), Series([5])) s = Series(lst) s[0] = np.nan assert_series_equal(s.mode(), Series([6.])) s = 
Series(list('adfasbasfwewefwefweeeeasdfasnbam')) assert_series_equal(s.mode(), Series(['e'])) s = Series(['2011-01-03', '2013-01-02', '1900-05-03'], dtype='M8[ns]') assert_series_equal(s.mode(), Series([], dtype="M8[ns]")) s = Series(['2011-01-03', '2013-01-02', '1900-05-03', '2011-01-03', '2013-01-02'], dtype='M8[ns]') assert_series_equal(s.mode(), Series(['2011-01-03', '2013-01-02'], dtype='M8[ns]')) def test_prod(self): self._check_stat_op('prod', np.prod) def test_min(self): self._check_stat_op('min', np.min, check_objects=True) def test_max(self): self._check_stat_op('max', np.max, check_objects=True) def test_var_std(self): alt = lambda x: np.std(x, ddof=1) self._check_stat_op('std', alt) alt = lambda x: np.var(x, ddof=1) self._check_stat_op('var', alt) result = self.ts.std(ddof=4) expected = np.std(self.ts.values, ddof=4) assert_almost_equal(result, expected) result = self.ts.var(ddof=4) expected = np.var(self.ts.values, ddof=4) assert_almost_equal(result, expected) # 1 - element series with ddof=1 s = self.ts.iloc[[0]] result = s.var(ddof=1) self.assertTrue(isnull(result)) result = s.std(ddof=1) self.assertTrue(isnull(result)) def test_sem(self): alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x)) self._check_stat_op('sem', alt) result = self.ts.sem(ddof=4) expected = np.std(self.ts.values, ddof=4)/np.sqrt(len(self.ts.values)) assert_almost_equal(result, expected) # 1 - element series with ddof=1 s = self.ts.iloc[[0]] result = s.sem(ddof=1) self.assertTrue(isnull(result)) def test_skew(self): tm._skip_if_no_scipy() from scipy.stats import skew alt = lambda x: skew(x, bias=False) self._check_stat_op('skew', alt) # test corner cases, skew() returns NaN unless there's at least 3 values min_N = 3 for i in range(1, min_N + 1): s = Series(np.ones(i)) df = DataFrame(np.ones((i, i))) if i < min_N: self.assertTrue(np.isnan(s.skew())) self.assertTrue(np.isnan(df.skew()).all()) else: self.assertEqual(0, s.skew()) self.assertTrue((df.skew() == 0).all()) def 
test_kurt(self): tm._skip_if_no_scipy() from scipy.stats import kurtosis alt = lambda x: kurtosis(x, bias=False) self._check_stat_op('kurt', alt) index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]], labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]]) s = Series(np.random.randn(6), index=index) self.assertAlmostEqual(s.kurt(), s.kurt(level=0)['bar']) # test corner cases, kurt() returns NaN unless there's at least 4 values min_N = 4 for i in range(1, min_N + 1): s = Series(np.ones(i)) df = DataFrame(np.ones((i, i))) if i < min_N: self.assertTrue(np.isnan(s.kurt())) self.assertTrue(np.isnan(df.kurt()).all()) else: self.assertEqual(0, s.kurt()) self.assertTrue((df.kurt() == 0).all()) def test_argsort(self): self._check_accum_op('argsort') argsorted = self.ts.argsort() self.assertTrue(issubclass(argsorted.dtype.type, np.integer)) # GH 2967 (introduced bug in 0.11-dev I think) s = Series([Timestamp('201301%02d' % (i + 1)) for i in range(5)]) self.assertEqual(s.dtype, 'datetime64[ns]') shifted = s.shift(-1) self.assertEqual(shifted.dtype, 'datetime64[ns]') self.assertTrue(isnull(shifted[4])) result = s.argsort() expected = Series(lrange(5), dtype='int64') assert_series_equal(result, expected) result = shifted.argsort() expected = Series(lrange(4) + [-1], dtype='int64') assert_series_equal(result, expected) def test_argsort_stable(self): s = Series(np.random.randint(0, 100, size=10000)) mindexer = s.argsort(kind='mergesort') qindexer = s.argsort() mexpected = np.argsort(s.values, kind='mergesort') qexpected = np.argsort(s.values, kind='quicksort') self.assert_numpy_array_equal(mindexer, mexpected) self.assert_numpy_array_equal(qindexer, qexpected) self.assertFalse(np.array_equal(qindexer, mindexer)) def test_reorder_levels(self): index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]], labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]], names=['L0', 'L1', 'L2']) s = Series(np.arange(6), index=index) # no 
        # --- tail of test_reorder_levels (def is in the previous chunk) ---
        # no change, position
        result = s.reorder_levels([0, 1, 2])
        assert_series_equal(s, result)

        # no change, labels
        result = s.reorder_levels(['L0', 'L1', 'L2'])
        assert_series_equal(s, result)

        # rotate, position
        result = s.reorder_levels([1, 2, 0])
        e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
                           labels=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1],
                                   [0, 0, 0, 0, 0, 0]],
                           names=['L1', 'L2', 'L0'])
        expected = Series(np.arange(6), index=e_idx)
        assert_series_equal(result, expected)

        # a level may be repeated in the reordering
        result = s.reorder_levels([0, 0, 0])
        e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
                           labels=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0],
                                   [0, 0, 0, 0, 0, 0]],
                           names=['L0', 'L0', 'L0'])
        expected = Series(range(6), index=e_idx)
        assert_series_equal(result, expected)

        result = s.reorder_levels(['L0', 'L0', 'L0'])
        assert_series_equal(result, expected)

    def test_cumsum(self):
        self._check_accum_op('cumsum')

    def test_cumprod(self):
        self._check_accum_op('cumprod')

    def test_cummin(self):
        # cummin matches np.minimum.accumulate; NaNs are skipped
        self.assert_numpy_array_equal(self.ts.cummin(),
                                      np.minimum.accumulate(np.array(self.ts)))
        ts = self.ts.copy()
        ts[::2] = np.NaN
        result = ts.cummin()[1::2]
        expected = np.minimum.accumulate(ts.valid())

        self.assert_numpy_array_equal(result, expected)

    def test_cummax(self):
        # cummax matches np.maximum.accumulate; NaNs are skipped
        self.assert_numpy_array_equal(self.ts.cummax(),
                                      np.maximum.accumulate(np.array(self.ts)))
        ts = self.ts.copy()
        ts[::2] = np.NaN
        result = ts.cummax()[1::2]
        expected = np.maximum.accumulate(ts.valid())

        self.assert_numpy_array_equal(result, expected)

    def test_cummin_datetime64(self):
        # NaT is skipped with skipna=True, propagates the running min with
        # skipna=False
        s = pd.Series(pd.to_datetime(
            ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))

        expected = pd.Series(pd.to_datetime(
            ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-1']))
        result = s.cummin(skipna=True)
        self.assert_series_equal(expected, result)

        expected = pd.Series(pd.to_datetime(
            ['NaT', '2000-1-2', '2000-1-2', '2000-1-1', '2000-1-1',
             '2000-1-1']))
        result = s.cummin(skipna=False)
        self.assert_series_equal(expected, result)

    def test_cummax_datetime64(self):
        s = pd.Series(pd.to_datetime(
            ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))

        expected = pd.Series(pd.to_datetime(
            ['NaT', '2000-1-2', 'NaT', '2000-1-2', 'NaT', '2000-1-3']))
        result = s.cummax(skipna=True)
        self.assert_series_equal(expected, result)

        expected = pd.Series(pd.to_datetime(
            ['NaT', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-2',
             '2000-1-3']))
        result = s.cummax(skipna=False)
        self.assert_series_equal(expected, result)

    def test_cummin_timedelta64(self):
        s = pd.Series(pd.to_timedelta(
            ['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))

        expected = pd.Series(pd.to_timedelta(
            ['NaT', '2 min', 'NaT', '1 min', 'NaT', '1 min', ]))
        result = s.cummin(skipna=True)
        self.assert_series_equal(expected, result)

        expected = pd.Series(pd.to_timedelta(
            ['NaT', '2 min', '2 min', '1 min', '1 min', '1 min', ]))
        result = s.cummin(skipna=False)
        self.assert_series_equal(expected, result)

    def test_cummax_timedelta64(self):
        s = pd.Series(pd.to_timedelta(
            ['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))

        expected = pd.Series(pd.to_timedelta(
            ['NaT', '2 min', 'NaT', '2 min', 'NaT', '3 min', ]))
        result = s.cummax(skipna=True)
        self.assert_series_equal(expected, result)

        expected = pd.Series(pd.to_timedelta(
            ['NaT', '2 min', '2 min', '2 min', '2 min', '3 min', ]))
        result = s.cummax(skipna=False)
        self.assert_series_equal(expected, result)

    def test_npdiff(self):
        raise nose.SkipTest("skipping due to Series no longer being an "
                            "ndarray")

        # no longer works as the return type of np.diff is now nd.array
        s = Series(np.arange(5))

        r = np.diff(s)
        assert_series_equal(Series([nan, 0, 0, 0, nan]), r)

    def _check_stat_op(self, name, alternate, check_objects=False,
                       check_allna=False):
        """Shared checks for the reducing stat op `name`.

        `alternate` is a reference implementation (on ndarrays) the Series
        method must agree with.  `check_objects` also exercises datetime
        data; `check_allna` asserts the all-NaN behavior (xref GH 9422).
        """
        import pandas.core.nanops as nanops

        def testit():
            f = getattr(Series, name)

            # add some NaNs
            self.series[5:15] = np.NaN

            # idxmax, idxmin, min, and max are valid for dates
            if name not in ['max', 'min']:
                ds = Series(date_range('1/1/2001', periods=10))
                self.assertRaises(TypeError, f, ds)

            # skipna or no
            self.assertTrue(notnull(f(self.series)))
            self.assertTrue(isnull(f(self.series, skipna=False)))

            # check the result is correct
            nona = self.series.dropna()
            assert_almost_equal(f(nona), alternate(nona.values))
            assert_almost_equal(f(self.series), alternate(nona.values))

            allna = self.series * nan

            if check_allna:
                # xref 9422
                # bottleneck >= 1.0 give 0.0 for an allna Series sum
                try:
                    self.assertTrue(nanops._USE_BOTTLENECK)
                    import bottleneck as bn
                    self.assertTrue(bn.__version__ >= LooseVersion('1.0'))
                    self.assertEqual(f(allna), 0.0)
                except:
                    self.assertTrue(np.isnan(f(allna)))

            # dtype=object with None, it works!
            s = Series([1, 2, 3, None, 5])
            f(s)

            # 2888
            l = [0]
            l.extend(lrange(2 ** 40, 2 ** 40 + 1000))
            s = Series(l, dtype='int64')
            assert_almost_equal(float(f(s)), float(alternate(s.values)))

            # check date range
            if check_objects:
                s = Series(bdate_range('1/1/2000', periods=10))
                res = f(s)
                exp = alternate(s)
                self.assertEqual(res, exp)

            # check on string data
            if name not in ['sum', 'min', 'max']:
                self.assertRaises(TypeError, f, Series(list('abc')))

            # Invalid axis.
            self.assertRaises(ValueError, f, self.series, axis=1)

            # Unimplemented numeric_only parameter.
            if 'numeric_only' in getargspec(f).args:
                self.assertRaisesRegexp(NotImplementedError, name, f,
                                        self.series, numeric_only=True)

        testit()

        # run the same battery both with and without the bottleneck
        # fast path, when bottleneck is installed
        try:
            import bottleneck as bn
            nanops._USE_BOTTLENECK = False
            testit()
            nanops._USE_BOTTLENECK = True
        except ImportError:
            pass

    def _check_accum_op(self, name):
        """Check accumulating op `name` (cumsum/cumprod/...) against numpy."""
        func = getattr(np, name)
        self.assert_numpy_array_equal(func(self.ts),
                                      func(np.array(self.ts)))

        # with missing values
        ts = self.ts.copy()
        ts[::2] = np.NaN

        result = func(ts)[1::2]
        expected = func(np.array(ts.valid()))

        self.assert_numpy_array_equal(result, expected)

    def test_round(self):
        # numpy.round doesn't preserve metadata, probably a numpy bug,
        # re: GH #314
        result = np.round(self.ts, 2)
        expected = Series(np.round(self.ts.values, 2), index=self.ts.index,
                          name='ts')
        assert_series_equal(result, expected)
        self.assertEqual(result.name, self.ts.name)

    def test_prod_numpy16_bug(self):
        s = Series([1., 1., 1.], index=lrange(3))
        result = s.prod()
        # result must be a scalar, not a Series
        self.assertNotIsInstance(result, Series)

    def test_quantile(self):
        from numpy import percentile

        q = self.ts.quantile(0.1)
        self.assertEqual(q, percentile(self.ts.valid(), 10))

        q = self.ts.quantile(0.9)
        self.assertEqual(q, percentile(self.ts.valid(), 90))

        # object dtype
        q = Series(self.ts, dtype=object).quantile(0.9)
        self.assertEqual(q, percentile(self.ts.valid(), 90))

        # datetime64[ns] dtype
        dts = self.ts.index.to_series()
        q = dts.quantile(.2)
        self.assertEqual(q, Timestamp('2000-01-10 19:12:00'))

        # timedelta64[ns] dtype
        tds = dts.diff()
        q = tds.quantile(.25)
        self.assertEqual(q, pd.to_timedelta('24:00:00'))

        # GH7661
        result = Series([np.timedelta64('NaT')]).sum()
        self.assertTrue(result is pd.NaT)

        msg = 'percentiles should all be in the interval \\[0, 1\\]'
        for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
            with tm.assertRaisesRegexp(ValueError, msg):
                self.ts.quantile(invalid)

    def test_quantile_multi(self):
        from numpy import percentile

        qs = [.1, .9]
        result = self.ts.quantile(qs)
        expected = pd.Series([percentile(self.ts.valid(), 10),
                              percentile(self.ts.valid(), 90)],
                             index=qs, name=self.ts.name)
        assert_series_equal(result, expected)

        dts = self.ts.index.to_series()
        dts.name = 'xxx'
        # duplicate quantiles are allowed and the name is preserved
        result = dts.quantile((.2, .2))
        expected = Series([Timestamp('2000-01-10 19:12:00'),
                           Timestamp('2000-01-10 19:12:00')],
                          index=[.2, .2], name='xxx')
        assert_series_equal(result, expected)

        # empty list of quantiles -> empty float-indexed Series
        result = self.ts.quantile([])
        expected = pd.Series([], name=self.ts.name,
                             index=Index([], dtype=float))
        assert_series_equal(result, expected)

    def test_append(self):
        appendedSeries = self.series.append(self.objSeries)
        for idx, value in compat.iteritems(appendedSeries):
            if idx in self.series.index:
                self.assertEqual(value, self.series[idx])
            elif idx in self.objSeries.index:
                self.assertEqual(value, self.objSeries[idx])
            else:
                self.fail("orphaned index!")

        # overlapping indexes must raise when integrity is verified
        self.assertRaises(ValueError, self.ts.append, self.ts,
                          verify_integrity=True)

    def test_append_many(self):
        pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]

        result = pieces[0].append(pieces[1:])
        assert_series_equal(result, self.ts)

    def test_all_any(self):
        ts = tm.makeTimeSeries()
        bool_series = ts > 0
        self.assertFalse(bool_series.all())
        self.assertTrue(bool_series.any())

        # Alternative types, with implicit 'object' dtype.
        s = Series(['abc', True])
        self.assertEqual('abc', s.any())  # 'abc' || True => 'abc'

    def test_all_any_params(self):
        # Check skipna, with implicit 'object' dtype.
        s1 = Series([np.nan, True])
        s2 = Series([np.nan, False])
        self.assertTrue(s1.all(skipna=False))  # nan && True => True
        self.assertTrue(s1.all(skipna=True))
        self.assertTrue(np.isnan(s2.any(skipna=False)))  # nan || False => nan
        self.assertFalse(s2.any(skipna=True))

        # Check level.
        s = pd.Series([False, False, True, True, False, True],
                      index=[0, 0, 1, 1, 2, 2])
        assert_series_equal(s.all(level=0), Series([False, True, False]))
        assert_series_equal(s.any(level=0), Series([False, True, True]))

        # bool_only is not implemented with level option.
        self.assertRaises(NotImplementedError, s.any, bool_only=True, level=0)
        self.assertRaises(NotImplementedError, s.all, bool_only=True, level=0)

        # bool_only is not implemented alone.
        self.assertRaises(NotImplementedError, s.any, bool_only=True)
        self.assertRaises(NotImplementedError, s.all, bool_only=True)

    def test_op_method(self):
        # Series.<op> must agree with the corresponding operator.<op>,
        # including the reflected ("r"-prefixed) variants
        def check(series, other, check_reverse=False):
            simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
            if not compat.PY3:
                simple_ops.append('div')

            for opname in simple_ops:
                op = getattr(Series, opname)

                if op == 'div':
                    alt = operator.truediv
                else:
                    alt = getattr(operator, opname)

                result = op(series, other)
                expected = alt(series, other)
                tm.assert_almost_equal(result, expected)
                if check_reverse:
                    rop = getattr(Series, "r" + opname)
                    result = rop(series, other)
                    expected = alt(other, series)
                    tm.assert_almost_equal(result, expected)

        check(self.ts, self.ts * 2)
        check(self.ts, self.ts[::2])
        check(self.ts, 5, check_reverse=True)
        check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)

    def test_neg(self):
        assert_series_equal(-self.series, -1 * self.series)

    def test_invert(self):
        assert_series_equal(-(self.series < 0), ~(self.series < 0))

    def test_modulo(self):
        # GH3590, modulo as ints
        p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
        result = p['first'] % p['second']
        expected = Series(p['first'].values % p['second'].values,
                          dtype='float64')
        expected.iloc[0:3] = np.nan
        assert_series_equal(result, expected)

        result = p['first'] % 0
        expected = Series(np.nan, index=p.index, name='first')
        assert_series_equal(result, expected)

        p = p.astype('float64')
        result = p['first'] % p['second']
        expected = Series(p['first'].values % p['second'].values)
        assert_series_equal(result, expected)

        p = p.astype('float64')
        result = p['first'] % p['second']
        result2 = p['second'] % p['first']
        self.assertFalse(np.array_equal(result, result2))

        # GH 9144
        s = Series([0, 1])

        result = s % 0
        expected = Series([nan, nan])
        assert_series_equal(result, expected)

        result = 0 % s
        expected = Series([nan, 0.0])
        assert_series_equal(result, expected)

    def test_div(self):
        # no longer do integer div for any ops, but deal with the 0's
        p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
        result = p['first'] / p['second']
        expected = Series(p['first'].values.astype(float) /
                          p['second'].values, dtype='float64')
        expected.iloc[0:3] = np.inf
        assert_series_equal(result, expected)

        result = p['first'] / 0
        expected = Series(np.inf, index=p.index, name='first')
        assert_series_equal(result, expected)

        p = p.astype('float64')
        result = p['first'] / p['second']
        expected = Series(p['first'].values / p['second'].values)
        assert_series_equal(result, expected)

        p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
        result = p['first'] / p['second']
        assert_series_equal(result, p['first'].astype('float64'),
                            check_names=False)
        self.assertTrue(result.name is None)
        self.assertFalse(np.array_equal(result, p['second'] / p['first']))

        # inf signing
        s = Series([np.nan, 1., -1.])
        result = s / 0
        expected = Series([np.nan, np.inf, -np.inf])
        assert_series_equal(result, expected)

        # float/integer issue
        # GH 7785
        p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
        expected = Series([-0.01, -np.inf])

        result = p['second'].div(p['first'])
        assert_series_equal(result, expected, check_names=False)

        result = p['second'] / p['first']
        assert_series_equal(result, expected)

        # GH 9144
        s = Series([-1, 0, 1])

        result = 0 / s
        expected = Series([0.0, nan, 0.0])
        assert_series_equal(result, expected)

        result = s / 0
        expected = Series([-inf, nan, inf])
        assert_series_equal(result, expected)

        result = s // 0
        expected = Series([-inf, nan, inf])
        assert_series_equal(result, expected)

    def test_operators(self):
        # every binary op (and its reflected form) must agree with the
        # pure-python Series.combine implementation
        def _check_op(series, other, op, pos_only=False):
            left = np.abs(series) if pos_only else series
            right = np.abs(other) if pos_only else other

            cython_or_numpy = op(left, right)
            python = left.combine(right, op)
            tm.assert_almost_equal(cython_or_numpy, python)

        def check(series, other):
            simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']

            for opname in simple_ops:
                _check_op(series, other, getattr(operator, opname))

            _check_op(series, other, operator.pow, pos_only=True)

            _check_op(series, other, lambda x, y: operator.add(y, x))
            _check_op(series, other, lambda x, y: operator.sub(y, x))
            _check_op(series, other, lambda x, y: operator.truediv(y, x))
            _check_op(series, other, lambda x, y: operator.floordiv(y, x))
            _check_op(series, other, lambda x, y: operator.mul(y, x))
            _check_op(series, other, lambda x, y: operator.pow(y, x),
                      pos_only=True)
            _check_op(series, other, lambda x, y: operator.mod(y, x))

        check(self.ts, self.ts * 2)
        check(self.ts, self.ts * 0)
        check(self.ts, self.ts[::2])
        check(self.ts, 5)

        def check_comparators(series, other):
            _check_op(series, other, operator.gt)
            _check_op(series, other, operator.ge)
            _check_op(series, other, operator.eq)
            _check_op(series, other, operator.lt)
            _check_op(series, other, operator.le)

        check_comparators(self.ts, 5)
        check_comparators(self.ts, self.ts + 1)

    def test_operators_empty_int_corner(self):
        s1 = Series([], [], dtype=np.int32)
        s2 = Series({'x': 0.})
        tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))

    def test_constructor_dtype_timedelta64(self):

        # basic
        td = Series([timedelta(days=i) for i in range(3)])
        self.assertEqual(td.dtype, 'timedelta64[ns]')

        td = Series([timedelta(days=1)])
        self.assertEqual(td.dtype, 'timedelta64[ns]')

        td = Series([timedelta(days=1), timedelta(days=2),
                     np.timedelta64(1, 's')])
        self.assertEqual(td.dtype, 'timedelta64[ns]')

        # mixed with NaT
        from pandas import tslib
        td = Series([timedelta(days=1), tslib.NaT], dtype='m8[ns]')
        self.assertEqual(td.dtype, 'timedelta64[ns]')

        td = Series([timedelta(days=1), np.nan], dtype='m8[ns]')
        self.assertEqual(td.dtype, 'timedelta64[ns]')

        td = Series([np.timedelta64(300000000), pd.NaT], dtype='m8[ns]')
        self.assertEqual(td.dtype, 'timedelta64[ns]')

        # improved inference
        # GH5689
        td = Series([np.timedelta64(300000000),
                     pd.NaT])
        self.assertEqual(td.dtype, 'timedelta64[ns]')

        td = Series([np.timedelta64(300000000), tslib.iNaT])
        self.assertEqual(td.dtype, 'timedelta64[ns]')

        td = Series([np.timedelta64(300000000), np.nan])
        self.assertEqual(td.dtype, 'timedelta64[ns]')

        td = Series([pd.NaT, np.timedelta64(300000000)])
        self.assertEqual(td.dtype, 'timedelta64[ns]')

        td = Series([np.timedelta64(1, 's')])
        self.assertEqual(td.dtype, 'timedelta64[ns]')

        # these are frequency conversion astypes
        # for t in ['s', 'D', 'us', 'ms']:
        #     self.assertRaises(TypeError, td.astype, 'm8[%s]' % t)

        # valid astype
        td.astype('int64')

        # invalid casting
        self.assertRaises(TypeError, td.astype, 'int32')

        # this is an invalid casting
        def f():
            Series([timedelta(days=1), 'foo'], dtype='m8[ns]')
        self.assertRaises(Exception, f)

        # leave as object here
        td = Series([timedelta(days=i) for i in range(3)] + ['foo'])
        self.assertEqual(td.dtype, 'object')

        # these will correctly infer a timedelta
        s = Series([None, pd.NaT, '1 Day'])
        self.assertEqual(s.dtype, 'timedelta64[ns]')
        s = Series([np.nan, pd.NaT, '1 Day'])
        self.assertEqual(s.dtype, 'timedelta64[ns]')
        s = Series([pd.NaT, None, '1 Day'])
        self.assertEqual(s.dtype, 'timedelta64[ns]')
        s = Series([pd.NaT, np.nan, '1 Day'])
        self.assertEqual(s.dtype, 'timedelta64[ns]')

    def test_operators_timedelta64(self):

        # invalid ops
        self.assertRaises(Exception, self.objSeries.__add__, 1)
        self.assertRaises(Exception, self.objSeries.__add__,
                          np.array(1, dtype=np.int64))
        self.assertRaises(Exception, self.objSeries.__sub__, 1)
        self.assertRaises(Exception, self.objSeries.__sub__,
                          np.array(1, dtype=np.int64))

        # series ops
        v1 = date_range('2012-1-1', periods=3, freq='D')
        v2 = date_range('2012-1-2', periods=3, freq='D')
        rs = Series(v2) - Series(v1)
        xp = Series(1e9 * 3600 * 24, rs.index).astype(
            'int64').astype('timedelta64[ns]')
        assert_series_equal(rs, xp)
        self.assertEqual(rs.dtype, 'timedelta64[ns]')

        df = DataFrame(dict(A=v1))
        td = Series([timedelta(days=i) for i in range(3)])
        self.assertEqual(td.dtype, 'timedelta64[ns]')

        # series on the rhs
        result = df['A'] - df['A'].shift()
        self.assertEqual(result.dtype, 'timedelta64[ns]')

        result = df['A'] + td
        self.assertEqual(result.dtype, 'M8[ns]')

        # scalar Timestamp on rhs
        maxa = df['A'].max()
        tm.assertIsInstance(maxa, Timestamp)

        resultb = df['A'] - df['A'].max()
        self.assertEqual(resultb.dtype, 'timedelta64[ns]')

        # timestamp on lhs
        result = resultb + df['A']
        values = [Timestamp('20111230'), Timestamp('20120101'),
                  Timestamp('20120103')]
        expected = Series(values, name='A')
        assert_series_equal(result, expected)

        # datetimes on rhs
        result = df['A'] - datetime(2001, 1, 1)
        expected = Series([timedelta(days=4017 + i) for i in range(3)],
                          name='A')
        assert_series_equal(result, expected)
        self.assertEqual(result.dtype, 'm8[ns]')

        d = datetime(2001, 1, 1, 3, 4)
        resulta = df['A'] - d
        self.assertEqual(resulta.dtype, 'm8[ns]')

        # roundtrip
        resultb = resulta + d
        assert_series_equal(df['A'], resultb)

        # timedeltas on rhs
        td = timedelta(days=1)
        resulta = df['A'] + td
        resultb = resulta - td
        assert_series_equal(resultb, df['A'])
        self.assertEqual(resultb.dtype, 'M8[ns]')

        # roundtrip
        td = timedelta(minutes=5, seconds=3)
        resulta = df['A'] + td
        resultb = resulta - td
        assert_series_equal(df['A'], resultb)
        self.assertEqual(resultb.dtype, 'M8[ns]')

        # inplace
        value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
        rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
        self.assertEqual(rs[2], value)

    def test_timedeltas_with_DateOffset(self):

        # GH 4532
        # operate with pd.offsets
        s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])

        result = s + pd.offsets.Second(5)
        result2 = pd.offsets.Second(5) + s
        expected = Series([Timestamp('20130101 9:01:05'),
                           Timestamp('20130101 9:02:05')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)

        result = s + pd.offsets.Milli(5)
        result2 = pd.offsets.Milli(5) + s
        expected = Series([Timestamp('20130101 9:01:00.005'),
                           Timestamp('20130101 9:02:00.005')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)

        result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
        expected = Series([Timestamp('20130101 9:06:00.005'),
                           Timestamp('20130101 9:07:00.005')])
        assert_series_equal(result, expected)

        # operate with np.timedelta64 correctly
        result = s + np.timedelta64(1, 's')
        result2 = np.timedelta64(1, 's') + s
        expected = Series([Timestamp('20130101 9:01:01'),
                           Timestamp('20130101 9:02:01')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)

        result = s + np.timedelta64(5, 'ms')
        result2 = np.timedelta64(5, 'ms') + s
        expected = Series([Timestamp('20130101 9:01:00.005'),
                           Timestamp('20130101 9:02:00.005')])
        assert_series_equal(result, expected)
        assert_series_equal(result2, expected)

        # valid DateOffsets: these must not raise, in either direction
        for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
                   'Nano']:
            op = getattr(pd.offsets, do)
            s + op(5)
            op(5) + s

    def test_timedelta64_operations_with_DateOffset(self):
        # GH 10699
        td = Series([timedelta(minutes=5, seconds=3)] * 3)
        result = td + pd.offsets.Minute(1)
        expected = Series([timedelta(minutes=6, seconds=3)] * 3)
        assert_series_equal(result, expected)

        result = td - pd.offsets.Minute(1)
        expected = Series([timedelta(minutes=4, seconds=3)] * 3)
        assert_series_equal(result, expected)

        # element-wise offsets from a Series of offsets
        result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
                              pd.offsets.Hour(2)])
        expected = Series([timedelta(minutes=6, seconds=3),
                           timedelta(minutes=5, seconds=6),
                           timedelta(hours=2, minutes=5, seconds=3)])
        assert_series_equal(result, expected)

        result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
        expected = Series([timedelta(minutes=6, seconds=15)] * 3)
        assert_series_equal(result, expected)

        # valid DateOffsets: these must not raise
        for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
                   'Nano']:
            op = getattr(pd.offsets, do)
            td + op(5)
            op(5) + td
            td - op(5)
            op(5) - td

    def test_timedelta64_operations_with_timedeltas(self):

        # td operate with td
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td2 = timedelta(minutes=5, seconds=4)
        result = td1 - td2
        expected = Series([timedelta(seconds=0)] * 3) - Series(
            [timedelta(seconds=1)] * 3)
        self.assertEqual(result.dtype, 'm8[ns]')
        assert_series_equal(result, expected)

        result2 = td2 - td1
        expected = (Series([timedelta(seconds=1)] * 3) -
                    Series([timedelta(seconds=0)] * 3))
        assert_series_equal(result2, expected)

        # roundtrip
        assert_series_equal(result + td2, td1)

        # Now again, using pd.to_timedelta, which should build
        # a Series or a scalar, depending on input.
        td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
        td2 = pd.to_timedelta('00:05:04')
        result = td1 - td2
        expected = Series([timedelta(seconds=0)] * 3) - Series(
            [timedelta(seconds=1)] * 3)
        self.assertEqual(result.dtype, 'm8[ns]')
        assert_series_equal(result, expected)

        result2 = td2 - td1
        expected = (Series([timedelta(seconds=1)] * 3) -
                    Series([timedelta(seconds=0)] * 3))
        assert_series_equal(result2, expected)

        # roundtrip
        assert_series_equal(result + td2, td1)

    def test_timedelta64_operations_with_integers(self):

        # GH 4521
        # divide/multiply by integers
        startdate = Series(date_range('2013-01-01', '2013-01-03'))
        enddate = Series(date_range('2013-03-01', '2013-03-03'))

        s1 = enddate - startdate
        s1[2] = np.nan
        s2 = Series([2, 3, 4])
        expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
        expected[2] = np.nan
        result = s1 / s2
        assert_series_equal(result, expected)

        s2 = Series([20, 30, 40])
        expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
        expected[2] = np.nan
        result = s1 / s2
        assert_series_equal(result, expected)

        result = s1 / 2
        expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
        expected[2] = np.nan
        assert_series_equal(result, expected)

        s2 = Series([20, 30, 40])
        expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
        expected[2] = np.nan
        result = s1 * s2
        assert_series_equal(result, expected)

        # multiplication works for every integer dtype
        for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32',
                      'uint16', 'uint8']:
            s2 = Series([20, 30, 40], dtype=dtype)
            expected = Series(s1.values.astype(np.int64) *
                              s2.astype(np.int64), dtype='m8[ns]')
            expected[2] = np.nan
            result = s1 * s2
            assert_series_equal(result, expected)

        result = s1 * 2
        expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
        expected[2] = np.nan
        assert_series_equal(result, expected)

        result = s1 * -1
        expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
        expected[2] = np.nan
        assert_series_equal(result, expected)

        # invalid ops
        for op in ['__true_div__', '__div__', '__mul__']:
            sop = getattr(s1, op, None)
            if sop is not None:
                self.assertRaises(TypeError, sop, s2.astype(float))
                self.assertRaises(TypeError, sop, 2.)

        for op in ['__add__', '__sub__']:
            sop = getattr(s1, op, None)
            if sop is not None:
                self.assertRaises(TypeError, sop, 1)
                self.assertRaises(TypeError, sop, s2.values)

    def test_timedelta64_conversions(self):
        startdate = Series(date_range('2013-01-01', '2013-01-03'))
        enddate = Series(date_range('2013-03-01', '2013-03-03'))

        s1 = enddate - startdate
        s1[2] = np.nan

        for m in [1, 3, 10]:
            for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:

                # op
                expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
                result = s1 / np.timedelta64(m, unit)
                assert_series_equal(result, expected)

                if m == 1 and unit != 'ns':

                    # astype
                    result = s1.astype("timedelta64[{0}]".format(unit))
                    assert_series_equal(result, expected)

                # reverse op
                expected = s1.apply(lambda x: np.timedelta64(m, unit) / x)
                result = np.timedelta64(m, unit) / s1

        # astype
        s = Series(date_range('20130101', periods=3))
        result = s.astype(object)
        self.assertIsInstance(result.iloc[0], datetime)
        self.assertTrue(result.dtype == np.object_)

        result = s1.astype(object)
        self.assertIsInstance(result.iloc[0], timedelta)
        self.assertTrue(result.dtype == np.object_)

    def test_timedelta64_equal_timedelta_supported_ops(self):
        ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
                      Timestamp('20130228 22:00:00'),
                      Timestamp('20130228 21:00:00')])

        intervals = 'D', 'h', 'm', 's', 'us'
        # microsecond factors for each interval unit
        npy16_mappings = {'D': 24 * 60 * 60 * 1000000, 'h': 60
                          * 60 * 1000000,
                          'm': 60 * 1000000, 's': 1000000, 'us': 1}

        def timedelta64(*args):
            # build a single np.timedelta64 from per-unit counts
            return sum(starmap(np.timedelta64, zip(args, intervals)))

        # numpy timedelta64 and python timedelta must act identically
        for op, d, h, m, s, us in product([operator.add, operator.sub],
                                          *([range(2)] * 5)):
            nptd = timedelta64(d, h, m, s, us)
            pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
                             microseconds=us)
            lhs = op(ser, nptd)
            rhs = op(ser, pytd)

            try:
                assert_series_equal(lhs, rhs)
            except:
                raise AssertionError(
                    "invalid comparsion [op->{0},d->{1},h->{2},m->{3},s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s, us, lhs, rhs))

    def test_timedelta_assignment(self):
        # GH 8209
        s = Series([])
        s.loc['B'] = timedelta(1)
        tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))

        s = s.reindex(s.index.insert(0, 'A'))
        tm.assert_series_equal(s, Series(
            [np.nan, Timedelta('1 days')], index=['A', 'B']))

        result = s.fillna(timedelta(1))
        expected = Series(Timedelta('1 days'), index=['A', 'B'])
        tm.assert_series_equal(result, expected)

        s.loc['A'] = timedelta(1)
        tm.assert_series_equal(s, expected)

    def test_operators_datetimelike(self):

        def run_ops(ops, get_ser, test_ser):
            # check that we are getting a TypeError
            # with 'operate' (from core/ops.py) for the ops that are not
            # defined
            for op_str in ops:
                op = getattr(get_ser, op_str, None)
                with tm.assertRaisesRegexp(TypeError, 'operate'):
                    op(test_ser)

        ### timedelta64 ###
        td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
        td1.iloc[2] = np.nan
        td2 = timedelta(minutes=5, seconds=4)
        ops = ['__mul__', '__floordiv__', '__pow__',
               '__rmul__', '__rfloordiv__', '__rpow__']
        run_ops(ops, td1, td2)
        # the remaining ops must not raise
        td1 + td2
        td2 + td1
        td1 - td2
        td2 - td1
        td1 / td2
        td2 / td1

        ### datetime64 ###
        dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
                      Timestamp('20120103')])
        dt1.iloc[2] = np.nan
        dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
                      Timestamp('20120104')])
        ops = ['__add__', '__mul__', '__floordiv__', '__truediv__',
               '__div__', '__pow__', '__radd__', '__rmul__',
               '__rfloordiv__', '__rtruediv__', '__rdiv__', '__rpow__']
        run_ops(ops, dt1, dt2)
        dt1 - dt2
        dt2 - dt1

        ### datetime64 with timetimedelta ###
        ops = ['__mul__', '__floordiv__', '__truediv__', '__div__',
               '__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
               '__rdiv__', '__rpow__']
        run_ops(ops, dt1, td1)
        dt1 + td1
        td1 + dt1
        dt1 - td1
        # TODO: Decide if this ought to work.
        # td1 - dt1

        ### timetimedelta with datetime64 ###
        ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__',
               '__div__', '__pow__', '__rsub__', '__rmul__',
               '__rfloordiv__', '__rtruediv__', '__rdiv__', '__rpow__']
        run_ops(ops, td1, dt1)
        td1 + dt1
        dt1 + td1

        # 8260, 10763
        # datetime64 with tz
        ops = ['__mul__', '__floordiv__', '__truediv__', '__div__',
               '__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
               '__rdiv__', '__rpow__']
        dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
                                tz='US/Eastern'), name='foo')
        dt2 = dt1.copy()
        dt2.iloc[2] = np.nan
        td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
        td2 = td1.copy()
        td2.iloc[1] = np.nan
        run_ops(ops, dt1, td1)

        # tz-aware +/- timedelta: equivalent to localize -> op -> relocalize
        result = dt1 + td1[0]
        expected = (dt1.dt.tz_localize(None) +
                    td1[0]).dt.tz_localize('US/Eastern')
        assert_series_equal(result, expected)

        result = dt2 + td2[0]
        expected = (dt2.dt.tz_localize(None) +
                    td2[0]).dt.tz_localize('US/Eastern')
        assert_series_equal(result, expected)

        # odd numpy behavior with scalar timedeltas
        if not _np_version_under1p8:
            result = td1[0] + dt1
            expected = (dt1.dt.tz_localize(None) +
                        td1[0]).dt.tz_localize('US/Eastern')
            assert_series_equal(result, expected)

            result = td2[0] + dt2
            expected = (dt2.dt.tz_localize(None) +
                        td2[0]).dt.tz_localize('US/Eastern')
            assert_series_equal(result, expected)

        result = dt1 - td1[0]
        expected = (dt1.dt.tz_localize(None) -
                    td1[0]).dt.tz_localize('US/Eastern')
        assert_series_equal(result, expected)
        self.assertRaises(TypeError, lambda: td1[0] - dt1)

        result = dt2 - td2[0]
        expected = (dt2.dt.tz_localize(None) -
                    td2[0]).dt.tz_localize('US/Eastern')
        assert_series_equal(result, expected)
        self.assertRaises(TypeError, lambda: td2[0] - dt2)

        result = dt1 + td1
        expected = (dt1.dt.tz_localize(None) +
                    td1).dt.tz_localize('US/Eastern')
        assert_series_equal(result, expected)

        result = dt2 + td2
        expected = (dt2.dt.tz_localize(None) +
                    td2).dt.tz_localize('US/Eastern')
        assert_series_equal(result, expected)

        result = dt1 - td1
        expected = (dt1.dt.tz_localize(None) -
                    td1).dt.tz_localize('US/Eastern')
        assert_series_equal(result, expected)

        result = dt2 - td2
        expected = (dt2.dt.tz_localize(None) -
                    td2).dt.tz_localize('US/Eastern')
        assert_series_equal(result, expected)

        self.assertRaises(TypeError, lambda: td1 - dt1)
        self.assertRaises(TypeError, lambda: td2 - dt2)

    def test_ops_datetimelike_align(self):
        # GH 7500
        # datetimelike ops need to align
        dt = Series(date_range('2012-1-1', periods=3, freq='D'))
        dt.iloc[2] = np.nan
        dt2 = dt[::-1]

        expected = Series([timedelta(0), timedelta(0), pd.NaT])
        # name is reset
        result = dt2 - dt
        assert_series_equal(result, expected)

        expected = Series(expected, name=0)
        result = (dt2.to_frame() - dt.to_frame())[0]
        assert_series_equal(result, expected)

    def test_timedelta64_functions(self):

        from datetime import timedelta
        from pandas import date_range

        # index min/max
        td = Series(date_range('2012-1-1', periods=3, freq='D')) - \
            Timestamp('20120101')

        result = td.idxmin()
        self.assertEqual(result, 0)

        result = td.idxmax()
        self.assertEqual(result, 2)

        # GH 2982
        # with NaT
        td[0] = np.nan

        result = td.idxmin()
        self.assertEqual(result, 1)

        result = td.idxmax()
        self.assertEqual(result, 2)

        # abs
        s1 = Series(date_range('20120101', periods=3))
        s2 = Series(date_range('20120102', periods=3))
        expected = Series(s2 - s1)

        # this fails as numpy returns timedelta64[us]
        # result = np.abs(s1-s2)
        # assert_frame_equal(result,expected)

        result = (s1 - s2).abs()
        assert_series_equal(result, expected)

        # max/min
        result = td.max()
        expected = Timedelta('2 days')
        self.assertEqual(result, expected)

        result = td.min()
        expected = Timedelta('1 days')
        self.assertEqual(result, expected)

    def test_ops_consistency_on_empty(self):

        # GH 7869
        # consistency on empty

        # float
result = Series(dtype=float).sum() self.assertEqual(result,0) result = Series(dtype=float).mean() self.assertTrue(isnull(result)) result = Series(dtype=float).median() self.assertTrue(isnull(result)) # timedelta64[ns] result = Series(dtype='m8[ns]').sum() self.assertEqual(result, Timedelta(0)) result = Series(dtype='m8[ns]').mean() self.assertTrue(result is pd.NaT) result = Series(dtype='m8[ns]').median() self.assertTrue(result is pd.NaT) def test_timedelta_fillna(self): #GH 3371 s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp('20130102'), Timestamp('20130103 9:01:01')]) td = s.diff() # reg fillna result = td.fillna(0) expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(days=1, seconds=9*3600+60+1)]) assert_series_equal(result, expected) # interprested as seconds result = td.fillna(1) expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1), timedelta(days=1, seconds=9*3600+60+1)]) assert_series_equal(result, expected) result = td.fillna(timedelta(days=1, seconds=1)) expected = Series([timedelta(days=1, seconds=1), timedelta(0), timedelta(1), timedelta(days=1, seconds=9*3600+60+1)]) assert_series_equal(result, expected) result = td.fillna(np.timedelta64(int(1e9))) expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1), timedelta(days=1, seconds=9*3600+60+1)]) assert_series_equal(result, expected) from pandas import tslib result = td.fillna(tslib.NaT) expected = Series([tslib.NaT, timedelta(0), timedelta(1), timedelta(days=1, seconds=9*3600+60+1)], dtype='m8[ns]') assert_series_equal(result, expected) # ffill td[2] = np.nan result = td.ffill() expected = td.fillna(0) expected[0] = np.nan assert_series_equal(result, expected) # bfill td[2] = np.nan result = td.bfill() expected = td.fillna(0) expected[2] = timedelta(days=1, seconds=9*3600+60+1) assert_series_equal(result, expected) def test_datetime64_fillna(self): s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp('20130102'), 
Timestamp('20130103 9:01:01')]) s[2] = np.nan # reg fillna result = s.fillna(Timestamp('20130104')) expected = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')]) assert_series_equal(result, expected) from pandas import tslib result = s.fillna(tslib.NaT) expected = s assert_series_equal(result, expected) # ffill result = s.ffill() expected = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')]) assert_series_equal(result, expected) # bfill result = s.bfill() expected = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01'), Timestamp('20130103 9:01:01')]) assert_series_equal(result, expected) # GH 6587 # make sure that we are treating as integer when filling # this also tests inference of a datetime-like with NaT's s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001']) expected = Series(['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001'], dtype='M8[ns]') result = s.fillna(method='backfill') assert_series_equal(result, expected) def test_datetime64_tz_fillna(self): for tz in ['US/Eastern', 'Asia/Tokyo']: # DatetimeBlock s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp('2011-01-03 10:00'), pd.NaT]) result = s.fillna(pd.Timestamp('2011-01-02 10:00')) expected = Series([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp('2011-01-02 10:00')]) self.assert_series_equal(expected, result) result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz)) expected = Series([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'), Timestamp('2011-01-02 10:00', tz=tz)]) self.assert_series_equal(expected, result) result = s.fillna('AAA') expected = Series([Timestamp('2011-01-01 10:00'), 'AAA', Timestamp('2011-01-03 10:00'), 'AAA'], dtype=object) self.assert_series_equal(expected, result) result = 
s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz), 3: pd.Timestamp('2011-01-04 10:00')}) expected = Series([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'), Timestamp('2011-01-04 10:00')]) self.assert_series_equal(expected, result) result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'), 3: pd.Timestamp('2011-01-04 10:00')}) expected = Series([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp('2011-01-04 10:00')]) self.assert_series_equal(expected, result) # DatetimeBlockTZ idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT, '2011-01-03 10:00', pd.NaT], tz=tz) s = pd.Series(idx) result = s.fillna(pd.Timestamp('2011-01-02 10:00')) expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp('2011-01-02 10:00'), Timestamp('2011-01-03 10:00', tz=tz), Timestamp('2011-01-02 10:00')]) self.assert_series_equal(expected, result) result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz)) idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00', '2011-01-03 10:00', '2011-01-02 10:00'], tz=tz) expected = Series(idx) self.assert_series_equal(expected, result) result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz).to_pydatetime()) idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00', '2011-01-03 10:00', '2011-01-02 10:00'], tz=tz) expected = Series(idx) self.assert_series_equal(expected, result) result = s.fillna('AAA') expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA', Timestamp('2011-01-03 10:00', tz=tz), 'AAA'], dtype=object) self.assert_series_equal(expected, result) result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz), 3: pd.Timestamp('2011-01-04 10:00')}) expected = Series([Timestamp('2011-01-01 10:00', tz=tz), Timestamp('2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00', tz=tz), Timestamp('2011-01-04 10:00')]) self.assert_series_equal(expected, result) result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz), 
                           3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
        # (continuation of test_datetime64_tz_fillna, whose def precedes this chunk)
        expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
                           Timestamp('2011-01-02 10:00', tz=tz),
                           Timestamp('2011-01-03 10:00', tz=tz),
                           Timestamp('2011-01-04 10:00', tz=tz)])
        self.assert_series_equal(expected, result)

        # filling with a naive/other zone, coerce to object
        result = s.fillna(Timestamp('20130101'))
        expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
                           Timestamp('2013-01-01'),
                           Timestamp('2011-01-03 10:00', tz=tz),
                           Timestamp('2013-01-01')])
        self.assert_series_equal(expected, result)

        result = s.fillna(Timestamp('20130101', tz='US/Pacific'))
        expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
                           Timestamp('2013-01-01', tz='US/Pacific'),
                           Timestamp('2011-01-03 10:00', tz=tz),
                           Timestamp('2013-01-01', tz='US/Pacific')])
        self.assert_series_equal(expected, result)

    def test_fillna_int(self):
        # in-place ffill on an int Series matches the non-inplace result
        s = Series(np.random.randint(-100, 100, 50))
        s.fillna(method='ffill', inplace=True)
        assert_series_equal(s.fillna(method='ffill', inplace=False), s)

    def test_fillna_raise(self):
        # list/tuple fill values are rejected with TypeError
        s = Series(np.random.randint(-100, 100, 50))
        self.assertRaises(TypeError, s.fillna, [1, 2])
        self.assertRaises(TypeError, s.fillna, (1, 2))

    def test_raise_on_info(self):
        # Series has no .info(); attribute access must raise
        s = Series(np.random.randn(10))
        with tm.assertRaises(AttributeError):
            s.info()

    def test_isnull_for_inf(self):
        # with mode.use_inf_as_null, inf counts as missing for isnull/dropna
        s = Series(['a', np.inf, np.nan, 1.0])
        with pd.option_context('mode.use_inf_as_null', True):
            r = s.isnull()
            dr = s.dropna()

        e = Series([False, True, True, False])
        de = Series(['a', 1.0], index=[0, 3])
        tm.assert_series_equal(r, e)
        tm.assert_series_equal(dr, de)

    # TimeSeries-specific

    def test_fillna(self):
        # ffill/backfill/value fills on a date-indexed Series
        ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))

        # no NaNs: ffill is a no-op
        self.assert_numpy_array_equal(ts, ts.fillna(method='ffill'))

        ts[2] = np.NaN

        self.assert_numpy_array_equal(ts.fillna(method='ffill'),
                                      [0., 1., 1., 3., 4.])
        self.assert_numpy_array_equal(ts.fillna(method='backfill'),
                                      [0., 1., 3., 3., 4.])
        self.assert_numpy_array_equal(ts.fillna(value=5), [0., 1., 5., 3., 4.])
        # fillna with neither value nor method, or both, is a ValueError
        self.assertRaises(ValueError, ts.fillna)
        self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')

        # GH 5703: filling from another Series / dict aligns on the index
        s1 = Series([np.nan])
        s2 = Series([1])
        result = s1.fillna(s2)
        expected = Series([1.])
        assert_series_equal(result, expected)
        result = s1.fillna({})
        assert_series_equal(result, s1)
        result = s1.fillna(Series(()))
        assert_series_equal(result, s1)
        result = s2.fillna(s1)
        assert_series_equal(result, s2)
        result = s1.fillna({0: 1})
        assert_series_equal(result, expected)
        # key 1 is not in s1's index, so nothing is filled
        result = s1.fillna({1: 1})
        assert_series_equal(result, Series([np.nan]))
        result = s1.fillna({0: 1, 1: 1})
        assert_series_equal(result, expected)
        result = s1.fillna(Series({0: 1, 1: 1}))
        assert_series_equal(result, expected)
        # non-overlapping index on the filler Series: no fill happens
        result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
        assert_series_equal(result, s1)

        s1 = Series([0, 1, 2], list('abc'))
        s2 = Series([0, np.nan, 2], list('bac'))
        result = s2.fillna(s1)
        expected = Series([0, 0, 2.], list('bac'))
        assert_series_equal(result, expected)

        # limit
        s = Series(np.nan, index=[0, 1, 2])
        result = s.fillna(999, limit=1)
        expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
        assert_series_equal(result, expected)

        result = s.fillna(999, limit=2)
        expected = Series([999, 999, np.nan], index=[0, 1, 2])
        assert_series_equal(result, expected)

        # GH 9043
        # make sure a string representation of int/float values can be filled
        # correctly without raising errors or being converted
        vals = ['0', '1.5', '-0.3']
        for val in vals:
            s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
            result = s.fillna(val)
            expected = Series([0, 1, val, val, 4], dtype='object')
            assert_series_equal(result, expected)

    def test_fillna_bug(self):
        # ffill/bfill on a string-labelled index with leading/trailing NaN
        x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
        filled = x.fillna(method='ffill')
        expected = Series([nan, 1., 1., 3., 3.], x.index)
        assert_series_equal(filled, expected)

        filled = x.fillna(method='bfill')
        expected = Series([1., 1., 3., 3., nan], x.index)
        assert_series_equal(filled, expected)

    def test_fillna_inplace(self):
        # inplace=True fill matches the equivalent non-inplace call
        x = Series([nan, 1., nan, 3.,
                    nan], ['z', 'a', 'b', 'c', 'd'])
        y = x.copy()

        y.fillna(value=0, inplace=True)

        expected = x.fillna(value=0)
        assert_series_equal(y, expected)

    def test_fillna_invalid_method(self):
        # a misspelled method name is echoed back in the ValueError message
        try:
            self.ts.fillna(method='ffil')
        except ValueError as inst:
            self.assertIn('ffil', str(inst))

    def test_ffill(self):
        # .ffill() is an alias for fillna(method='ffill')
        ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
        ts[2] = np.NaN
        assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))

    def test_bfill(self):
        # .bfill() is an alias for fillna(method='bfill')
        ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
        ts[2] = np.NaN
        assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))

    def test_sub_of_datetime_from_TimeSeries(self):
        # datetime Series minus a datetime yields timedelta64[ns]
        from pandas.tseries.timedeltas import to_timedelta
        from datetime import datetime
        a = Timestamp(datetime(1993, 0o1, 0o7, 13, 30, 00))
        b = datetime(1993, 6, 22, 13, 30)
        a = Series([a])
        result = to_timedelta(np.abs(a - b))
        self.assertEqual(result.dtype, 'timedelta64[ns]')

    def test_datetime64_with_index(self):

        # arithmetic integer ops with an index
        s = Series(np.random.randn(5))
        expected = s - s.index.to_series()
        result = s - s.index
        assert_series_equal(result, expected)

        # GH 4629
        # arithmetic datetime64 ops with an index
        s = Series(date_range('20130101', periods=5),
                   index=date_range('20130101', periods=5))
        expected = s - s.index.to_series()
        result = s - s.index
        assert_series_equal(result, expected)

        result = s - s.index.to_period()
        assert_series_equal(result, expected)

        df = DataFrame(np.random.randn(5, 2),
                       index=date_range('20130101', periods=5))
        df['date'] = Timestamp('20130102')
        df['expected'] = df['date'] - df.index.to_series()
        df['result'] = df['date'] - df.index
        assert_series_equal(df['result'], df['expected'], check_names=False)

    def test_timedelta64_nan(self):
        # setting np.nan / iNaT / NaT into a timedelta Series marks it null
        from pandas import tslib
        td = Series([timedelta(days=i) for i in range(10)])

        # nan ops on timedeltas
        td1 = td.copy()
        td1[0] = np.nan
        self.assertTrue(isnull(td1[0]))
        self.assertEqual(td1[0].value, tslib.iNaT)
        td1[0] = td[0]
        self.assertFalse(isnull(td1[0]))

        td1[1] = tslib.iNaT
        self.assertTrue(isnull(td1[1]))
        self.assertEqual(td1[1].value, tslib.iNaT)
        td1[1] = td[1]
        self.assertFalse(isnull(td1[1]))

        td1[2] = tslib.NaT
        self.assertTrue(isnull(td1[2]))
        self.assertEqual(td1[2].value, tslib.iNaT)
        td1[2] = td[2]
        self.assertFalse(isnull(td1[2]))

        # boolean setting
        # this doesn't work, not sure numpy even supports it
        #result = td[(td>np.timedelta64(timedelta(days=3))) & (td<np.timedelta64(timedelta(days=7)))] = np.nan
        #self.assertEqual(isnull(result).sum(), 7)

    # NumPy limitation =(

    # def test_logical_range_select(self):
    #     np.random.seed(12345)
    #     selector = -0.5 <= self.ts <= 0.5
    #     expected = (self.ts >= -0.5) & (self.ts <= 0.5)
    #     assert_series_equal(selector, expected)

    def test_operators_na_handling(self):
        # arithmetic with shifted/object data propagates NaN
        from decimal import Decimal
        from datetime import date
        s = Series([Decimal('1.3'), Decimal('2.3')],
                   index=[date(2012, 1, 1), date(2012, 1, 2)])

        result = s + s.shift(1)
        result2 = s.shift(1) + s
        self.assertTrue(isnull(result[0]))
        self.assertTrue(isnull(result2[0]))

        # string concat keeps NaN as NaN
        s = Series(['foo', 'bar', 'baz', np.nan])
        result = 'prefix_' + s
        expected = Series(['prefix_foo', 'prefix_bar', 'prefix_baz', np.nan])
        assert_series_equal(result, expected)

        result = s + '_suffix'
        expected = Series(['foo_suffix', 'bar_suffix', 'baz_suffix', np.nan])
        assert_series_equal(result, expected)

    def test_object_comparisons(self):
        # elementwise comparisons on an object Series; NaN compares False
        s = Series(['a', 'b', np.nan, 'c', 'a'])
        result = s == 'a'
        expected = Series([True, False, False, False, True])
        assert_series_equal(result, expected)

        result = s < 'a'
        expected = Series([False, False, False, False, False])
        assert_series_equal(result, expected)

        result = s != 'a'
        expected = -(s == 'a')
        assert_series_equal(result, expected)

    def test_comparison_tuples(self):
        # GH11339
        # comparisons vs tuple
        s = Series([(1, 1), (1, 2)])

        result = s == (1, 2)
        expected = Series([False, True])
        assert_series_equal(result, expected)

        result = s != (1, 2)
        expected = Series([True, False])
        assert_series_equal(result, expected)

        result = s == (0, 0)
        expected = Series([False,
                           False])
        assert_series_equal(result, expected)

        result = s != (0, 0)
        expected = Series([True, True])
        assert_series_equal(result, expected)

        s = Series([(1, 1), (1, 1)])

        result = s == (1, 1)
        expected = Series([True, True])
        assert_series_equal(result, expected)

        result = s != (1, 1)
        expected = Series([False, False])
        assert_series_equal(result, expected)

        # frozensets compare elementwise too
        s = Series([frozenset([1]), frozenset([1, 2])])

        result = s == frozenset([1])
        expected = Series([True, False])
        assert_series_equal(result, expected)

    def test_comparison_operators_with_nas(self):
        # comparisons on an object Series with NaN holes
        s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
        s[::2] = np.nan

        # test that comparisons work
        ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
        for op in ops:
            val = s[5]

            f = getattr(operator, op)
            result = f(s, val)

            expected = f(s.dropna(), val).reindex(s.index)

            # 'ne' fills missing comparisons with True, everything else False
            if op == 'ne':
                expected = expected.fillna(True).astype(bool)
            else:
                expected = expected.fillna(False).astype(bool)

            assert_series_equal(result, expected)

            # fffffffuuuuuuuuuuuu
            # result = f(val, s)
            # expected = f(val, s.dropna()).reindex(s.index)
            # assert_series_equal(result, expected)

        # boolean &, |, ^ should work with object arrays and propagate NAs

        ops = ['and_', 'or_', 'xor']
        mask = s.isnull()
        for bool_op in ops:
            f = getattr(operator, bool_op)

            filled = s.fillna(s[0])

            result = f(s < s[9], s > s[3])

            expected = f(filled < filled[9], filled > filled[3])
            expected[mask] = False

            assert_series_equal(result, expected)

    def test_comparison_object_numeric_nas(self):
        # object-dtype numeric comparisons match the float-dtype result
        s = Series(np.random.randn(10), dtype=object)
        shifted = s.shift(2)

        ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
        for op in ops:
            f = getattr(operator, op)

            result = f(s, shifted)
            expected = f(s.astype(float), shifted.astype(float))
            assert_series_equal(result, expected)

    def test_comparison_invalid(self):

        # GH4968
        # invalid date/int comparisons
        s = Series(range(5))
        s2 = Series(date_range('20010101', periods=5))

        for (x, y) in [(s, s2), (s2, s)]:
            self.assertRaises(TypeError, lambda : x == y)
            self.assertRaises(TypeError, lambda : x != y)
            self.assertRaises(TypeError, lambda : x >= y)
            self.assertRaises(TypeError, lambda : x > y)
            self.assertRaises(TypeError, lambda : x < y)
            self.assertRaises(TypeError, lambda : x <= y)

    def test_more_na_comparisons(self):
        # NaN vs NaN compares unequal; comparison to the np.nan scalar
        # is all-False for == and all-True for !=
        left = Series(['a', np.nan, 'c'])
        right = Series(['a', np.nan, 'd'])

        result = left == right
        expected = Series([True, False, False])
        assert_series_equal(result, expected)

        result = left != right
        expected = Series([False, True, True])
        assert_series_equal(result, expected)

        result = left == np.nan
        expected = Series([False, False, False])
        assert_series_equal(result, expected)

        result = left != np.nan
        expected = Series([True, True, True])
        assert_series_equal(result, expected)

    def test_comparison_different_length(self):
        # length-mismatched comparisons raise ValueError
        a = Series(['a', 'b', 'c'])
        b = Series(['b', 'a'])
        self.assertRaises(ValueError, a.__lt__, b)

        a = Series([1, 2])
        b = Series([2, 3, 4])
        self.assertRaises(ValueError, a.__eq__, b)

    def test_comparison_label_based(self):

        # GH 4947
        # comparisons should be label based

        a = Series([True, False, True], list('bca'))
        b = Series([False, True, False], list('abc'))

        expected = Series([True, False, False], list('bca'))
        result = a & b
        assert_series_equal(result, expected)

        expected = Series([True, False, True], list('bca'))
        result = a | b
        assert_series_equal(result, expected)

        expected = Series([False, False, True], list('bca'))
        result = a ^ b
        assert_series_equal(result, expected)

        # rhs is bigger
        a = Series([True, False, True], list('bca'))
        b = Series([False, True, False, True], list('abcd'))

        expected = Series([True, False, False], list('bca'))
        result = a & b
        assert_series_equal(result, expected)

        expected = Series([True, False, True], list('bca'))
        result = a | b
        assert_series_equal(result, expected)

        # filling

        # vs empty
        result = a & Series([])
        expected = Series([False, False, False], list('bca'))
        assert_series_equal(result, expected)

        result = a | Series([])
        expected = Series([True, False, True], list('bca'))
        assert_series_equal(result, expected)

        # vs non-matching
        result = a & Series([1], ['z'])
        expected = Series([False, False, False], list('bca'))
        assert_series_equal(result, expected)

        result = a | Series([1], ['z'])
        expected = Series([True, False, True], list('bca'))
        assert_series_equal(result, expected)

        # identity
        # we would like s[s|e] == s to hold for any e, whether empty or not
        for e in [Series([]), Series([1], ['z']), Series(['z']),
                  Series(np.nan, b.index), Series(np.nan, a.index)]:
            result = a[a | e]
            assert_series_equal(result, a[a])

        # vs scalars
        index = list('bca')
        t = Series([True, False, True])

        for v in [True, 1, 2]:
            result = Series([True, False, True], index=index) | v
            expected = Series([True, True, True], index=index)
            assert_series_equal(result, expected)

        for v in [np.nan, 'foo']:
            self.assertRaises(TypeError, lambda : t | v)

        for v in [False, 0]:
            result = Series([True, False, True], index=index) | v
            expected = Series([True, False, True], index=index)
            assert_series_equal(result, expected)

        for v in [True, 1]:
            result = Series([True, False, True], index=index) & v
            expected = Series([True, False, True], index=index)
            assert_series_equal(result, expected)

        for v in [False, 0]:
            result = Series([True, False, True], index=index) & v
            expected = Series([False, False, False], index=index)
            assert_series_equal(result, expected)

        for v in [np.nan]:
            self.assertRaises(TypeError, lambda : t & v)

    def test_operators_bitwise(self):
        # GH 9016: support bitwise op for integer types
        index = list('bca')

        s_tft = Series([True, False, True], index=index)
        s_fff = Series([False, False, False], index=index)
        s_tff = Series([True, False, False], index=index)
        s_empty = Series([])
        s_0101 = Series([0, 1, 0, 1])
        s_0123 = Series(range(4), dtype='int64')
        s_3333 = Series([3] * 4)
        s_4444 = Series([4] * 4)

        res = s_tft & s_empty
        expected = s_fff
        assert_series_equal(res, expected)

        res = s_tft | s_empty
        expected = s_tft
        assert_series_equal(res, expected)

        # int & int keeps int64 dtype
        res = s_0123 & s_3333
        expected = Series(range(4), dtype='int64')
        assert_series_equal(res, expected)

        res = s_0123 | s_4444
        expected = Series(range(4, 8), dtype='int64')
        assert_series_equal(res, expected)

        s_a0b1c0 = Series([1], list('b'))

        res = s_tft & s_a0b1c0
        expected = s_tff
        assert_series_equal(res, expected)

        res = s_tft | s_a0b1c0
        expected = s_tft
        assert_series_equal(res, expected)

        # bool Series & scalar int
        n0 = 0
        res = s_tft & n0
        expected = s_fff
        assert_series_equal(res, expected)

        res = s_0123 & n0
        expected = Series([0] * 4)
        assert_series_equal(res, expected)

        n1 = 1
        res = s_tft & n1
        expected = s_tft
        assert_series_equal(res, expected)

        res = s_0123 & n1
        expected = Series([0, 1, 0, 1])
        assert_series_equal(res, expected)

        # mixed int widths upcast to the wider dtype
        s_1111 = Series([1] * 4, dtype='int8')
        res = s_0123 & s_1111
        expected = Series([0, 1, 0, 1], dtype='int64')
        assert_series_equal(res, expected)

        res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
        expected = Series([1, 1, 3, 3], dtype='int32')
        assert_series_equal(res, expected)

        # non-integer operands are rejected
        self.assertRaises(TypeError, lambda: s_1111 & 'a')
        self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
        self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
        self.assertRaises(TypeError, lambda: s_0123 & 3.14)
        self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])

        # s_0123 will be all false now because of reindexing like s_tft
        assert_series_equal(s_tft & s_0123, Series([False] * 3, list('bca')))
        # s_tft will be all false now because of reindexing like s_0123
        assert_series_equal(s_0123 & s_tft, Series([False] * 4))

        assert_series_equal(s_0123 & False, Series([False] * 4))
        assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
        assert_series_equal(s_0123 & [False], Series([False] * 4))
        assert_series_equal(s_0123 & (False), Series([False] * 4))
        assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
                            Series([False] * 4))

        s_ftft = Series([False, True, False, True])
        assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)

        s_abNd = Series(['a', 'b', np.NaN, 'd'])
        res = s_0123 & s_abNd
        expected = s_ftft
        assert_series_equal(res, expected)

    def test_between(self):
        # Series.between with and without inclusive endpoints
        s = Series(bdate_range('1/1/2000', periods=20).asobject)
        s[::2] = np.nan

        result = s[s.between(s[3], s[17])]
        expected = s[3:18].dropna()
        assert_series_equal(result, expected)

        result = s[s.between(s[3], s[17], inclusive=False)]
        expected = s[5:16].dropna()
        assert_series_equal(result, expected)

    def test_setitem_na(self):
        # these induce dtype changes
        expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
        s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
        s[::2] = np.nan
        assert_series_equal(s, expected)

        # gets coerced to float (True -> 1.0, False -> 0.0)
        expected = Series([np.nan, 1, np.nan, 0])
        s = Series([True, True, False, False])
        s[::2] = np.nan
        assert_series_equal(s, expected)

        expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7,
                           8, 9])
        s = Series(np.arange(10))
        s[:5] = np.nan
        assert_series_equal(s, expected)

    def test_scalar_na_cmp_corners(self):
        # & between an int Series and a datetime scalar raises
        s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])

        def tester(a, b):
            return a & b

        self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))

        s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
        s[::2] = np.nan

        expected = Series(True, index=s.index)
        expected[::2] = False
        assert_series_equal(tester(s, list(s)), expected)

        d = DataFrame({'A': s})
        # TODO: Fix this exception - needs to be fixed! (see GH5035)
        # (previously this was a TypeError because series returned
        # NotImplemented

        self.assertRaises(ValueError, tester, s, d)

    def test_idxmin(self):
        # test idxmin
        # _check_stat_op approach can not be used here because of isnull check.
        # add some NaNs
        self.series[5:15] = np.NaN

        # skipna or no
        self.assertEqual(self.series[self.series.idxmin()], self.series.min())
        self.assertTrue(isnull(self.series.idxmin(skipna=False)))

        # no NaNs
        nona = self.series.dropna()
        self.assertEqual(nona[nona.idxmin()], nona.min())
        self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),
                         nona.values.argmin())

        # all NaNs
        allna = self.series * nan
        self.assertTrue(isnull(allna.idxmin()))

        # datetime64[ns]
        from pandas import date_range
        s = Series(date_range('20130102', periods=6))
        result = s.idxmin()
        self.assertEqual(result, 0)

        s[0] = np.nan
        result = s.idxmin()
        self.assertEqual(result, 1)

    def test_idxmax(self):
        # test idxmax
        # _check_stat_op approach can not be used here because of isnull check.

        # add some NaNs
        self.series[5:15] = np.NaN

        # skipna or no
        self.assertEqual(self.series[self.series.idxmax()], self.series.max())
        self.assertTrue(isnull(self.series.idxmax(skipna=False)))

        # no NaNs
        nona = self.series.dropna()
        self.assertEqual(nona[nona.idxmax()], nona.max())
        self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),
                         nona.values.argmax())

        # all NaNs
        allna = self.series * nan
        self.assertTrue(isnull(allna.idxmax()))

        from pandas import date_range
        s = Series(date_range('20130102', periods=6))
        result = s.idxmax()
        self.assertEqual(result, 5)

        s[5] = np.nan
        result = s.idxmax()
        self.assertEqual(result, 4)

        # Float64Index
        # GH 5914
        s = pd.Series([1, 2, 3], [1.1, 2.1, 3.1])
        result = s.idxmax()
        self.assertEqual(result, 3.1)
        result = s.idxmin()
        self.assertEqual(result, 1.1)

        s = pd.Series(s.index, s.index)
        result = s.idxmax()
        self.assertEqual(result, 3.1)
        result = s.idxmin()
        self.assertEqual(result, 1.1)

    def test_ndarray_compat(self):

        # test numpy compat with Series as sub-class of NDFrame
        tsdf = DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],
                         index=date_range('1/1/2000', periods=1000))

        def f(x):
            # pick the value at the argmax position
            return x[x.argmax()]

        result = tsdf.apply(f)
        expected = tsdf.max()
        assert_series_equal(result, expected)

        # .item()
        s = Series([1])
        result = s.item()
        self.assertEqual(result, 1)
        self.assertEqual(s.item(), s.iloc[0])

        # using an ndarray like function
        s = Series(np.random.randn(10))
        result = np.ones_like(s)
        expected = Series(1, index=range(10), dtype='float64')
        #assert_series_equal(result,expected)

        # ravel
        s = Series(np.random.randn(10))
        tm.assert_almost_equal(s.ravel(order='F'), s.values.ravel(order='F'))

        # compress
        # GH 6658
        s = Series([0, 1., -1], index=list('abc'))
        result = np.compress(s > 0, s)
        assert_series_equal(result, Series([1.], index=['b']))

        result = np.compress(s < -1, s)
        # result empty Index(dtype=object) as the same as original
        exp = Series([], dtype='float64', index=Index([], dtype='object'))
        assert_series_equal(result, exp)

        s = Series([0, 1., -1], index=[.1, .2, .3])
        result = np.compress(s > 0, s)
        assert_series_equal(result, Series([1.], index=[.2]))

        result = np.compress(s < -1, s)
        # result empty Float64Index as the same as original
        exp = Series([], dtype='float64', index=Index([], dtype='float64'))
        assert_series_equal(result, exp)

    def test_complexx(self):

        # GH4819
        # complex access for ndarray compat
        a = np.arange(5)
        b = Series(a + 4j * a)
        tm.assert_almost_equal(a, b.real)
        tm.assert_almost_equal(4 * a, b.imag)

        # .real is writable and leaves .imag untouched
        b.real = np.arange(5) + 5
        tm.assert_almost_equal(a + 5, b.real)
        tm.assert_almost_equal(4 * a, b.imag)

    def test_underlying_data_conversion(self):

        # GH 4080
        df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
        df.set_index(['a', 'b', 'c'], inplace=True)
        s = Series([1], index=[(2, 2, 2)])
        df['val'] = 0
        df  # no-op expression; presumably left over from debugging
        df['val'].update(s)

        expected = DataFrame(dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3],
                                  val=[0, 1, 0]))
        expected.set_index(['a', 'b', 'c'], inplace=True)
        tm.assert_frame_equal(df, expected)

        # GH 3970
        # these are chained assignments as well
        pd.set_option('chained_assignment', None)
        df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
        df["cc"] = 0.0
        ck = [True] * len(df)
        df["bb"].iloc[0] = .13
        df_tmp = df.iloc[ck]
        df["bb"].iloc[0] = .15
        self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment','raise') # GH 3217 df = DataFrame(dict(a = [1,3], b = [np.nan, 2])) df['c'] = np.nan df['c'].update(pd.Series(['foo'],index=[0])) expected = DataFrame(dict(a = [1,3], b = [np.nan, 2], c = ['foo',np.nan])) tm.assert_frame_equal(df,expected) def test_operators_corner(self): series = self.ts empty = Series([], index=Index([])) result = series + empty self.assertTrue(np.isnan(result).all()) result = empty + Series([], index=Index([])) self.assertEqual(len(result), 0) # TODO: this returned NotImplemented earlier, what to do? # deltas = Series([timedelta(1)] * 5, index=np.arange(5)) # sub_deltas = deltas[::2] # deltas5 = deltas * 5 # deltas = deltas + sub_deltas # float + int int_ts = self.ts.astype(int)[:-5] added = self.ts + int_ts expected = self.ts.values[:-5] + int_ts.values self.assert_numpy_array_equal(added[:-5], expected) def test_operators_reverse_object(self): # GH 56 arr = Series(np.random.randn(10), index=np.arange(10), dtype=object) def _check_op(arr, op): result = op(1., arr) expected = op(1., arr.astype(float)) assert_series_equal(result.astype(float), expected) _check_op(arr, operator.add) _check_op(arr, operator.sub) _check_op(arr, operator.mul) _check_op(arr, operator.truediv) _check_op(arr, operator.floordiv) def test_series_frame_radd_bug(self): import operator # GH 353 vals = Series(tm.rands_array(5, 10)) result = 'foo_' + vals expected = vals.map(lambda x: 'foo_' + x) assert_series_equal(result, expected) frame = DataFrame({'vals': vals}) result = 'foo_' + frame expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)}) tm.assert_frame_equal(result, expected) # really raise this time self.assertRaises(TypeError, operator.add, datetime.now(), self.ts) def test_operators_frame(self): # rpow does not work with DataFrame df = DataFrame({'A': self.ts}) tm.assert_almost_equal(self.ts + self.ts, self.ts + df['A']) tm.assert_almost_equal(self.ts ** self.ts, self.ts ** df['A']) tm.assert_almost_equal(self.ts < self.ts, 
                               self.ts < df['A'])
        tm.assert_almost_equal(self.ts / self.ts, self.ts / df['A'])

    def test_operators_combine(self):
        # flex arithmetic methods with fill_value vs their operator equivalents
        def _check_fill(meth, op, a, b, fill_value=0):
            # reference implementation: fill only one-sided NaNs, keep
            # NaN where both sides are missing
            exp_index = a.index.union(b.index)
            a = a.reindex(exp_index)
            b = b.reindex(exp_index)

            amask = isnull(a)
            bmask = isnull(b)

            exp_values = []
            for i in range(len(exp_index)):
                if amask[i]:
                    if bmask[i]:
                        exp_values.append(nan)
                        continue
                    exp_values.append(op(fill_value, b[i]))
                elif bmask[i]:
                    if amask[i]:
                        exp_values.append(nan)
                        continue
                    exp_values.append(op(a[i], fill_value))
                else:
                    exp_values.append(op(a[i], b[i]))

            result = meth(a, b, fill_value=fill_value)
            expected = Series(exp_values, exp_index)
            assert_series_equal(result, expected)

        a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
        b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))

        pairings = []
        for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:
            fv = 0
            lop = getattr(Series, op)
            lequiv = getattr(operator, op)
            rop = getattr(Series, 'r' + op)
            # bind op at definition time...
            requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
            pairings.append((lop, lequiv, fv))
            pairings.append((rop, requiv, fv))

        if compat.PY3:
            pairings.append((Series.div, operator.truediv, 1))
            pairings.append((Series.rdiv,
                             lambda x, y: operator.truediv(y, x), 1))
        else:
            pairings.append((Series.div, operator.div, 1))
            pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))

        for op, equiv_op, fv in pairings:
            result = op(a, b)
            exp = equiv_op(a, b)
            assert_series_equal(result, exp)
            _check_fill(op, equiv_op, a, b, fill_value=fv)
            # should accept axis=0 or axis='rows'
            op(a, b, axis=0)

    def test_combine_first(self):
        # combine_first fills holes from the argument, keeping own values
        values = tm.makeIntIndex(20).values.astype(float)
        series = Series(values, index=tm.makeIntIndex(20))

        series_copy = series * 2
        series_copy[::2] = np.NaN

        # nothing used from the input
        combined = series.combine_first(series_copy)

        self.assert_numpy_array_equal(combined, series)

        # Holes filled from input
        combined = series_copy.combine_first(series)
        self.assertTrue(np.isfinite(combined).all())

        self.assert_numpy_array_equal(combined[::2], series[::2])
        self.assert_numpy_array_equal(combined[1::2], series_copy[1::2])

        # mixed types
        index = tm.makeStringIndex(20)
        floats = Series(tm.randn(20), index=index)
        strings = Series(tm.makeStringIndex(10), index=index[::2])

        combined = strings.combine_first(floats)

        tm.assert_dict_equal(strings, combined, compare_keys=False)
        tm.assert_dict_equal(floats[1::2], combined, compare_keys=False)

        # corner case
        s = Series([1., 2, 3], index=[0, 1, 2])
        result = s.combine_first(Series([], index=[]))
        assert_series_equal(s, result)

    def test_update(self):
        # update overwrites in place from non-NaN values of the argument
        s = Series([1.5, nan, 3., 4., nan])
        s2 = Series([nan, 3.5, nan, 5.])
        s.update(s2)

        expected = Series([1.5, 3.5, 3., 5., np.nan])
        assert_series_equal(s, expected)

        # GH 3217
        df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
        df['c'] = np.nan

        # this will fail as long as series is a sub-class of ndarray
        # df['c'].update(Series(['foo'],index=[0]))  #####

    def test_corr(self):
        tm._skip_if_no_scipy()

        import scipy.stats as stats

        # full overlap
        self.assertAlmostEqual(self.ts.corr(self.ts), 1)

        # partial overlap
        self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)

        self.assertTrue(isnull(self.ts[:15].corr(self.ts[5:],
                                                 min_periods=12)))

        ts1 = self.ts[:15].reindex(self.ts.index)
        ts2 = self.ts[5:].reindex(self.ts.index)
        self.assertTrue(isnull(ts1.corr(ts2, min_periods=12)))

        # No overlap
        self.assertTrue(np.isnan(self.ts[::2].corr(self.ts[1::2])))

        # all NA
        cp = self.ts[:10].copy()
        cp[:] = np.nan
        self.assertTrue(isnull(cp.corr(cp)))

        # default (pearson) matches scipy
        A = tm.makeTimeSeries()
        B = tm.makeTimeSeries()
        result = A.corr(B)
        expected, _ = stats.pearsonr(A, B)
        self.assertAlmostEqual(result, expected)

    def test_corr_rank(self):
        tm._skip_if_no_scipy()

        import scipy
        import scipy.stats as stats

        # kendall and spearman
        A = tm.makeTimeSeries()
        B = tm.makeTimeSeries()
        A[-5:] = A[:5]
        result = A.corr(B, method='kendall')
        expected = stats.kendalltau(A, B)[0]
        self.assertAlmostEqual(result, expected)

        result = A.corr(B, method='spearman')
        expected = stats.spearmanr(A, B)[0]
        self.assertAlmostEqual(result, expected)

        # these methods got rewritten in 0.8
        if scipy.__version__ < LooseVersion('0.9'):
            raise nose.SkipTest("skipping corr rank because of scipy version "
                                "{0}".format(scipy.__version__))

        # results from R
        A = Series([-0.89926396, 0.94209606, -1.03289164, -0.95445587,
                    0.76910310, -0.06430576, -2.09704447, 0.40660407,
                    -0.89926396, 0.94209606])
        B = Series([-1.01270225, -0.62210117, -1.56895827, 0.59592943,
                    -0.01680292, 1.17258718, -1.06009347, -0.10222060,
                    -0.89076239, 0.89372375])
        kexp = 0.4319297
        sexp = 0.5853767
        self.assertAlmostEqual(A.corr(B, method='kendall'), kexp)
        self.assertAlmostEqual(A.corr(B, method='spearman'), sexp)

    def test_cov(self):
        # full overlap
        self.assertAlmostEqual(self.ts.cov(self.ts), self.ts.std() ** 2)

        # partial overlap
        self.assertAlmostEqual(
            self.ts[:15].cov(self.ts[5:]), self.ts[5:15].std() ** 2)

        # No overlap
        self.assertTrue(np.isnan(self.ts[::2].cov(self.ts[1::2])))

        # all NA
        cp = self.ts[:10].copy()
        cp[:] = np.nan
        self.assertTrue(isnull(cp.cov(cp)))

        # min_periods
        self.assertTrue(isnull(self.ts[:15].cov(self.ts[5:],
                                                min_periods=12)))

        ts1 = self.ts[:15].reindex(self.ts.index)
        ts2 = self.ts[5:].reindex(self.ts.index)
        self.assertTrue(isnull(ts1.cov(ts2, min_periods=12)))

    def test_copy(self):
        # mutating a copy must not touch the original
        ts = self.ts.copy()
        ts[::2] = np.NaN

        # Did not modify original Series
        self.assertFalse(np.isnan(self.ts[0]))

    def test_count(self):
        self.assertEqual(self.ts.count(), len(self.ts))

        self.ts[::2] = np.NaN

        self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())

        # level-based count on a MultiIndex (NaN label excluded per group)
        mi = MultiIndex.from_arrays([list('aabbcc'), [1, 2, 2, nan, 1, 2]])
        ts = Series(np.arange(len(mi)), index=mi)

        left = ts.count(level=1)
        right = Series([2, 3, 1], index=[1, 2, nan])
        assert_series_equal(left, right)

        ts.iloc[[0, 3, 5]] = nan
        assert_series_equal(ts.count(level=1), right - 1)

    def test_dtype(self):

        self.assertEqual(self.ts.dtype, np.dtype('float64'))
        self.assertEqual(self.ts.dtypes, np.dtype('float64'))
        self.assertEqual(self.ts.ftype, 'float64:dense')
        self.assertEqual(self.ts.ftypes, 'float64:dense')
        assert_series_equal(self.ts.get_dtype_counts(),
                            Series(1, ['float64']))
        assert_series_equal(self.ts.get_ftype_counts(),
                            Series(1, ['float64:dense']))

    def test_dot(self):
        a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])
        b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],
                      columns=['p', 'q', 'r', 's']).T

        result = a.dot(b)
        expected = Series(np.dot(a.values, b.values),
                          index=['1', '2', '3'])
        assert_series_equal(result, expected)

        # Check index alignment
        b2 = b.reindex(index=reversed(b.index))
        result = a.dot(b)
        assert_series_equal(result, expected)

        # Check ndarray argument
        result = a.dot(b.values)
        self.assertTrue(np.all(result == expected.values))
        assert_almost_equal(a.dot(b['2'].values), expected['2'])

        # Check series argument
        assert_almost_equal(a.dot(b['1']), expected['1'])
        assert_almost_equal(a.dot(b2['1']), expected['1'])

        # shape mismatch raises
        self.assertRaises(Exception, a.dot, a.values[:3])
        self.assertRaises(ValueError, a.dot, b.T)

    def test_value_counts_nunique(self):

        # basics.rst doc example
        series = Series(np.random.randn(500))
        series[20:500] = np.nan
        series[10:20] = 5000
        # 10 distinct random values + the 5000 constant; NaN not counted
        result = series.nunique()
        self.assertEqual(result, 11)

    def test_unique(self):

        # 714 also, dtype=float
        s = Series([1.2345] * 100)
        s[::2] = np.nan
        result = s.unique()
        self.assertEqual(len(result), 2)

        s = Series([1.2345] * 100, dtype='f4')
        s[::2] = np.nan
        result = s.unique()
        self.assertEqual(len(result), 2)

        # NAs in object arrays #714
        s = Series(['foo'] * 100, dtype='O')
        s[::2] = np.nan
        result = s.unique()
        self.assertEqual(len(result), 2)

        # decision about None
        s = Series([1, 2, 3, None, None, None], dtype=object)
        result = s.unique()
        expected = np.array([1, 2, 3, None], dtype=object)
        self.assert_numpy_array_equal(result, expected)

    def test_dropna_empty(self):
        s = Series([])
        self.assertEqual(len(s.dropna()), 0)
        s.dropna(inplace=True)
        self.assertEqual(len(s), 0)

        # invalid axis
        self.assertRaises(ValueError, s.dropna, axis=1)

    def test_datetime64_tz_dropna(self):
        # DatetimeBlock
        s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
                    Timestamp('2011-01-03 10:00'), pd.NaT])
        result = s.dropna()
        expected = Series([Timestamp('2011-01-01 10:00'),
                           Timestamp('2011-01-03 10:00')], index=[0, 2])
        self.assert_series_equal(result, expected)

        # DatetimeBlockTZ: tz-aware dtype preserved through dropna
        idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
                                '2011-01-03 10:00', pd.NaT],
                               tz='Asia/Tokyo')
        s = pd.Series(idx)
        self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')
        result = s.dropna()
        expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
                           Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
                          index=[0, 2])
        self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')
        self.assert_series_equal(result, expected)

    def test_dropna_no_nan(self):
        # dropna on NaN-free data returns an equal but distinct object
        for s in [Series([1, 2, 3], name='x'),
                  Series([False, True, False], name='x')]:

            result = s.dropna()
            self.assert_series_equal(result, s)
            self.assertFalse(result is s)

            s2 = s.copy()
            s2.dropna(inplace=True)
self.assert_series_equal(s2, s) def test_axis_alias(self): s = Series([1, 2, np.nan]) assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index')) self.assertEqual(s.dropna().sum('rows'), 3) self.assertEqual(s._get_axis_number('rows'), 0) self.assertEqual(s._get_axis_name('rows'), 'index') def test_drop_duplicates(self): # check both int and object for s in [Series([1, 2, 3, 3]), Series(['1', '2', '3', '3'])]: expected = Series([False, False, False, True]) assert_series_equal(s.duplicated(), expected) assert_series_equal(s.drop_duplicates(), s[~expected]) sc = s.copy() sc.drop_duplicates(inplace=True) assert_series_equal(sc, s[~expected]) expected = Series([False, False, True, False]) assert_series_equal(s.duplicated(keep='last'), expected) assert_series_equal(s.drop_duplicates(keep='last'), s[~expected]) sc = s.copy() sc.drop_duplicates(keep='last', inplace=True) assert_series_equal(sc, s[~expected]) # deprecate take_last with tm.assert_produces_warning(FutureWarning): assert_series_equal(s.duplicated(take_last=True), expected) with tm.assert_produces_warning(FutureWarning): assert_series_equal(s.drop_duplicates(take_last=True), s[~expected]) sc = s.copy() with tm.assert_produces_warning(FutureWarning): sc.drop_duplicates(take_last=True, inplace=True) assert_series_equal(sc, s[~expected]) expected = Series([False, False, True, True]) assert_series_equal(s.duplicated(keep=False), expected) assert_series_equal(s.drop_duplicates(keep=False), s[~expected]) sc = s.copy() sc.drop_duplicates(keep=False, inplace=True) assert_series_equal(sc, s[~expected]) for s in [Series([1, 2, 3, 5, 3, 2, 4]), Series(['1', '2', '3', '5', '3', '2', '4'])]: expected = Series([False, False, False, False, True, True, False]) assert_series_equal(s.duplicated(), expected) assert_series_equal(s.drop_duplicates(), s[~expected]) sc = s.copy() sc.drop_duplicates(inplace=True) assert_series_equal(sc, s[~expected]) expected = Series([False, True, True, False, False, False, False]) 
assert_series_equal(s.duplicated(keep='last'), expected) assert_series_equal(s.drop_duplicates(keep='last'), s[~expected]) sc = s.copy() sc.drop_duplicates(keep='last', inplace=True) assert_series_equal(sc, s[~expected]) # deprecate take_last with tm.assert_produces_warning(FutureWarning): assert_series_equal(s.duplicated(take_last=True), expected) with tm.assert_produces_warning(FutureWarning): assert_series_equal(s.drop_duplicates(take_last=True), s[~expected]) sc = s.copy() with tm.assert_produces_warning(FutureWarning): sc.drop_duplicates(take_last=True, inplace=True) assert_series_equal(sc, s[~expected]) expected = Series([False, True, True, False, True, True, False]) assert_series_equal(s.duplicated(keep=False), expected) assert_series_equal(s.drop_duplicates(keep=False), s[~expected]) sc = s.copy() sc.drop_duplicates(keep=False, inplace=True) assert_series_equal(sc, s[~expected]) def test_sort_values(self): ts = self.ts.copy() # 9816 deprecated with tm.assert_produces_warning(FutureWarning): ts.sort() self.assert_numpy_array_equal(ts, self.ts.sort_values()) self.assert_numpy_array_equal(ts.index, self.ts.sort_values().index) ts.sort_values(ascending=False, inplace=True) self.assert_numpy_array_equal(ts, self.ts.sort_values(ascending=False)) self.assert_numpy_array_equal(ts.index, self.ts.sort_values(ascending=False).index) # GH 5856/5853 # Series.sort_values operating on a view df = DataFrame(np.random.randn(10,4)) s = df.iloc[:,0] def f(): s.sort_values(inplace=True) self.assertRaises(ValueError, f) # test order/sort inplace # GH6859 ts1 = self.ts.copy() ts1.sort_values(ascending=False, inplace=True) ts2 = self.ts.copy() ts2.sort_values(ascending=False, inplace=True) assert_series_equal(ts1,ts2) ts1 = self.ts.copy() ts1 = ts1.sort_values(ascending=False, inplace=False) ts2 = self.ts.copy() ts2 = ts.sort_values(ascending=False) assert_series_equal(ts1,ts2) def test_sort_index(self): rindex = list(self.ts.index) random.shuffle(rindex) random_order = 
self.ts.reindex(rindex) sorted_series = random_order.sort_index() assert_series_equal(sorted_series, self.ts) # descending sorted_series = random_order.sort_index(ascending=False) assert_series_equal(sorted_series, self.ts.reindex(self.ts.index[::-1])) def test_sort_index_inplace(self): # For #11402 rindex = list(self.ts.index) random.shuffle(rindex) # descending random_order = self.ts.reindex(rindex) result = random_order.sort_index(ascending=False, inplace=True) self.assertIs(result, None, msg='sort_index() inplace should return None') assert_series_equal(random_order, self.ts.reindex(self.ts.index[::-1])) # ascending random_order = self.ts.reindex(rindex) result = random_order.sort_index(ascending=True, inplace=True) self.assertIs(result, None, msg='sort_index() inplace should return None') assert_series_equal(random_order, self.ts) def test_sort_API(self): # API for 9816 # sortlevel mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) s = Series([1, 2], mi) backwards = s.iloc[[1, 0]] res = s.sort_index(level='A') assert_series_equal(backwards, res) # sort_index rindex = list(self.ts.index) random.shuffle(rindex) random_order = self.ts.reindex(rindex) sorted_series = random_order.sort_index(level=0) assert_series_equal(sorted_series, self.ts) # compat on axis sorted_series = random_order.sort_index(axis=0) assert_series_equal(sorted_series, self.ts) self.assertRaises(ValueError, lambda : random_order.sort_values(axis=1)) sorted_series = random_order.sort_index(level=0, axis=0) assert_series_equal(sorted_series, self.ts) self.assertRaises(ValueError, lambda : random_order.sort_index(level=0, axis=1)) def test_order(self): # 9816 deprecated with tm.assert_produces_warning(FutureWarning): self.ts.order() ts = self.ts.copy() ts[:5] = np.NaN vals = ts.values result = ts.sort_values() self.assertTrue(np.isnan(result[-5:]).all()) self.assert_numpy_array_equal(result[:-5], np.sort(vals[5:])) result = ts.sort_values(na_position='first') 
self.assertTrue(np.isnan(result[:5]).all()) self.assert_numpy_array_equal(result[5:], np.sort(vals[5:])) # something object-type ser = Series(['A', 'B'], [1, 2]) # no failure ser.sort_values() # ascending=False ordered = ts.sort_values(ascending=False) expected = np.sort(ts.valid().values)[::-1] assert_almost_equal(expected, ordered.valid().values) ordered = ts.sort_values(ascending=False, na_position='first') assert_almost_equal(expected, ordered.valid().values) def test_nsmallest_nlargest(self): # float, int, datetime64 (use i8), timedelts64 (same), # object that are numbers, object that are strings base = [3, 2, 1, 2, 5] s_list = [ Series(base, dtype='int8'), Series(base, dtype='int16'), Series(base, dtype='int32'), Series(base, dtype='int64'), Series(base, dtype='float32'), Series(base, dtype='float64'), Series(base, dtype='uint8'), Series(base, dtype='uint16'), Series(base, dtype='uint32'), Series(base, dtype='uint64'), Series(base).astype('timedelta64[ns]'), Series(pd.to_datetime(['2003', '2002', '2001', '2002', '2005'])), ] raising = [ Series([3., 2, 1, 2, '5'], dtype='object'), Series([3., 2, 1, 2, 5], dtype='object'), # not supported on some archs # Series([3., 2, 1, 2, 5], dtype='complex256'), Series([3., 2, 1, 2, 5], dtype='complex128'), ] for r in raising: dt = r.dtype msg = "Cannot use method 'n(larg|small)est' with dtype %s" % dt args = 2, len(r), 0, -1 methods = r.nlargest, r.nsmallest for method, arg in product(methods, args): with tm.assertRaisesRegexp(TypeError, msg): method(arg) for s in s_list: assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]]) assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]]) with tm.assert_produces_warning(FutureWarning): assert_series_equal(s.nsmallest(2, take_last=True), s.iloc[[2, 3]]) assert_series_equal(s.nlargest(3), s.iloc[[4, 0, 1]]) assert_series_equal(s.nlargest(3, keep='last'), s.iloc[[4, 0, 3]]) with tm.assert_produces_warning(FutureWarning): assert_series_equal(s.nlargest(3, take_last=True), 
s.iloc[[4, 0, 3]]) empty = s.iloc[0:0] assert_series_equal(s.nsmallest(0), empty) assert_series_equal(s.nsmallest(-1), empty) assert_series_equal(s.nlargest(0), empty) assert_series_equal(s.nlargest(-1), empty) assert_series_equal(s.nsmallest(len(s)), s.sort_values()) assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values()) assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]]) assert_series_equal(s.nlargest(len(s) + 1), s.iloc[[4, 0, 1, 3, 2]]) s = Series([3., np.nan, 1, 2, 5]) assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]]) assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]]) msg = 'keep must be either "first", "last"' with tm.assertRaisesRegexp(ValueError, msg): s.nsmallest(keep='invalid') with tm.assertRaisesRegexp(ValueError, msg): s.nlargest(keep='invalid') def test_rank(self): tm._skip_if_no_scipy() from scipy.stats import rankdata self.ts[::2] = np.nan self.ts[:10][::3] = 4. ranks = self.ts.rank() oranks = self.ts.astype('O').rank() assert_series_equal(ranks, oranks) mask = np.isnan(self.ts) filled = self.ts.fillna(np.inf) # rankdata returns a ndarray exp = Series(rankdata(filled),index=filled.index) exp[mask] = np.nan assert_almost_equal(ranks, exp) iseries = Series(np.arange(5).repeat(2)) iranks = iseries.rank() exp = iseries.astype(float).rank() assert_series_equal(iranks, exp) iseries = Series(np.arange(5)) + 1.0 exp = iseries / 5.0 iranks = iseries.rank(pct=True) assert_series_equal(iranks, exp) iseries = Series(np.repeat(1, 100)) exp = Series(np.repeat(0.505, 100)) iranks = iseries.rank(pct=True) assert_series_equal(iranks, exp) iseries[1] = np.nan exp = Series(np.repeat(50.0 / 99.0, 100)) exp[1] = np.nan iranks = iseries.rank(pct=True) assert_series_equal(iranks, exp) iseries = Series(np.arange(5)) + 1.0 iseries[4] = np.nan exp = iseries / 4.0 iranks = iseries.rank(pct=True) assert_series_equal(iranks, exp) iseries = Series(np.repeat(np.nan, 100)) exp = iseries.copy() iranks = iseries.rank(pct=True) 
assert_series_equal(iranks, exp) iseries = Series(np.arange(5)) + 1 iseries[4] = np.nan exp = iseries / 4.0 iranks = iseries.rank(pct=True) assert_series_equal(iranks, exp) rng = date_range('1/1/1990', periods=5) iseries = Series(np.arange(5), rng) + 1 iseries.ix[4] = np.nan exp = iseries / 4.0 iranks = iseries.rank(pct=True) assert_series_equal(iranks, exp) iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20+1e-30, 1e-1]) exp = Series([2, 1, 3, 5, 4, 6.0]) iranks = iseries.rank() assert_series_equal(iranks, exp) values = np.array([-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40], dtype='float64') random_order = np.random.permutation(len(values)) iseries = Series(values[random_order]) exp = Series(random_order + 1.0, dtype='float64') iranks = iseries.rank() assert_series_equal(iranks, exp) def test_rank_inf(self): raise nose.SkipTest('DataFrame.rank does not currently rank np.inf and -np.inf properly') values = np.array([-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40, np.inf], dtype='float64') random_order = np.random.permutation(len(values)) iseries = Series(values[random_order]) exp = Series(random_order + 1.0, dtype='float64') iranks = iseries.rank() assert_series_equal(iranks, exp) def test_from_csv(self): with ensure_clean() as path: self.ts.to_csv(path) ts = Series.from_csv(path) assert_series_equal(self.ts, ts, check_names=False) self.assertTrue(ts.name is None) self.assertTrue(ts.index.name is None) # GH10483 self.ts.to_csv(path, header=True) ts_h = Series.from_csv(path, header=0) self.assertTrue(ts_h.name == 'ts') self.series.to_csv(path) series = Series.from_csv(path) self.assertIsNone(series.name) self.assertIsNone(series.index.name) assert_series_equal(self.series, series, check_names=False) self.assertTrue(series.name is None) self.assertTrue(series.index.name is None) self.series.to_csv(path, header=True) series_h = Series.from_csv(path, header=0) self.assertTrue(series_h.name == 'series') outfile = open(path, 
'w') outfile.write('1998-01-01|1.0\n1999-01-01|2.0') outfile.close() series = Series.from_csv(path, sep='|') checkseries = Series( {datetime(1998, 1, 1): 1.0, datetime(1999, 1, 1): 2.0}) assert_series_equal(checkseries, series) series = Series.from_csv(path, sep='|', parse_dates=False) checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0}) assert_series_equal(checkseries, series) def test_to_csv(self): import io with ensure_clean() as path: self.ts.to_csv(path) lines = io.open(path, newline=None).readlines() assert(lines[1] != '\n') self.ts.to_csv(path, index=False) arr = np.loadtxt(path) assert_almost_equal(arr, self.ts.values) def test_to_csv_unicode_index(self): buf = StringIO() s = Series([u("\u05d0"), "d2"], index=[u("\u05d0"), u("\u05d1")]) s.to_csv(buf, encoding='UTF-8') buf.seek(0) s2 = Series.from_csv(buf, index_col=0, encoding='UTF-8') assert_series_equal(s, s2) def test_tolist(self): rs = self.ts.tolist() xp = self.ts.values.tolist() assert_almost_equal(rs, xp) # datetime64 s = Series(self.ts.index) rs = s.tolist() self.assertEqual(self.ts.index[0], rs[0]) def test_to_frame(self): self.ts.name = None rs = self.ts.to_frame() xp = pd.DataFrame(self.ts.values, index=self.ts.index) assert_frame_equal(rs, xp) self.ts.name = 'testname' rs = self.ts.to_frame() xp = pd.DataFrame(dict(testname=self.ts.values), index=self.ts.index) assert_frame_equal(rs, xp) rs = self.ts.to_frame(name='testdifferent') xp = pd.DataFrame(dict(testdifferent=self.ts.values), index=self.ts.index) assert_frame_equal(rs, xp) def test_to_dict(self): self.assert_numpy_array_equal(Series(self.ts.to_dict()), self.ts) def test_to_csv_float_format(self): with ensure_clean() as filename: ser = Series([0.123456, 0.234567, 0.567567]) ser.to_csv(filename, float_format='%.2f') rs = Series.from_csv(filename) xp = Series([0.12, 0.23, 0.57]) assert_series_equal(rs, xp) def test_to_csv_list_entries(self): s = Series(['jack and jill', 'jesse and frank']) split = s.str.split(r'\s+and\s+') buf = 
StringIO() split.to_csv(buf) def test_to_csv_path_is_none(self): # GH 8215 # Series.to_csv() was returning None, inconsistent with # DataFrame.to_csv() which returned string s = Series([1, 2, 3]) csv_str = s.to_csv(path=None) self.assertIsInstance(csv_str, str) def test_str_attribute(self): # GH9068 methods = ['strip', 'rstrip', 'lstrip'] s = Series([' jack', 'jill ', ' jesse ', 'frank']) for method in methods: expected = Series([getattr(str, method)(x) for x in s.values]) assert_series_equal(getattr(Series.str, method)(s.str), expected) # str accessor only valid with string values s = Series(range(5)) with self.assertRaisesRegexp(AttributeError, 'only use .str accessor'): s.str.repeat(2) def test_clip(self): val = self.ts.median() self.assertEqual(self.ts.clip_lower(val).min(), val) self.assertEqual(self.ts.clip_upper(val).max(), val) self.assertEqual(self.ts.clip(lower=val).min(), val) self.assertEqual(self.ts.clip(upper=val).max(), val) result = self.ts.clip(-0.5, 0.5) expected = np.clip(self.ts, -0.5, 0.5) assert_series_equal(result, expected) tm.assertIsInstance(expected, Series) def test_clip_types_and_nulls(self): sers = [Series([np.nan, 1.0, 2.0, 3.0]), Series([None, 'a', 'b', 'c']), Series(pd.to_datetime([np.nan, 1, 2, 3], unit='D'))] for s in sers: thresh = s[2] l = s.clip_lower(thresh) u = s.clip_upper(thresh) self.assertEqual(l[notnull(l)].min(), thresh) self.assertEqual(u[notnull(u)].max(), thresh) self.assertEqual(list(isnull(s)), list(isnull(l))) self.assertEqual(list(isnull(s)), list(isnull(u))) def test_clip_against_series(self): # GH #6966 s = Series([1.0, 1.0, 4.0]) threshold = Series([1.0, 2.0, 3.0]) assert_series_equal(s.clip_lower(threshold), Series([1.0, 2.0, 4.0])) assert_series_equal(s.clip_upper(threshold), Series([1.0, 1.0, 3.0])) lower = Series([1.0, 2.0, 3.0]) upper = Series([1.5, 2.5, 3.5]) assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5])) assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5])) def 
test_valid(self): ts = self.ts.copy() ts[::2] = np.NaN result = ts.valid() self.assertEqual(len(result), ts.count()) tm.assert_dict_equal(result, ts, compare_keys=False) def test_isnull(self): ser = Series([0, 5.4, 3, nan, -0.001]) np.array_equal( ser.isnull(), Series([False, False, False, True, False]).values) ser = Series(["hi", "", nan]) np.array_equal(ser.isnull(), Series([False, False, True]).values) def test_notnull(self): ser = Series([0, 5.4, 3, nan, -0.001]) np.array_equal( ser.notnull(), Series([True, True, True, False, True]).values) ser = Series(["hi", "", nan]) np.array_equal(ser.notnull(), Series([True, True, False]).values) def test_shift(self): shifted = self.ts.shift(1) unshifted = shifted.shift(-1) tm.assert_dict_equal(unshifted.valid(), self.ts, compare_keys=False) offset = datetools.bday shifted = self.ts.shift(1, freq=offset) unshifted = shifted.shift(-1, freq=offset) assert_series_equal(unshifted, self.ts) unshifted = self.ts.shift(0, freq=offset) assert_series_equal(unshifted, self.ts) shifted = self.ts.shift(1, freq='B') unshifted = shifted.shift(-1, freq='B') assert_series_equal(unshifted, self.ts) # corner case unshifted = self.ts.shift(0) assert_series_equal(unshifted, self.ts) # Shifting with PeriodIndex ps = tm.makePeriodSeries() shifted = ps.shift(1) unshifted = shifted.shift(-1) tm.assert_dict_equal(unshifted.valid(), ps, compare_keys=False) shifted2 = ps.shift(1, 'B') shifted3 = ps.shift(1, datetools.bday) assert_series_equal(shifted2, shifted3) assert_series_equal(ps, shifted2.shift(-1, 'B')) self.assertRaises(ValueError, ps.shift, freq='D') # legacy support shifted4 = ps.shift(1, freq='B') assert_series_equal(shifted2, shifted4) shifted5 = ps.shift(1, freq=datetools.bday) assert_series_equal(shifted5, shifted4) # 32-bit taking # GH 8129 index=date_range('2000-01-01',periods=5) for dtype in ['int32','int64']: s1 = Series(np.arange(5,dtype=dtype),index=index) p = s1.iloc[1] result = s1.shift(periods=p) expected = 
Series([np.nan,0,1,2,3],index=index) assert_series_equal(result,expected) # xref 8260 # with tz s = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'),name='foo') result = s-s.shift() assert_series_equal(result,Series(TimedeltaIndex(['NaT'] + ['1 days']*4),name='foo')) # incompat tz s2 = Series(date_range('2000-01-01 09:00:00',periods=5,tz='CET'),name='foo') self.assertRaises(ValueError, lambda : s-s2) def test_tshift(self): # PeriodIndex ps = tm.makePeriodSeries() shifted = ps.tshift(1) unshifted = shifted.tshift(-1) assert_series_equal(unshifted, ps) shifted2 = ps.tshift(freq='B') assert_series_equal(shifted, shifted2) shifted3 = ps.tshift(freq=datetools.bday) assert_series_equal(shifted, shifted3) self.assertRaises(ValueError, ps.tshift, freq='M') # DatetimeIndex shifted = self.ts.tshift(1) unshifted = shifted.tshift(-1) assert_series_equal(self.ts, unshifted) shifted2 = self.ts.tshift(freq=self.ts.index.freq) assert_series_equal(shifted, shifted2) inferred_ts = Series(self.ts.values, Index(np.asarray(self.ts.index)), name='ts') shifted = inferred_ts.tshift(1) unshifted = shifted.tshift(-1) assert_series_equal(shifted, self.ts.tshift(1)) assert_series_equal(unshifted, inferred_ts) no_freq = self.ts[[0, 5, 7]] self.assertRaises(ValueError, no_freq.tshift) def test_shift_int(self): ts = self.ts.astype(int) shifted = ts.shift(1) expected = ts.astype(float).shift(1) assert_series_equal(shifted, expected) def test_shift_categorical(self): # GH 9416 s = pd.Series(['a', 'b', 'c', 'd'], dtype='category') assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).valid()) sp1 = s.shift(1) assert_index_equal(s.index, sp1.index) self.assertTrue(np.all(sp1.values.codes[:1] == -1)) self.assertTrue(np.all(s.values.codes[:-1] == sp1.values.codes[1:])) sn2 = s.shift(-2) assert_index_equal(s.index, sn2.index) self.assertTrue(np.all(sn2.values.codes[-2:] == -1)) self.assertTrue(np.all(s.values.codes[2:] == sn2.values.codes[:-2])) assert_index_equal(s.values.categories, 
sp1.values.categories) assert_index_equal(s.values.categories, sn2.values.categories) def test_truncate(self): offset = datetools.bday ts = self.ts[::3] start, end = self.ts.index[3], self.ts.index[6] start_missing, end_missing = self.ts.index[2], self.ts.index[7] # neither specified truncated = ts.truncate() assert_series_equal(truncated, ts) # both specified expected = ts[1:3] truncated = ts.truncate(start, end) assert_series_equal(truncated, expected) truncated = ts.truncate(start_missing, end_missing) assert_series_equal(truncated, expected) # start specified expected = ts[1:] truncated = ts.truncate(before=start) assert_series_equal(truncated, expected) truncated = ts.truncate(before=start_missing) assert_series_equal(truncated, expected) # end specified expected = ts[:3] truncated = ts.truncate(after=end) assert_series_equal(truncated, expected) truncated = ts.truncate(after=end_missing) assert_series_equal(truncated, expected) # corner case, empty series returned truncated = ts.truncate(after=self.ts.index[0] - offset) assert(len(truncated) == 0) truncated = ts.truncate(before=self.ts.index[-1] + offset) assert(len(truncated) == 0) self.assertRaises(ValueError, ts.truncate, before=self.ts.index[-1] + offset, after=self.ts.index[0] - offset) def test_ptp(self): N = 1000 arr = np.random.randn(N) ser = Series(arr) self.assertEqual(np.ptp(ser), np.ptp(arr)) # GH11163 s = Series([3, 5, np.nan, -3, 10]) self.assertEqual(s.ptp(), 13) self.assertTrue(pd.isnull(s.ptp(skipna=False))) mi = pd.MultiIndex.from_product([['a','b'], [1,2,3]]) s = pd.Series([1, np.nan, 7, 3, 5, np.nan], index=mi) expected = pd.Series([6, 2], index=['a', 'b'], dtype=np.float64) self.assert_series_equal(s.ptp(level=0), expected) expected = pd.Series([np.nan, np.nan], index=['a', 'b']) self.assert_series_equal(s.ptp(level=0, skipna=False), expected) with self.assertRaises(ValueError): s.ptp(axis=1) s = pd.Series(['a', 'b', 'c', 'd', 'e']) with self.assertRaises(TypeError): s.ptp() with 
self.assertRaises(NotImplementedError): s.ptp(numeric_only=True) def test_asof(self): # array or list or dates N = 50 rng = date_range('1/1/1990', periods=N, freq='53s') ts = Series(np.random.randn(N), index=rng) ts[15:30] = np.nan dates = date_range('1/1/1990', periods=N * 3, freq='25s') result = ts.asof(dates) self.assertTrue(notnull(result).all()) lb = ts.index[14] ub = ts.index[30] result = ts.asof(list(dates)) self.assertTrue(notnull(result).all()) lb = ts.index[14] ub = ts.index[30] mask = (result.index >= lb) & (result.index < ub) rs = result[mask] self.assertTrue((rs == ts[lb]).all()) val = result[result.index[result.index >= ub][0]] self.assertEqual(ts[ub], val) self.ts[5:10] = np.NaN self.ts[15:20] = np.NaN val1 = self.ts.asof(self.ts.index[7]) val2 = self.ts.asof(self.ts.index[19]) self.assertEqual(val1, self.ts[4]) self.assertEqual(val2, self.ts[14]) # accepts strings val1 = self.ts.asof(str(self.ts.index[7])) self.assertEqual(val1, self.ts[4]) # in there self.assertEqual(self.ts.asof(self.ts.index[3]), self.ts[3]) # no as of value d = self.ts.index[0] - datetools.bday self.assertTrue(np.isnan(self.ts.asof(d))) def test_getitem_setitem_datetimeindex(self): from pandas import date_range N = 50 # testing with timezone, GH #2785 rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern') ts = Series(np.random.randn(N), index=rng) result = ts["1990-01-01 04:00:00"] expected = ts[4] self.assertEqual(result, expected) result = ts.copy() result["1990-01-01 04:00:00"] = 0 result["1990-01-01 04:00:00"] = ts[4] assert_series_equal(result, ts) result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"] expected = ts[4:8] assert_series_equal(result, expected) result = ts.copy() result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0 result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8] assert_series_equal(result, ts) lb = "1990-01-01 04:00:00" rb = "1990-01-01 07:00:00" result = ts[(ts.index >= lb) & (ts.index <= rb)] expected = ts[4:8] 
assert_series_equal(result, expected) # repeat all the above with naive datetimes result = ts[datetime(1990, 1, 1, 4)] expected = ts[4] self.assertEqual(result, expected) result = ts.copy() result[datetime(1990, 1, 1, 4)] = 0 result[datetime(1990, 1, 1, 4)] = ts[4] assert_series_equal(result, ts) result = ts[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] expected = ts[4:8] assert_series_equal(result, expected) result = ts.copy() result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = 0 result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = ts[4:8] assert_series_equal(result, ts) lb = datetime(1990, 1, 1, 4) rb = datetime(1990, 1, 1, 7) result = ts[(ts.index >= lb) & (ts.index <= rb)] expected = ts[4:8] assert_series_equal(result, expected) result = ts[ts.index[4]] expected = ts[4] self.assertEqual(result, expected) result = ts[ts.index[4:8]] expected = ts[4:8] assert_series_equal(result, expected) result = ts.copy() result[ts.index[4:8]] = 0 result[4:8] = ts[4:8] assert_series_equal(result, ts) # also test partial date slicing result = ts["1990-01-02"] expected = ts[24:48] assert_series_equal(result, expected) result = ts.copy() result["1990-01-02"] = 0 result["1990-01-02"] = ts[24:48] assert_series_equal(result, ts) def test_getitem_setitem_datetime_tz_pytz(self): tm._skip_if_no_pytz() from pytz import timezone as tz from pandas import date_range N = 50 # testing with timezone, GH #2785 rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern') ts = Series(np.random.randn(N), index=rng) # also test Timestamp tz handling, GH #2789 result = ts.copy() result["1990-01-01 09:00:00+00:00"] = 0 result["1990-01-01 09:00:00+00:00"] = ts[4] assert_series_equal(result, ts) result = ts.copy() result["1990-01-01 03:00:00-06:00"] = 0 result["1990-01-01 03:00:00-06:00"] = ts[4] assert_series_equal(result, ts) # repeat with datetimes result = ts.copy() result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0 result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4] 
assert_series_equal(result, ts) result = ts.copy() # comparison dates with datetime MUST be localized! date = tz('US/Central').localize(datetime(1990, 1, 1, 3)) result[date] = 0 result[date] = ts[4] assert_series_equal(result, ts) def test_getitem_setitem_datetime_tz_dateutil(self): tm._skip_if_no_dateutil() from dateutil.tz import tzutc from pandas.tslib import _dateutil_gettz as gettz tz = lambda x: tzutc() if x == 'UTC' else gettz(x) # handle special case for utc in dateutil from pandas import date_range N = 50 # testing with timezone, GH #2785 rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern') ts = Series(np.random.randn(N), index=rng) # also test Timestamp tz handling, GH #2789 result = ts.copy() result["1990-01-01 09:00:00+00:00"] = 0 result["1990-01-01 09:00:00+00:00"] = ts[4] assert_series_equal(result, ts) result = ts.copy() result["1990-01-01 03:00:00-06:00"] = 0 result["1990-01-01 03:00:00-06:00"] = ts[4] assert_series_equal(result, ts) # repeat with datetimes result = ts.copy() result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0 result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4] assert_series_equal(result, ts) result = ts.copy() result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = 0 result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = ts[4] assert_series_equal(result, ts) def test_getitem_setitem_periodindex(self): from pandas import period_range N = 50 rng = period_range('1/1/1990', periods=N, freq='H') ts = Series(np.random.randn(N), index=rng) result = ts["1990-01-01 04"] expected = ts[4] self.assertEqual(result, expected) result = ts.copy() result["1990-01-01 04"] = 0 result["1990-01-01 04"] = ts[4] assert_series_equal(result, ts) result = ts["1990-01-01 04":"1990-01-01 07"] expected = ts[4:8] assert_series_equal(result, expected) result = ts.copy() result["1990-01-01 04":"1990-01-01 07"] = 0 result["1990-01-01 04":"1990-01-01 07"] = ts[4:8] assert_series_equal(result, ts) lb = "1990-01-01 04" rb = "1990-01-01 
07" result = ts[(ts.index >= lb) & (ts.index <= rb)] expected = ts[4:8] assert_series_equal(result, expected) # GH 2782 result = ts[ts.index[4]] expected = ts[4] self.assertEqual(result, expected) result = ts[ts.index[4:8]] expected = ts[4:8] assert_series_equal(result, expected) result = ts.copy() result[ts.index[4:8]] = 0 result[4:8] = ts[4:8] assert_series_equal(result, ts) def test_asof_periodindex(self): from pandas import period_range, PeriodIndex # array or list or dates N = 50 rng = period_range('1/1/1990', periods=N, freq='H') ts = Series(np.random.randn(N), index=rng) ts[15:30] = np.nan dates = date_range('1/1/1990', periods=N * 3, freq='37min') result = ts.asof(dates) self.assertTrue(notnull(result).all()) lb = ts.index[14] ub = ts.index[30] result = ts.asof(list(dates)) self.assertTrue(notnull(result).all()) lb = ts.index[14] ub = ts.index[30] pix = PeriodIndex(result.index.values, freq='H') mask = (pix >= lb) & (pix < ub) rs = result[mask] self.assertTrue((rs == ts[lb]).all()) ts[5:10] = np.NaN ts[15:20] = np.NaN val1 = ts.asof(ts.index[7]) val2 = ts.asof(ts.index[19]) self.assertEqual(val1, ts[4]) self.assertEqual(val2, ts[14]) # accepts strings val1 = ts.asof(str(ts.index[7])) self.assertEqual(val1, ts[4]) # in there self.assertEqual(ts.asof(ts.index[3]), ts[3]) # no as of value d = ts.index[0].to_timestamp() - datetools.bday self.assertTrue(np.isnan(ts.asof(d))) def test_asof_more(self): from pandas import date_range s = Series([nan, nan, 1, 2, nan, nan, 3, 4, 5], index=date_range('1/1/2000', periods=9)) dates = s.index[[4, 5, 6, 2, 1]] result = s.asof(dates) expected = Series([2, 2, 3, 1, np.nan], index=dates) assert_series_equal(result, expected) s = Series([1.5, 2.5, 1, 2, nan, nan, 3, 4, 5], index=date_range('1/1/2000', periods=9)) result = s.asof(s.index[0]) self.assertEqual(result, s[0]) def test_cast_on_putmask(self): # GH 2746 # need to upcast s = Series([1, 2], index=[1, 2], dtype='int64') s[[True, False]] = Series([0], index=[1], 
# NOTE(review): the first three statements are the tail of a test whose
# opening lines are above this chunk.
dtype='int64')
expected = Series([0, 2], index=[1, 2], dtype='int64')
assert_series_equal(s, expected)

def test_type_promote_putmask(self):
    # GH8387: test that changing types does not break alignment
    ts = Series(np.random.randn(100), index=np.arange(100, 0, -1)).round(5)
    left, mask = ts.copy(), ts > 0
    right = ts[mask].copy().map(str)
    left[mask] = right
    assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t))

    # int -> mixed object promotion via boolean mask assignment
    s = Series([0, 1, 2, 0])
    mask = s > 0
    s2 = s[mask].map(str)
    s[mask] = s2
    assert_series_equal(s, Series([0, '1', '2', 0]))

    s = Series([0, 'foo', 'bar', 0])
    mask = Series([False, True, True, False])
    s2 = s[mask]
    s[mask] = s2
    assert_series_equal(s, Series([0, 'foo', 'bar', 0]))

def test_astype_cast_nan_int(self):
    # casting a float Series containing NaN to int must raise
    df = Series([1.0, 2.0, 3.0, np.nan])
    self.assertRaises(ValueError, df.astype, np.int64)

def test_astype_cast_object_int(self):
    # non-numeric object strings cannot be cast to any integer type...
    arr = Series(["car", "house", "tree", "1"])

    self.assertRaises(ValueError, arr.astype, int)
    self.assertRaises(ValueError, arr.astype, np.int64)
    self.assertRaises(ValueError, arr.astype, np.int8)

    # ...but all-numeric strings can
    arr = Series(['1', '2', '3', '4'], dtype=object)
    result = arr.astype(int)
    self.assert_numpy_array_equal(result, np.arange(1, 5))

def test_astype_datetimes(self):
    import pandas.tslib as tslib

    # all-NaT datetime64 Series -> object
    s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))
    s = s.astype('O')
    self.assertEqual(s.dtype, np.object_)

    s = Series([datetime(2001, 1, 2, 0, 0)])
    s = s.astype('O')
    self.assertEqual(s.dtype, np.object_)

    # assigning NaN keeps the M8[ns] dtype before the cast to object
    s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
    s[1] = np.nan
    self.assertEqual(s.dtype, 'M8[ns]')
    s = s.astype('O')
    self.assertEqual(s.dtype, np.object_)

def test_astype_str(self):
    # GH4405
    digits = string.digits
    s1 = Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)])
    s2 = Series([digits * 10, tm.rands(63), tm.rands(64), nan, 1.0])
    types = (compat.text_type, np.str_)
    for typ in types:
        for s in (s1, s2):
            res = s.astype(typ)
            expec = s.map(compat.text_type)
            assert_series_equal(res, expec)

    # GH9757
    # Test str and unicode on python 2.x and just str on python 3.x
    for tt in set([str, compat.text_type]):
        ts = Series([Timestamp('2010-01-04 00:00:00')])
        s = ts.astype(tt)
        expected = Series([tt('2010-01-04')])
        assert_series_equal(s, expected)

        ts = Series([Timestamp('2010-01-04 00:00:00', tz='US/Eastern')])
        s = ts.astype(tt)
        expected = Series([tt('2010-01-04 00:00:00-05:00')])
        assert_series_equal(s, expected)

        td = Series([Timedelta(1, unit='d')])
        s = td.astype(tt)
        expected = Series([tt('1 days 00:00:00.000000000')])
        assert_series_equal(s, expected)

def test_astype_unicode(self):
    # GH7758
    # a bit of magic is required to set default encoding encoding to utf-8
    digits = string.digits
    test_series = [
        Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
        Series([u('データーサイエンス、お前はもう死んでいる')]),
    ]

    former_encoding = None
    if not compat.PY3:
        # in python we can force the default encoding
        # for this test
        former_encoding = sys.getdefaultencoding()
        reload(sys)
        sys.setdefaultencoding("utf-8")
    if sys.getdefaultencoding() == "utf-8":
        test_series.append(Series([u('野菜食べないとやばい').encode("utf-8")]))

    for s in test_series:
        res = s.astype("unicode")
        expec = s.map(compat.text_type)
        assert_series_equal(res, expec)

    # restore the former encoding
    if former_encoding is not None and former_encoding != "utf-8":
        reload(sys)
        sys.setdefaultencoding(former_encoding)

def test_map(self):
    index, data = tm.getMixedTypeDict()

    source = Series(data['B'], index=data['C'])
    target = Series(data['C'][:4], index=data['D'][:4])

    merged = target.map(source)
    for k, v in compat.iteritems(merged):
        self.assertEqual(v, source[target[k]])

    # input could be a dict
    merged = target.map(source.to_dict())
    for k, v in compat.iteritems(merged):
        self.assertEqual(v, source[target[k]])

    # function
    result = self.ts.map(lambda x: x * 2)
    self.assert_numpy_array_equal(result, self.ts * 2)

    # GH 10324 -- mapping with a categorical Series keeps category dtype
    a = Series([1, 2, 3, 4])
    b = Series(["even", "odd", "even", "odd"], dtype="category")
    c = Series(["even", "odd", "even", "odd"])

    exp = Series(["odd", "even", "odd", np.nan], dtype="category")
    self.assert_series_equal(a.map(b), exp)
    exp = Series(["odd", "even", "odd", np.nan])
    self.assert_series_equal(a.map(c), exp)

    a = Series(['a', 'b', 'c', 'd'])
    b = Series([1, 2, 3, 4],
               index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
    c = Series([1, 2, 3, 4], index=Index(['b', 'c', 'd', 'e']))

    exp = Series([np.nan, 1, 2, 3])
    self.assert_series_equal(a.map(b), exp)
    exp = Series([np.nan, 1, 2, 3])
    self.assert_series_equal(a.map(c), exp)

    a = Series(['a', 'b', 'c', 'd'])
    b = Series(['B', 'C', 'D', 'E'], dtype='category',
               index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
    c = Series(['B', 'C', 'D', 'E'], index=Index(['b', 'c', 'd', 'e']))

    exp = Series([np.nan, 'B', 'C', 'D'], dtype='category')
    self.assert_series_equal(a.map(b), exp)
    exp = Series([np.nan, 'B', 'C', 'D'])
    self.assert_series_equal(a.map(c), exp)

def test_map_compat(self):
    # related GH 8024 -- map with a bool-keyed dict
    s = Series([True, True, False], index=[1, 2, 3])
    result = s.map({True: 'foo', False: 'bar'})
    expected = Series(['foo', 'foo', 'bar'], index=[1, 2, 3])
    assert_series_equal(result, expected)

def test_map_int(self):
    left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})
    right = Series({1: 11, 2: 22, 3: 33})

    self.assertEqual(left.dtype, np.float_)
    self.assertTrue(issubclass(right.dtype.type, np.integer))

    merged = left.map(right)
    # missing key 'd' -> NaN, so result stays float
    self.assertEqual(merged.dtype, np.float_)
    self.assertTrue(isnull(merged['d']))
    self.assertTrue(not isnull(merged['c']))

def test_map_type_inference(self):
    s = Series(lrange(3))
    s2 = s.map(lambda x: np.where(x == 0, 0, 1))
    # 0-d np.where results should still infer an integer dtype
    self.assertTrue(issubclass(s2.dtype.type, np.integer))

def test_divide_decimal(self):
    '''
    resolves issue #9787
    '''
    from decimal import Decimal

    expected = Series([Decimal(5)])

    s = Series([Decimal(10)])
    s = s / Decimal(2)
    tm.assert_series_equal(expected, s)

    s = Series([Decimal(10)])
    s = s // Decimal(2)
    tm.assert_series_equal(expected, s)

def test_map_decimal(self):
    from decimal import Decimal

    result = self.series.map(lambda x: Decimal(str(x)))
# NOTE(review): the first two statements are the tail of test_map_decimal,
# whose opening lines are above this chunk.
self.assertEqual(result.dtype, np.object_)
tm.assertIsInstance(result[0], Decimal)

def test_map_na_exclusion(self):
    # na_action='ignore' skips NaN entries instead of passing them to func
    s = Series([1.5, np.nan, 3, np.nan, 5])

    result = s.map(lambda x: x * 2, na_action='ignore')
    exp = s * 2
    assert_series_equal(result, exp)

def test_map_dict_with_tuple_keys(self):
    '''
    Due to new MultiIndex-ing behaviour in v0.14.0,
    dicts with tuple keys passed to map were being
    converted to a multi-index, preventing tuple values
    from being mapped properly.
    '''
    df = pd.DataFrame({'a': [(1,), (2,), (3, 4), (5, 6)]})
    label_mappings = {
        (1,): 'A',
        (2,): 'B',
        (3, 4): 'A',
        (5, 6): 'B'
    }
    df['labels'] = df['a'].map(label_mappings)
    df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index)
    # All labels should be filled now
    tm.assert_series_equal(df['labels'], df['expected_labels'],
                           check_names=False)

def test_apply(self):
    assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))

    # elementwise-apply
    import math
    assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))

    # how to handle Series result, #2316
    result = self.ts.apply(lambda x: Series([x, x ** 2],
                                            index=['x', 'x^2']))
    expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})
    tm.assert_frame_equal(result, expected)

    # empty series
    s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
    rs = s.apply(lambda x: x)
    tm.assert_series_equal(s, rs)
    # check all metadata (GH 9322)
    self.assertIsNot(s, rs)
    self.assertIs(s.index, rs.index)
    self.assertEqual(s.dtype, rs.dtype)
    self.assertEqual(s.name, rs.name)

    # index but no data
    s = Series(index=[1, 2, 3])
    rs = s.apply(lambda x: x)
    tm.assert_series_equal(s, rs)

def test_apply_same_length_inference_bug(self):
    # a func returning a tuple should behave the same in apply and map
    s = Series([1, 2])
    f = lambda x: (x, x + 1)

    result = s.apply(f)
    expected = s.map(f)
    assert_series_equal(result, expected)

    s = Series([1, 2, 3])
    result = s.apply(f)
    expected = s.map(f)
    assert_series_equal(result, expected)

def test_apply_dont_convert_dtype(self):
    s = Series(np.random.randn(10))

    f = lambda x: x if x > 0 else np.nan
    result = s.apply(f, convert_dtype=False)
    # convert_dtype=False must leave the result as object
    self.assertEqual(result.dtype, object)

def test_convert_objects(self):
    # deprecated API (convert_objects) -- every call must warn
    s = Series([1., 2, 3], index=['a', 'b', 'c'])
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_dates=False,
                                   convert_numeric=True)
    assert_series_equal(result, s)

    # force numeric conversion
    r = s.copy().astype('O')
    r['a'] = '1'
    with tm.assert_produces_warning(FutureWarning):
        result = r.convert_objects(convert_dates=False,
                                   convert_numeric=True)
    assert_series_equal(result, s)

    r = s.copy().astype('O')
    r['a'] = '1.'
    with tm.assert_produces_warning(FutureWarning):
        result = r.convert_objects(convert_dates=False,
                                   convert_numeric=True)
    assert_series_equal(result, s)

    r = s.copy().astype('O')
    r['a'] = 'garbled'
    expected = s.copy()
    expected['a'] = np.nan
    with tm.assert_produces_warning(FutureWarning):
        result = r.convert_objects(convert_dates=False,
                                   convert_numeric=True)
    assert_series_equal(result, expected)

    # GH 4119, not converting a mixed type (e.g.floats and object)
    s = Series([1, 'na', 3, 4])
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_numeric=True)
    expected = Series([1, np.nan, 3, 4])
    assert_series_equal(result, expected)

    s = Series([1, '', 3, 4])
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_numeric=True)
    expected = Series([1, np.nan, 3, 4])
    assert_series_equal(result, expected)

    # dates
    s = Series(
        [datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
         datetime(2001, 1, 3, 0, 0)])
    s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
                 datetime(2001, 1, 3, 0, 0), 'foo', 1.0, 1,
                 Timestamp('20010104'), '20010105'], dtype='O')
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_dates=True,
                                   convert_numeric=False)
    expected = Series(
        [Timestamp('20010101'), Timestamp('20010102'),
         Timestamp('20010103')], dtype='M8[ns]')
    assert_series_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_dates='coerce',
                                   convert_numeric=False)
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_dates='coerce',
                                   convert_numeric=True)
    assert_series_equal(result, expected)

    # unconvertible entries become NaT under 'coerce'
    expected = Series(
        [Timestamp('20010101'), Timestamp('20010102'),
         Timestamp('20010103'),
         lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'),
         Timestamp('20010105')], dtype='M8[ns]')
    with tm.assert_produces_warning(FutureWarning):
        result = s2.convert_objects(convert_dates='coerce',
                                    convert_numeric=False)
    assert_series_equal(result, expected)
    with tm.assert_produces_warning(FutureWarning):
        result = s2.convert_objects(convert_dates='coerce',
                                    convert_numeric=True)
    assert_series_equal(result, expected)

    # preserver all-nans (if convert_dates='coerce')
    s = Series(['foo', 'bar', 1, 1.0], dtype='O')
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_dates='coerce',
                                   convert_numeric=False)
    assert_series_equal(result, s)

    # preserver if non-object
    s = Series([1], dtype='float32')
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_dates='coerce',
                                   convert_numeric=False)
    assert_series_equal(result, s)

    #r = s.copy()
    #r[0] = np.nan
    #result = r.convert_objects(convert_dates=True,convert_numeric=False)
    #self.assertEqual(result.dtype, 'M8[ns]')

    # dateutil parses some single letters into today's value as a date
    for x in 'abcdefghijklmnopqrstuvwxyz':
        s = Series([x])
        with tm.assert_produces_warning(FutureWarning):
            result = s.convert_objects(convert_dates='coerce')
        assert_series_equal(result, s)
        s = Series([x.upper()])
        with tm.assert_produces_warning(FutureWarning):
            result = s.convert_objects(convert_dates='coerce')
        assert_series_equal(result, s)

def test_convert_objects_preserve_bool(self):
    # True in a mixed numeric column converts to 1 (int64)
    s = Series([1, True, 3, 5], dtype=object)
    with tm.assert_produces_warning(FutureWarning):
        r = s.convert_objects(convert_numeric=True)
    e = Series([1, 1, 3, 5], dtype='i8')
    tm.assert_series_equal(r, e)

# NOTE(review): this dangling "def" belongs to the test whose name starts
# the next chunk of the file.
def
# NOTE(review): the "def" keyword for this test sits at the very end of the
# previous chunk of the file.
test_convert_objects_preserve_all_bool(self):
    # an all-bool object column must stay bool, not become int
    s = Series([False, True, False, False], dtype=object)
    with tm.assert_produces_warning(FutureWarning):
        r = s.convert_objects(convert_numeric=True)
    e = Series([False, True, False, False], dtype=bool)
    tm.assert_series_equal(r, e)

# GH 10265
def test_convert(self):
    # Tests: All to nans, coerce, true
    # Test coercion returns correct type
    s = Series(['a', 'b', 'c'])
    results = s._convert(datetime=True, coerce=True)
    expected = Series([lib.NaT] * 3)
    assert_series_equal(results, expected)

    results = s._convert(numeric=True, coerce=True)
    expected = Series([np.nan] * 3)
    assert_series_equal(results, expected)

    expected = Series([lib.NaT] * 3, dtype=np.dtype('m8[ns]'))
    results = s._convert(timedelta=True, coerce=True)
    assert_series_equal(results, expected)

    dt = datetime(2001, 1, 1, 0, 0)
    td = dt - datetime(2000, 1, 1, 0, 0)

    # Test coercion with mixed types
    s = Series(['a', '3.1415', dt, td])
    results = s._convert(datetime=True, coerce=True)
    expected = Series([lib.NaT, lib.NaT, dt, lib.NaT])
    assert_series_equal(results, expected)

    results = s._convert(numeric=True, coerce=True)
    expected = Series([nan, 3.1415, nan, nan])
    assert_series_equal(results, expected)

    results = s._convert(timedelta=True, coerce=True)
    expected = Series([lib.NaT, lib.NaT, lib.NaT, td],
                      dtype=np.dtype('m8[ns]'))
    assert_series_equal(results, expected)

    # Test standard conversion returns original
    results = s._convert(datetime=True)
    assert_series_equal(results, s)
    results = s._convert(numeric=True)
    expected = Series([nan, 3.1415, nan, nan])
    assert_series_equal(results, expected)
    results = s._convert(timedelta=True)
    assert_series_equal(results, s)

    # test pass-through and non-conversion when other types selected
    s = Series(['1.0', '2.0', '3.0'])
    results = s._convert(datetime=True, numeric=True, timedelta=True)
    expected = Series([1.0, 2.0, 3.0])
    assert_series_equal(results, expected)
    results = s._convert(True, False, True)
    assert_series_equal(results, s)

    s = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 1, 0, 0)],
               dtype='O')
    results = s._convert(datetime=True, numeric=True, timedelta=True)
    expected = Series([datetime(2001, 1, 1, 0, 0),
                       datetime(2001, 1, 1, 0, 0)])
    assert_series_equal(results, expected)
    results = s._convert(datetime=False, numeric=True, timedelta=True)
    assert_series_equal(results, s)

    td = datetime(2001, 1, 1, 0, 0) - datetime(2000, 1, 1, 0, 0)
    s = Series([td, td], dtype='O')
    results = s._convert(datetime=True, numeric=True, timedelta=True)
    expected = Series([td, td])
    assert_series_equal(results, expected)
    results = s._convert(True, True, False)
    assert_series_equal(results, s)

    s = Series([1., 2, 3], index=['a', 'b', 'c'])
    result = s._convert(numeric=True)
    assert_series_equal(result, s)

    # force numeric conversion
    r = s.copy().astype('O')
    r['a'] = '1'
    result = r._convert(numeric=True)
    assert_series_equal(result, s)

    r = s.copy().astype('O')
    r['a'] = '1.'
    result = r._convert(numeric=True)
    assert_series_equal(result, s)

    r = s.copy().astype('O')
    r['a'] = 'garbled'
    result = r._convert(numeric=True)
    expected = s.copy()
    expected['a'] = nan
    assert_series_equal(result, expected)

    # GH 4119, not converting a mixed type (e.g.floats and object)
    s = Series([1, 'na', 3, 4])
    result = s._convert(datetime=True, numeric=True)
    expected = Series([1, nan, 3, 4])
    assert_series_equal(result, expected)

    s = Series([1, '', 3, 4])
    result = s._convert(datetime=True, numeric=True)
    assert_series_equal(result, expected)

    # dates
    s = Series(
        [datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
         datetime(2001, 1, 3, 0, 0)])
    s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0),
                 datetime(2001, 1, 3, 0, 0), 'foo', 1.0, 1,
                 Timestamp('20010104'), '20010105'], dtype='O')

    result = s._convert(datetime=True)
    expected = Series(
        [Timestamp('20010101'), Timestamp('20010102'),
         Timestamp('20010103')], dtype='M8[ns]')
    assert_series_equal(result, expected)

    result = s._convert(datetime=True, coerce=True)
    assert_series_equal(result, expected)

    # unconvertible entries become NaT under coerce
    expected = Series(
        [Timestamp('20010101'), Timestamp('20010102'),
         Timestamp('20010103'),
         lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'),
         Timestamp('20010105')], dtype='M8[ns]')
    result = s2._convert(datetime=True, numeric=False, timedelta=False,
                         coerce=True)
    assert_series_equal(result, expected)
    result = s2._convert(datetime=True, coerce=True)
    assert_series_equal(result, expected)

    s = Series(['foo', 'bar', 1, 1.0], dtype='O')
    result = s._convert(datetime=True, coerce=True)
    expected = Series([lib.NaT] * 4)
    assert_series_equal(result, expected)

    # preserver if non-object
    s = Series([1], dtype='float32')
    result = s._convert(datetime=True, coerce=True)
    assert_series_equal(result, s)

    #r = s.copy()
    #r[0] = np.nan
    #result = r._convert(convert_dates=True,convert_numeric=False)
    #self.assertEqual(result.dtype, 'M8[ns]')

    # dateutil parses some single letters into today's value as a date
    expected = Series([lib.NaT])
    for x in 'abcdefghijklmnopqrstuvwxyz':
        s = Series([x])
        result = s._convert(datetime=True, coerce=True)
        assert_series_equal(result, expected)
        s = Series([x.upper()])
        result = s._convert(datetime=True, coerce=True)
        assert_series_equal(result, expected)

def test_convert_no_arg_error(self):
    # calling _convert with no conversion selected must raise
    s = Series(['1.0', '2'])
    self.assertRaises(ValueError, s._convert)

def test_convert_preserve_bool(self):
    # True in a mixed numeric column converts to 1 (int64)
    s = Series([1, True, 3, 5], dtype=object)
    r = s._convert(datetime=True, numeric=True)
    e = Series([1, 1, 3, 5], dtype='i8')
    tm.assert_series_equal(r, e)

def test_convert_preserve_all_bool(self):
    # an all-bool object column must stay bool, not become int
    s = Series([False, True, False, False], dtype=object)
    r = s._convert(datetime=True, numeric=True)
    e = Series([False, True, False, False], dtype=bool)
    tm.assert_series_equal(r, e)

def test_apply_args(self):
    # positional args are forwarded to the applied function
    s = Series(['foo,bar'])

    result = s.apply(str.split, args=(',',))
    self.assertEqual(result[0], ['foo', 'bar'])
    tm.assertIsInstance(result[0], list)

def test_align(self):
    def _check_align(a, b, how='left', fill=None):
        aa, ab = a.align(b, join=how, fill_value=fill)

        join_index = a.index.join(b.index,
# NOTE(review): continuation of test_align's _check_align helper, whose
# opening lines are above this chunk.
                                  how=how)
        if fill is not None:
            diff_a = aa.index.difference(join_index)
            diff_b = ab.index.difference(join_index)
            if len(diff_a) > 0:
                self.assertTrue((aa.reindex(diff_a) == fill).all())
            if len(diff_b) > 0:
                self.assertTrue((ab.reindex(diff_b) == fill).all())

        ea = a.reindex(join_index)
        eb = b.reindex(join_index)

        if fill is not None:
            ea = ea.fillna(fill)
            eb = eb.fillna(fill)

        assert_series_equal(aa, ea)
        assert_series_equal(ab, eb)
        self.assertEqual(aa.name, 'ts')
        self.assertEqual(ea.name, 'ts')
        self.assertEqual(ab.name, 'ts')
        self.assertEqual(eb.name, 'ts')

    for kind in JOIN_TYPES:
        _check_align(self.ts[2:], self.ts[:-5], how=kind)
        _check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)

        # empty left
        _check_align(self.ts[:0], self.ts[:-5], how=kind)
        _check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)

        # empty right
        _check_align(self.ts[:-5], self.ts[:0], how=kind)
        _check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)

        # both empty
        _check_align(self.ts[:0], self.ts[:0], how=kind)
        _check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)

def test_align_fill_method(self):
    # align with a fill method must match reindex + fillna
    def _check_align(a, b, how='left', method='pad', limit=None):
        aa, ab = a.align(b, join=how, method=method, limit=limit)

        join_index = a.index.join(b.index, how=how)
        ea = a.reindex(join_index)
        eb = b.reindex(join_index)

        ea = ea.fillna(method=method, limit=limit)
        eb = eb.fillna(method=method, limit=limit)

        assert_series_equal(aa, ea)
        assert_series_equal(ab, eb)

    for kind in JOIN_TYPES:
        for meth in ['pad', 'bfill']:
            _check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
            _check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth,
                         limit=1)

            # empty left
            _check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
            _check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
                         limit=1)

            # empty right
            _check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
            _check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
                         limit=1)

            # both empty
            _check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
            _check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
                         limit=1)

def test_align_nocopy(self):
    b = self.ts[:5].copy()

    # do copy
    a = self.ts.copy()
    ra, _ = a.align(b, join='left')
    ra[:5] = 5
    self.assertFalse((a[:5] == 5).any())

    # do not copy
    a = self.ts.copy()
    ra, _ = a.align(b, join='left', copy=False)
    ra[:5] = 5
    self.assertTrue((a[:5] == 5).all())

    # do copy
    a = self.ts.copy()
    b = self.ts[:5].copy()
    _, rb = a.align(b, join='right')
    rb[:3] = 5
    self.assertFalse((b[:3] == 5).any())

    # do not copy
    a = self.ts.copy()
    b = self.ts[:5].copy()
    _, rb = a.align(b, join='right', copy=False)
    rb[:2] = 5
    self.assertTrue((b[:2] == 5).all())

def test_align_sameindex(self):
    a, b = self.ts.align(self.ts, copy=False)
    self.assertIs(a.index, self.ts.index)
    self.assertIs(b.index, self.ts.index)

    # a, b = self.ts.align(self.ts, copy=True)
    # self.assertIsNot(a.index, self.ts.index)
    # self.assertIsNot(b.index, self.ts.index)

def test_align_multiindex(self):
    # GH 10665
    midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
                                      names=('a', 'b', 'c'))
    idx = pd.Index(range(2), name='b')
    s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
    s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)

    # these must be the same results (but flipped)
    res1l, res1r = s1.align(s2, join='left')
    res2l, res2r = s2.align(s1, join='right')

    expl = s1
    tm.assert_series_equal(expl, res1l)
    tm.assert_series_equal(expl, res2r)
    expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
    tm.assert_series_equal(expr, res1r)
    tm.assert_series_equal(expr, res2l)

    res1l, res1r = s1.align(s2, join='right')
    res2l, res2r = s2.align(s1, join='left')

    exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
                                         names=('a', 'b', 'c'))
    expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
    tm.assert_series_equal(expl, res1l)
    tm.assert_series_equal(expl, res2r)
    expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
    tm.assert_series_equal(expr, res1r)
    tm.assert_series_equal(expr, res2l)

def test_reindex(self):
    identity = self.series.reindex(self.series.index)

    # __array_interface__ is not defined for older numpies
    # and on some pythons
    try:
        self.assertTrue(np.may_share_memory(self.series.index,
                                            identity.index))
    except (AttributeError):
        pass

    self.assertTrue(identity.index.is_(self.series.index))
    self.assertTrue(identity.index.identical(self.series.index))

    subIndex = self.series.index[10:20]
    subSeries = self.series.reindex(subIndex)
    for idx, val in compat.iteritems(subSeries):
        self.assertEqual(val, self.series[idx])

    subIndex2 = self.ts.index[10:20]
    subTS = self.ts.reindex(subIndex2)
    for idx, val in compat.iteritems(subTS):
        self.assertEqual(val, self.ts[idx])

    # reindexing a datetime-indexed series on unrelated labels -> all NaN
    stuffSeries = self.ts.reindex(subIndex)
    self.assertTrue(np.isnan(stuffSeries).all())

    # This is extremely important for the Cython code to not screw up
    nonContigIndex = self.ts.index[::2]
    subNonContig = self.ts.reindex(nonContigIndex)
    for idx, val in compat.iteritems(subNonContig):
        self.assertEqual(val, self.ts[idx])

    # return a copy the same index here
    result = self.ts.reindex()
    self.assertFalse((result is self.ts))

def test_reindex_nan(self):
    ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])

    i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
    assert_series_equal(ts.reindex(i), ts.iloc[j])

    ts.index = ts.index.astype('object')

    # reindex coerces index.dtype to float, loc/iloc doesn't
    assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)

def test_reindex_corner(self):
    # (don't forget to fix this) I think it's fixed
    reindexed_dep = self.empty.reindex(self.ts.index, method='pad')

    # corner case: pad empty series
    reindexed = self.empty.reindex(self.ts.index, method='pad')

    # pass non-Index
    reindexed = self.ts.reindex(list(self.ts.index))
    assert_series_equal(self.ts, reindexed)

    # bad fill method
    ts = self.ts[::2]
    self.assertRaises(Exception, ts.reindex, self.ts.index, method='foo')

def test_reindex_pad(self):
    s = Series(np.arange(10), dtype='int64')
    s2 = s[::2]

    reindexed = s2.reindex(s.index,
# NOTE(review): continuation of test_reindex_pad, whose opening lines are
# above this chunk.
                            method='pad')
    reindexed2 = s2.reindex(s.index, method='ffill')
    assert_series_equal(reindexed, reindexed2)

    expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
    assert_series_equal(reindexed, expected)

    # GH4604
    s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
    new_index = ['a', 'g', 'c', 'f']
    expected = Series([1, 1, 3, 3], index=new_index)

    # this changes dtype because the ffill happens after
    result = s.reindex(new_index).ffill()
    assert_series_equal(result, expected.astype('float64'))

    result = s.reindex(new_index).ffill(downcast='infer')
    assert_series_equal(result, expected)

    expected = Series([1, 5, 3, 5], index=new_index)
    result = s.reindex(new_index, method='ffill')
    assert_series_equal(result, expected)

    # inferrence of new dtype
    s = Series([True, False, False, True], index=list('abcd'))
    new_index = 'agc'
    result = s.reindex(list(new_index)).ffill()
    expected = Series([True, True, False], index=list(new_index))
    assert_series_equal(result, expected)

    # GH4618 shifted series downcasting
    s = Series(False, index=lrange(0, 5))
    result = s.shift(1).fillna(method='bfill')
    expected = Series(False, index=lrange(0, 5))
    assert_series_equal(result, expected)

def test_reindex_nearest(self):
    s = Series(np.arange(10, dtype='int64'))
    target = [0.1, 0.9, 1.5, 2.0]
    actual = s.reindex(target, method='nearest')
    expected = Series(np.around(target).astype('int64'), target)
    assert_series_equal(expected, actual)

    actual = s.reindex_like(actual, method='nearest')
    assert_series_equal(expected, actual)

    actual = s.reindex_like(actual, method='nearest', tolerance=1)
    assert_series_equal(expected, actual)

    # targets farther than tolerance are left as NaN
    actual = s.reindex(target, method='nearest', tolerance=0.2)
    expected = Series([0, 1, np.nan, 2], target)
    assert_series_equal(expected, actual)

def test_reindex_backfill(self):
    pass

def test_reindex_int(self):
    ts = self.ts[::2]
    int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)

    # this should work fine
    reindexed_int = int_ts.reindex(self.ts.index)

    # if NaNs introduced
    self.assertEqual(reindexed_int.dtype, np.float_)

    # NO NaNs introduced
    reindexed_int = int_ts.reindex(int_ts.index[::2])
    self.assertEqual(reindexed_int.dtype, np.int_)

def test_reindex_bool(self):
    # A series other than float, int, string, or object
    ts = self.ts[::2]
    bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)

    # this should work fine
    reindexed_bool = bool_ts.reindex(self.ts.index)

    # if NaNs introduced
    self.assertEqual(reindexed_bool.dtype, np.object_)

    # NO NaNs introduced
    reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
    self.assertEqual(reindexed_bool.dtype, np.bool_)

def test_reindex_bool_pad(self):
    # fail
    ts = self.ts[5:]
    bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
    filled_bool = bool_ts.reindex(self.ts.index, method='pad')
    self.assertTrue(isnull(filled_bool[:5]).all())

def test_reindex_like(self):
    other = self.ts[::2]
    assert_series_equal(self.ts.reindex(other.index),
                        self.ts.reindex_like(other))

    # GH 7179
    day1 = datetime(2013, 3, 5)
    day2 = datetime(2013, 5, 5)
    day3 = datetime(2014, 3, 5)

    series1 = Series([5, None, None], [day1, day2, day3])
    series2 = Series([None, None], [day1, day3])

    result = series1.reindex_like(series2, method='pad')
    expected = Series([5, np.nan], index=[day1, day3])
    assert_series_equal(result, expected)

def test_reindex_fill_value(self):
    #------------------------------------------------------------
    # floats
    floats = Series([1., 2., 3.])
    result = floats.reindex([1, 2, 3])
    expected = Series([2., 3., np.nan], index=[1, 2, 3])
    assert_series_equal(result, expected)

    result = floats.reindex([1, 2, 3], fill_value=0)
    expected = Series([2., 3., 0], index=[1, 2, 3])
    assert_series_equal(result, expected)

    #------------------------------------------------------------
    # ints
    ints = Series([1, 2, 3])

    result = ints.reindex([1, 2, 3])
    expected = Series([2., 3., np.nan], index=[1, 2, 3])
    assert_series_equal(result, expected)

    # don't upcast
    result = ints.reindex([1, 2, 3], fill_value=0)
    expected = Series([2, 3, 0], index=[1, 2, 3])
    self.assertTrue(issubclass(result.dtype.type, np.integer))
    assert_series_equal(result, expected)

    #------------------------------------------------------------
    # objects
    objects = Series([1, 2, 3], dtype=object)

    result = objects.reindex([1, 2, 3])
    expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
    assert_series_equal(result, expected)

    result = objects.reindex([1, 2, 3], fill_value='foo')
    expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)
    assert_series_equal(result, expected)

    #------------------------------------------------------------
    # bools
    bools = Series([True, False, True])

    result = bools.reindex([1, 2, 3])
    expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
    assert_series_equal(result, expected)

    result = bools.reindex([1, 2, 3], fill_value=False)
    expected = Series([False, True, False], index=[1, 2, 3])
    assert_series_equal(result, expected)

def test_rename(self):
    # rename by function
    renamer = lambda x: x.strftime('%Y%m%d')
    renamed = self.ts.rename(renamer)
    self.assertEqual(renamed.index[0], renamer(self.ts.index[0]))

    # dict
    rename_dict = dict(zip(self.ts.index, renamed.index))
    renamed2 = self.ts.rename(rename_dict)
    assert_series_equal(renamed, renamed2)

    # partial dict
    s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')
    renamed = s.rename({'b': 'foo', 'd': 'bar'})
    self.assert_numpy_array_equal(renamed.index, ['a', 'foo', 'c', 'bar'])

    # index with name
    renamer = Series(np.arange(4),
                     index=Index(['a', 'b', 'c', 'd'], name='name'),
                     dtype='int64')
    renamed = renamer.rename({})
    self.assertEqual(renamed.index.name, renamer.index.name)

def test_rename_inplace(self):
    renamer = lambda x: x.strftime('%Y%m%d')
    expected = renamer(self.ts.index[0])
    self.ts.rename(renamer, inplace=True)
    self.assertEqual(self.ts.index[0], expected)

def test_preserveRefs(self):
    # mutating a fancy-indexed slice must not write through to the parent
    seq = self.ts[[5, 10, 15]]
    seq[1] = np.NaN
    self.assertFalse(np.isnan(self.ts[10]))

def test_ne(self):
    ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
    expected =
# NOTE(review): the first statements complete test_ne's "expected ="
# assignment, whose opening lines are above this chunk.
    [True, True, False, True, True]
    self.assertTrue(tm.equalContents(ts.index != 5, expected))
    self.assertTrue(tm.equalContents(~(ts.index == 5), expected))

def test_pad_nan(self):
    x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'],
               dtype=float)

    x.fillna(method='pad', inplace=True)

    # leading NaN has nothing to pad from and must stay NaN
    expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
                      ['z', 'a', 'b', 'c', 'd'], dtype=float)
    assert_series_equal(x[1:], expected[1:])
    self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))

def test_unstack(self):
    from numpy import nan
    from pandas.util.testing import assert_frame_equal

    index = MultiIndex(levels=[['bar', 'foo'], ['one', 'three', 'two']],
                       labels=[[1, 1, 0, 0], [0, 1, 0, 2]])

    s = Series(np.arange(4.), index=index)
    unstacked = s.unstack()

    expected = DataFrame([[2., nan, 3.], [0., 1., nan]],
                         index=['bar', 'foo'],
                         columns=['one', 'three', 'two'])

    assert_frame_equal(unstacked, expected)

    # unstacking level 0 transposes the result
    unstacked = s.unstack(level=0)
    assert_frame_equal(unstacked, expected.T)

    index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                       labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2],
                               [0, 1, 0, 1, 0, 1]])
    s = Series(np.random.randn(6), index=index)
    exp_index = MultiIndex(levels=[['one', 'two', 'three'], [0, 1]],
                           labels=[[0, 1, 2, 0, 1, 2],
                                   [0, 1, 0, 1, 0, 1]])
    expected = DataFrame({'bar': s.values}, index=exp_index).sortlevel(0)
    unstacked = s.unstack(0)
    assert_frame_equal(unstacked, expected)

    # GH5873 -- NaN in a MultiIndex level
    idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
    ts = pd.Series([1, 2], index=idx)
    left = ts.unstack()
    right = DataFrame([[nan, 1], [2, nan]], index=[101, 102],
                      columns=[nan, 3.5])
    print(left)
    print(right)
    assert_frame_equal(left, right)

    idx = pd.MultiIndex.from_arrays([['cat', 'cat', 'cat', 'dog', 'dog'],
                                     ['a', 'a', 'b', 'a', 'b'],
                                     [1, 2, 1, 1, np.nan]])
    ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
    right = DataFrame([[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]],
                      columns=['cat', 'dog'])
    tpls = [('a', 1), ('a', 2), ('b', nan), ('b', 1)]
    right.index = pd.MultiIndex.from_tuples(tpls)
    assert_frame_equal(ts.unstack(level=0), right)

def test_sortlevel(self):
    mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
    s = Series([1, 2], mi)
    backwards = s.iloc[[1, 0]]

    res = s.sortlevel('A')
    assert_series_equal(backwards, res)

    res = s.sortlevel(['A', 'B'])
    assert_series_equal(backwards, res)

    # sort_remaining=False leaves the remaining levels untouched
    res = s.sortlevel('A', sort_remaining=False)
    assert_series_equal(s, res)

    res = s.sortlevel(['A', 'B'], sort_remaining=False)
    assert_series_equal(s, res)

def test_head_tail(self):
    # default head/tail are 5-row slices
    assert_series_equal(self.series.head(), self.series[:5])
    assert_series_equal(self.series.tail(), self.series[-5:])

def test_isin(self):
    s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])

    result = s.isin(['A', 'C'])
    expected = Series([True, False, True, False, False, False, True, True])
    assert_series_equal(result, expected)

def test_isin_with_string_scalar(self):
    # GH4763 -- a bare string argument to isin must raise
    s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
    with tm.assertRaises(TypeError):
        s.isin('a')

    with tm.assertRaises(TypeError):
        s = Series(['aaa', 'b', 'c'])
        s.isin('aaa')

def test_isin_with_i8(self):
    # GH 5021

    expected = Series([True, True, False, False, False])
    expected2 = Series([False, True, False, False, False])

    # datetime64[ns]
    s = Series(date_range('jan-01-2013', 'jan-05-2013'))

    result = s.isin(s[0:2])
    assert_series_equal(result, expected)

    result = s.isin(s[0:2].values)
    assert_series_equal(result, expected)

    # fails on dtype conversion in the first place
    result = s.isin(s[0:2].values.astype('datetime64[D]'))
    assert_series_equal(result, expected)

    result = s.isin([s[1]])
    assert_series_equal(result, expected2)

    result = s.isin([np.datetime64(s[1])])
    assert_series_equal(result, expected2)

    # timedelta64[ns]
    s = Series(pd.to_timedelta(lrange(5), unit='d'))
    result = s.isin(s[0:2])
    assert_series_equal(result, expected)

#------------------------------------------------------------------------------
# TimeSeries-specific

def test_cummethods_bool(self):
    # GH 6270
    # looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2

    def cummin(x):
        return np.minimum.accumulate(x)

    def cummax(x):
        return np.maximum.accumulate(x)

    a = pd.Series([False, False, False, True, True, False, False])
    b = ~a
    c = pd.Series([False] * len(b))
    d = ~c
    methods = {'cumsum': np.cumsum, 'cumprod': np.cumprod,
               'cummin': cummin, 'cummax': cummax}
    args = product((a, b, c, d), methods)
    for s, method in args:
        expected = Series(methods[method](s.values))
        result = getattr(s, method)()
        assert_series_equal(result, expected)

    # NaN-containing bool inputs
    e = pd.Series([False, True, nan, False])
    cse = pd.Series([0, 1, nan, 1], dtype=object)
    cpe = pd.Series([False, 0, nan, 0])
    cmin = pd.Series([False, False, nan, False])
    cmax = pd.Series([False, True, nan, True])
    expecteds = {'cumsum': cse, 'cumprod': cpe, 'cummin': cmin,
                 'cummax': cmax}

    for method in methods:
        res = getattr(e, method)()
        assert_series_equal(res, expecteds[method])

def test_replace(self):
    N = 100
    ser = Series(np.random.randn(N))
    ser[0:4] = np.nan
    ser[6:10] = 0

    # replace list with a single value
    ser.replace([np.nan], -1, inplace=True)

    exp = ser.fillna(-1)
    assert_series_equal(ser, exp)

    rs = ser.replace(0., np.nan)
    ser[ser == 0.]
# NOTE(review): the first statements continue test_replace, whose opening
# lines are above this chunk.
    = np.nan
    assert_series_equal(rs, ser)

    ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
                 dtype=object)
    ser[:5] = np.nan
    ser[6:10] = 'foo'
    ser[20:30] = 'bar'

    # replace list with a single value
    rs = ser.replace([np.nan, 'foo', 'bar'], -1)

    self.assertTrue((rs[:5] == -1).all())
    self.assertTrue((rs[6:10] == -1).all())
    self.assertTrue((rs[20:30] == -1).all())
    self.assertTrue((isnull(ser[:5])).all())

    # replace with different values
    rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})

    self.assertTrue((rs[:5] == -1).all())
    self.assertTrue((rs[6:10] == -2).all())
    self.assertTrue((rs[20:30] == -3).all())
    self.assertTrue((isnull(ser[:5])).all())

    # replace with different values with 2 lists
    rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
    assert_series_equal(rs, rs2)

    # replace inplace
    ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)

    self.assertTrue((ser[:5] == -1).all())
    self.assertTrue((ser[6:10] == -1).all())
    self.assertTrue((ser[20:30] == -1).all())

    ser = Series([np.nan, 0, np.inf])
    assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))

    ser = Series([np.nan, 0, 'foo', 'bar', np.inf, None, lib.NaT])
    assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
    filled = ser.copy()
    filled[4] = 0
    assert_series_equal(ser.replace(np.inf, 0), filled)

    ser = Series(self.ts.index)
    assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))

    # malformed
    self.assertRaises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0])

    # make sure that we aren't just masking a TypeError because bools don't
    # implement indexing
    with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
        ser.replace([1, 2], [np.nan, 0])

    ser = Series([0, 1, 2, 3, 4])
    result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
    assert_series_equal(result, Series([4, 3, 2, 1, 0]))

    # API change from 0.12?
    # GH 5319
    ser = Series([0, np.nan, 2, 3, 4])
    expected = ser.ffill()
    result = ser.replace([np.nan])
    assert_series_equal(result, expected)

    ser = Series([0, np.nan, 2, 3, 4])
    expected = ser.ffill()
    result = ser.replace(np.nan)
    assert_series_equal(result, expected)

    #GH 5797
    ser = Series(date_range('20130101', periods=5))
    expected = ser.copy()
    expected.loc[2] = Timestamp('20120101')
    result = ser.replace({Timestamp('20130103'): Timestamp('20120101')})
    assert_series_equal(result, expected)
    result = ser.replace(Timestamp('20130103'), Timestamp('20120101'))
    assert_series_equal(result, expected)

def test_replace_with_single_list(self):
    ser = Series([0, 1, 2, 3, 4])
    result = ser.replace([1, 2, 3])
    assert_series_equal(result, Series([0, 0, 0, 0, 4]))

    s = ser.copy()
    s.replace([1, 2, 3], inplace=True)
    assert_series_equal(s, Series([0, 0, 0, 0, 4]))

    # make sure things don't get corrupted when fillna call fails
    s = ser.copy()
    with tm.assertRaises(ValueError):
        s.replace([1, 2, 3], inplace=True, method='crash_cymbal')
    assert_series_equal(s, ser)

def test_replace_mixed_types(self):
    s = Series(np.arange(5), dtype='int64')

    # check both the returned copy and the inplace variant
    def check_replace(to_rep, val, expected):
        sc = s.copy()
        r = s.replace(to_rep, val)
        sc.replace(to_rep, val, inplace=True)
        assert_series_equal(expected, r)
        assert_series_equal(expected, sc)

    # should NOT upcast to float
    e = Series([0, 1, 2, 3, 4])
    tr, v = [3], [3.0]
    check_replace(tr, v, e)

    # MUST upcast to float
    e = Series([0, 1, 2, 3.5, 4])
    tr, v = [3], [3.5]
    check_replace(tr, v, e)

    # casts to object
    e = Series([0, 1, 2, 3.5, 'a'])
    tr, v = [3, 4], [3.5, 'a']
    check_replace(tr, v, e)

    # again casts to object
    e = Series([0, 1, 2, 3.5, Timestamp('20130101')])
    tr, v = [3, 4], [3.5, Timestamp('20130101')]
    check_replace(tr, v, e)

    # casts to float
    e = Series([0, 1, 2, 3.5, 1])
    tr, v = [3, 4], [3.5, True]
    check_replace(tr, v, e)

    # test an object with dates + floats + integers + strings
    dr = date_range('1/1/2001', '1/10/2001',
                    freq='D').to_series().reset_index(drop=True)
    result = dr.astype(object).replace([dr[0], dr[1], dr[2]],
                                       [1.0, 2, 'a'])
    expected = Series([1.0, 2, 'a'] + dr[3:].tolist(), dtype=object)
    assert_series_equal(result, expected)

def test_replace_bool_with_string_no_op(self):
    s = Series([True, False, True])
    result = s.replace('fun', 'in-the-sun')
    tm.assert_series_equal(s, result)

def test_replace_bool_with_string(self):
    # nonexistent elements
    s = Series([True, False, True])
    result = s.replace(True, '2u')
    expected = Series(['2u', False, '2u'])
    tm.assert_series_equal(expected, result)

def test_replace_bool_with_bool(self):
    s = Series([True, False, True])
    result = s.replace(True, False)
    expected = Series([False] * len(s))
    tm.assert_series_equal(expected, result)

def test_replace_with_dict_with_bool_keys(self):
    s = Series([True, False, True])
    with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
        s.replace({'asdf': 'asdb', True: 'yes'})

def test_asfreq(self):
    ts = Series([0., 1., 2.], index=[datetime(2009, 10, 30),
                                     datetime(2009, 11, 30),
                                     datetime(2009, 12, 31)])

    daily_ts = ts.asfreq('B')
    monthly_ts = daily_ts.asfreq('BM')
    self.assert_numpy_array_equal(monthly_ts, ts)

    daily_ts = ts.asfreq('B', method='pad')
    monthly_ts = daily_ts.asfreq('BM')
    self.assert_numpy_array_equal(monthly_ts, ts)

    # offset objects instead of frequency strings
    daily_ts = ts.asfreq(datetools.bday)
    monthly_ts = daily_ts.asfreq(datetools.bmonthEnd)
    self.assert_numpy_array_equal(monthly_ts, ts)

    result = ts[:0].asfreq('M')
    self.assertEqual(len(result), 0)
    self.assertIsNot(result, ts)

def test_diff(self):
    # Just run the function
    self.ts.diff()

    # int dtype
    a = 10000000000000000
    b = a + 1
    s = Series([a, b])

    rs = s.diff()
    self.assertEqual(rs[1], 1)

    # neg n
    rs = self.ts.diff(-1)
    xp = self.ts - self.ts.shift(-1)
    assert_series_equal(rs, xp)

    # 0
    rs = self.ts.diff(0)
    xp = self.ts - self.ts
    assert_series_equal(rs, xp)

    # datetime diff (GH3100)
    s = Series(date_range('20130102', periods=5))
    rs = s - s.shift(1)
    xp = s.diff()
    assert_series_equal(rs, xp)

    # timedelta diff
    nrs = rs - rs.shift(1)
    nxp = xp.diff()
assert_series_equal(nrs, nxp) # with tz s = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'), name='foo') result = s.diff() assert_series_equal(result,Series(TimedeltaIndex(['NaT'] + ['1 days']*4),name='foo')) def test_pct_change(self): rs = self.ts.pct_change(fill_method=None) assert_series_equal(rs, self.ts / self.ts.shift(1) - 1) rs = self.ts.pct_change(2) filled = self.ts.fillna(method='pad') assert_series_equal(rs, filled / filled.shift(2) - 1) rs = self.ts.pct_change(fill_method='bfill', limit=1) filled = self.ts.fillna(method='bfill', limit=1) assert_series_equal(rs, filled / filled.shift(1) - 1) rs = self.ts.pct_change(freq='5D') filled = self.ts.fillna(method='pad') assert_series_equal(rs, filled / filled.shift(freq='5D') - 1) def test_pct_change_shift_over_nas(self): s = Series([1., 1.5, np.nan, 2.5, 3.]) chg = s.pct_change() expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2]) assert_series_equal(chg, expected) def test_autocorr(self): # Just run the function corr1 = self.ts.autocorr() # Now run it with the lag parameter corr2 = self.ts.autocorr(lag=1) # corr() with lag needs Series of at least length 2 if len(self.ts) <= 2: self.assertTrue(np.isnan(corr1)) self.assertTrue(np.isnan(corr2)) else: self.assertEqual(corr1, corr2) # Choose a random lag between 1 and length of Series - 2 # and compare the result with the Series corr() function n = 1 + np.random.randint(max(1, len(self.ts) - 2)) corr1 = self.ts.corr(self.ts.shift(n)) corr2 = self.ts.autocorr(lag=n) # corr() with lag needs Series of at least length 2 if len(self.ts) <= 2: self.assertTrue(np.isnan(corr1)) self.assertTrue(np.isnan(corr2)) else: self.assertEqual(corr1, corr2) def test_first_last_valid(self): ts = self.ts.copy() ts[:5] = np.NaN index = ts.first_valid_index() self.assertEqual(index, ts.index[5]) ts[-5:] = np.NaN index = ts.last_valid_index() self.assertEqual(index, ts.index[-6]) ts[:] = np.nan self.assertIsNone(ts.last_valid_index()) 
self.assertIsNone(ts.first_valid_index()) ser = Series([], index=[]) self.assertIsNone(ser.last_valid_index()) self.assertIsNone(ser.first_valid_index()) def test_mpl_compat_hack(self): result = self.ts[:, np.newaxis] expected = self.ts.values[:, np.newaxis] assert_almost_equal(result, expected) #------------------------------------------------------------------------------ # GroupBy def test_select(self): n = len(self.ts) result = self.ts.select(lambda x: x >= self.ts.index[n // 2]) expected = self.ts.reindex(self.ts.index[n // 2:]) assert_series_equal(result, expected) result = self.ts.select(lambda x: x.weekday() == 2) expected = self.ts[self.ts.index.weekday == 2] assert_series_equal(result, expected) #------------------------------------------------------------------------------ # Misc not safe for sparse def test_dropna_preserve_name(self): self.ts[:5] = np.nan result = self.ts.dropna() self.assertEqual(result.name, self.ts.name) name = self.ts.name ts = self.ts.copy() ts.dropna(inplace=True) self.assertEqual(ts.name, name) def test_numpy_unique(self): # it works! 
result = np.unique(self.ts) def test_concat_empty_series_dtypes_roundtrips(self): # round-tripping with self & like self dtypes = map(np.dtype,['float64','int8','uint8','bool','m8[ns]','M8[ns]']) for dtype in dtypes: self.assertEqual(pd.concat([Series(dtype=dtype)]).dtype, dtype) self.assertEqual(pd.concat([Series(dtype=dtype), Series(dtype=dtype)]).dtype, dtype) def int_result_type(dtype, dtype2): typs = set([dtype.kind,dtype2.kind]) if not len(typs-set(['i','u','b'])) and (dtype.kind == 'i' or dtype2.kind == 'i'): return 'i' elif not len(typs-set(['u','b'])) and (dtype.kind == 'u' or dtype2.kind == 'u'): return 'u' return None def float_result_type(dtype, dtype2): typs = set([dtype.kind,dtype2.kind]) if not len(typs-set(['f','i','u'])) and (dtype.kind == 'f' or dtype2.kind == 'f'): return 'f' return None def get_result_type(dtype, dtype2): result = float_result_type(dtype, dtype2) if result is not None: return result result = int_result_type(dtype, dtype2) if result is not None: return result return 'O' for dtype in dtypes: for dtype2 in dtypes: if dtype == dtype2: continue expected = get_result_type(dtype, dtype2) result = pd.concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype self.assertEqual(result.kind, expected) def test_concat_empty_series_dtypes(self): # bools self.assertEqual(pd.concat([Series(dtype=np.bool_), Series(dtype=np.int32)]).dtype, np.int32) self.assertEqual(pd.concat([Series(dtype=np.bool_), Series(dtype=np.float32)]).dtype, np.object_) # datetimelike self.assertEqual(pd.concat([Series(dtype='m8[ns]'), Series(dtype=np.bool)]).dtype, np.object_) self.assertEqual(pd.concat([Series(dtype='m8[ns]'), Series(dtype=np.int64)]).dtype, np.object_) self.assertEqual(pd.concat([Series(dtype='M8[ns]'), Series(dtype=np.bool)]).dtype, np.object_) self.assertEqual(pd.concat([Series(dtype='M8[ns]'), Series(dtype=np.int64)]).dtype, np.object_) self.assertEqual(pd.concat([Series(dtype='M8[ns]'), Series(dtype=np.bool_), Series(dtype=np.int64)]).dtype, 
np.object_) # categorical self.assertEqual(pd.concat([Series(dtype='category'), Series(dtype='category')]).dtype, 'category') self.assertEqual(pd.concat([Series(dtype='category'), Series(dtype='float64')]).dtype, np.object_) self.assertEqual(pd.concat([Series(dtype='category'), Series(dtype='object')]).dtype, 'category') # sparse result = pd.concat([Series(dtype='float64').to_sparse(), Series(dtype='float64').to_sparse()]) self.assertEqual(result.dtype,np.float64) self.assertEqual(result.ftype,'float64:sparse') result = pd.concat([Series(dtype='float64').to_sparse(), Series(dtype='float64')]) self.assertEqual(result.dtype,np.float64) self.assertEqual(result.ftype,'float64:sparse') result = pd.concat([Series(dtype='float64').to_sparse(), Series(dtype='object')]) self.assertEqual(result.dtype,np.object_) self.assertEqual(result.ftype,'object:dense') def test_searchsorted_numeric_dtypes_scalar(self): s = Series([1, 2, 90, 1000, 3e9]) r = s.searchsorted(30) e = 2 tm.assert_equal(r, e) r = s.searchsorted([30]) e = np.array([2]) tm.assert_numpy_array_equal(r, e) def test_searchsorted_numeric_dtypes_vector(self): s = Series([1, 2, 90, 1000, 3e9]) r = s.searchsorted([91, 2e6]) e = np.array([3, 4]) tm.assert_numpy_array_equal(r, e) def test_search_sorted_datetime64_scalar(self): s = Series(pd.date_range('20120101', periods=10, freq='2D')) v = pd.Timestamp('20120102') r = s.searchsorted(v) e = 1 tm.assert_equal(r, e) def test_search_sorted_datetime64_list(self): s = Series(pd.date_range('20120101', periods=10, freq='2D')) v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')] r = s.searchsorted(v) e = np.array([1, 2]) tm.assert_numpy_array_equal(r, e) def test_searchsorted_sorter(self): # GH8490 s = Series([3, 1, 2]) r = s.searchsorted([0, 3], sorter=np.argsort(s)) e = np.array([0, 2]) tm.assert_numpy_array_equal(r, e) def test_to_frame_expanddim(self): # GH 9762 class SubclassedSeries(Series): @property def _constructor_expanddim(self): return SubclassedFrame class 
SubclassedFrame(DataFrame): pass s = SubclassedSeries([1, 2, 3], name='X') result = s.to_frame() self.assertTrue(isinstance(result, SubclassedFrame)) expected = SubclassedFrame({'X': [1, 2, 3]}) assert_frame_equal(result, expected) class TestSeriesNonUnique(tm.TestCase): _multiprocess_can_split_ = True def setUp(self): pass def test_basic_indexing(self): s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b']) self.assertRaises(IndexError, s.__getitem__, 5) self.assertRaises(IndexError, s.__setitem__, 5, 0) self.assertRaises(KeyError, s.__getitem__, 'c') s = s.sort_index() self.assertRaises(IndexError, s.__getitem__, 5) self.assertRaises(IndexError, s.__setitem__, 5, 0) def test_int_indexing(self): s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2]) self.assertRaises(KeyError, s.__getitem__, 5) self.assertRaises(KeyError, s.__getitem__, 'c') # not monotonic s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1]) self.assertRaises(KeyError, s.__getitem__, 5) self.assertRaises(KeyError, s.__getitem__, 'c') def test_datetime_indexing(self): from pandas import date_range index = date_range('1/1/2000', '1/7/2000') index = index.repeat(3) s = Series(len(index), index=index) stamp = Timestamp('1/8/2000') self.assertRaises(KeyError, s.__getitem__, stamp) s[stamp] = 0 self.assertEqual(s[stamp], 0) # not monotonic s = Series(len(index), index=index) s = s[::-1] self.assertRaises(KeyError, s.__getitem__, stamp) s[stamp] = 0 self.assertEqual(s[stamp], 0) def test_reset_index(self): df = tm.makeDataFrame()[:5] ser = df.stack() ser.index.names = ['hash', 'category'] ser.name = 'value' df = ser.reset_index() self.assertIn('value', df) df = ser.reset_index(name='value2') self.assertIn('value2', df) # check inplace s = ser.reset_index(drop=True) s2 = ser s2.reset_index(drop=True, inplace=True) assert_series_equal(s, s2) # level index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]], labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]]) s = 
Series(np.random.randn(6), index=index) rs = s.reset_index(level=1) self.assertEqual(len(rs.columns), 2) rs = s.reset_index(level=[0, 2], drop=True) self.assertTrue(rs.index.equals(Index(index.get_level_values(1)))) tm.assertIsInstance(rs, Series) def test_set_index_makes_timeseries(self): idx = tm.makeDateIndex(10) s = Series(lrange(10)) s.index = idx with tm.assert_produces_warning(FutureWarning): self.assertTrue(s.is_time_series == True) self.assertTrue(s.index.is_all_dates == True) def test_timeseries_coercion(self): idx = tm.makeDateIndex(10000) ser = Series(np.random.randn(len(idx)), idx.astype(object)) with tm.assert_produces_warning(FutureWarning): self.assertTrue(ser.is_time_series) self.assertTrue(ser.index.is_all_dates) self.assertIsInstance(ser.index, DatetimeIndex) def test_replace(self): N = 100 ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object) ser[:5] = np.nan ser[6:10] = 'foo' ser[20:30] = 'bar' # replace list with a single value rs = ser.replace([np.nan, 'foo', 'bar'], -1) self.assertTrue((rs[:5] == -1).all()) self.assertTrue((rs[6:10] == -1).all()) self.assertTrue((rs[20:30] == -1).all()) self.assertTrue((isnull(ser[:5])).all()) # replace with different values rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3}) self.assertTrue((rs[:5] == -1).all()) self.assertTrue((rs[6:10] == -2).all()) self.assertTrue((rs[20:30] == -3).all()) self.assertTrue((isnull(ser[:5])).all()) # replace with different values with 2 lists rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3]) assert_series_equal(rs, rs2) # replace inplace ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True) self.assertTrue((ser[:5] == -1).all()) self.assertTrue((ser[6:10] == -1).all()) self.assertTrue((ser[20:30] == -1).all()) def test_repeat(self): s = Series(np.random.randn(3), index=['a', 'b', 'c']) reps = s.repeat(5) exp = Series(s.values.repeat(5), index=s.index.values.repeat(5)) assert_series_equal(reps, exp) to_rep = [2, 3, 4] reps = s.repeat(to_rep) exp 
= Series(s.values.repeat(to_rep), index=s.index.values.repeat(to_rep)) assert_series_equal(reps, exp) def test_unique_data_ownership(self): # it works! #1807 Series(Series(["a", "c", "b"]).unique()).sort_values() def test_datetime_timedelta_quantiles(self): # covers #9694 self.assertTrue(pd.isnull(Series([],dtype='M8[ns]').quantile(.5))) self.assertTrue(pd.isnull(Series([],dtype='m8[ns]').quantile(.5))) def test_empty_timeseries_redections_return_nat(self): # covers #11245 for dtype in ('m8[ns]', 'm8[ns]', 'M8[ns]', 'M8[ns, UTC]'): self.assertIs(Series([], dtype=dtype).min(), pd.NaT) self.assertIs(Series([], dtype=dtype).max(), pd.NaT) if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
mit
IssamLaradji/scikit-learn
examples/classification/plot_digits_classification.py
289
2397
""" ================================ Recognizing hand-written digits ================================ An example showing how the scikit-learn can be used to recognize images of hand-written digits. This example is commented in the :ref:`tutorial section of the user manual <introduction>`. """ print(__doc__) # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org> # License: BSD 3 clause # Standard scientific Python imports import matplotlib.pyplot as plt # Import datasets, classifiers and performance metrics from sklearn import datasets, svm, metrics # The digits dataset digits = datasets.load_digits() # The data that we are interested in is made of 8x8 images of digits, let's # have a look at the first 3 images, stored in the `images` attribute of the # dataset. If we were working from image files, we could load them using # pylab.imread. Note that each image must have the same size. For these # images, we know which digit they represent: it is given in the 'target' of # the dataset. 
images_and_labels = list(zip(digits.images, digits.target)) for index, (image, label) in enumerate(images_and_labels[:4]): plt.subplot(2, 4, index + 1) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title('Training: %i' % label) # To apply a classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(digits.images) data = digits.images.reshape((n_samples, -1)) # Create a classifier: a support vector classifier classifier = svm.SVC(gamma=0.001) # We learn the digits on the first half of the digits classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2]) # Now predict the value of the digit on the second half: expected = digits.target[n_samples / 2:] predicted = classifier.predict(data[n_samples / 2:]) print("Classification report for classifier %s:\n%s\n" % (classifier, metrics.classification_report(expected, predicted))) print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted)) images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted)) for index, (image, prediction) in enumerate(images_and_predictions[:4]): plt.subplot(2, 4, index + 5) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title('Prediction: %i' % prediction) plt.show()
bsd-3-clause
andyraib/data-storage
python_scripts/env/lib/python3.6/site-packages/matplotlib/delaunay/interpolate.py
21
7262
from __future__ import (absolute_import, division, print_function, unicode_literals) import six import numpy as np from matplotlib._delaunay import compute_planes, linear_interpolate_grid from matplotlib._delaunay import nn_interpolate_grid from matplotlib._delaunay import nn_interpolate_unstructured __all__ = ['LinearInterpolator', 'NNInterpolator'] def slice2gridspec(key): """Convert a 2-tuple of slices to start,stop,steps for x and y. key -- (slice(ystart,ystop,ystep), slice(xtart, xstop, xstep)) For now, the only accepted step values are imaginary integers (interpreted in the same way numpy.mgrid, etc. do). """ if ((len(key) != 2) or (not isinstance(key[0], slice)) or (not isinstance(key[1], slice))): raise ValueError("only 2-D slices, please") x0 = key[1].start x1 = key[1].stop xstep = key[1].step if not isinstance(xstep, complex) or int(xstep.real) != xstep.real: raise ValueError("only the [start:stop:numsteps*1j] form supported") xstep = int(xstep.imag) y0 = key[0].start y1 = key[0].stop ystep = key[0].step if not isinstance(ystep, complex) or int(ystep.real) != ystep.real: raise ValueError("only the [start:stop:numsteps*1j] form supported") ystep = int(ystep.imag) return x0, x1, xstep, y0, y1, ystep class LinearInterpolator(object): """Interpolate a function defined on the nodes of a triangulation by using the planes defined by the three function values at each corner of the triangles. LinearInterpolator(triangulation, z, default_value=numpy.nan) triangulation -- Triangulation instance z -- the function values at each node of the triangulation default_value -- a float giving the default value should the interpolating point happen to fall outside of the convex hull of the triangulation At the moment, the only regular rectangular grids are supported for interpolation. vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j] vals would then be a (ysteps, xsteps) array containing the interpolated values. 
These arguments are interpreted the same way as numpy.mgrid. Attributes: planes -- (ntriangles, 3) array of floats specifying the plane for each triangle. Linear Interpolation -------------------- Given the Delauany triangulation (or indeed *any* complete triangulation) we can interpolate values inside the convex hull by locating the enclosing triangle of the interpolation point and returning the value at that point of the plane defined by the three node values. f = planes[tri,0]*x + planes[tri,1]*y + planes[tri,2] The interpolated function is C0 continuous across the convex hull of the input points. It is C1 continuous across the convex hull except for the nodes and the edges of the triangulation. """ def __init__(self, triangulation, z, default_value=np.nan): self.triangulation = triangulation self.z = np.asarray(z, dtype=np.float64) self.default_value = default_value self.planes = compute_planes(triangulation.x, triangulation.y, self.z, triangulation.triangle_nodes) def __getitem__(self, key): x0, x1, xstep, y0, y1, ystep = slice2gridspec(key) grid = linear_interpolate_grid( x0, x1, xstep, y0, y1, ystep, self.default_value, self.planes, self.triangulation.x, self.triangulation.y, self.triangulation.triangle_nodes, self.triangulation.triangle_neighbors) return grid class NNInterpolator(object): """Interpolate a function defined on the nodes of a triangulation by the natural neighbors method. NNInterpolator(triangulation, z, default_value=numpy.nan) triangulation -- Triangulation instance z -- the function values at each node of the triangulation default_value -- a float giving the default value should the interpolating point happen to fall outside of the convex hull of the triangulation At the moment, the only regular rectangular grids are supported for interpolation. vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j] vals would then be a (ysteps, xsteps) array containing the interpolated values. 
These arguments are interpreted the same way as numpy.mgrid. Natural Neighbors Interpolation ------------------------------- One feature of the Delaunay triangulation is that for each triangle, its circumcircle contains no other point (although in degenerate cases, like squares, other points may be *on* the circumcircle). One can also construct what is called the Voronoi diagram from a Delaunay triangulation by connecting the circumcenters of the triangles to those of their neighbors to form a tesselation of irregular polygons covering the plane and containing only one node from the triangulation. Each point in one node's Voronoi polygon is closer to that node than any other node. To compute the Natural Neighbors interpolant, we consider adding the interpolation point to the triangulation. We define the natural neighbors of this point as the set of nodes participating in Delaunay triangles whose circumcircles contain the point. To restore the Delaunay-ness of the triangulation, one would only have to alter those triangles and Voronoi polygons. The new Voronoi diagram would have a polygon around the inserted point. This polygon would "steal" area from the original Voronoi polygons. For each node i in the natural neighbors set, we compute the area stolen from its original Voronoi polygon, stolen[i]. We define the natural neighbors coordinates phi[i] = stolen[i] / sum(stolen,axis=0) We then use these phi[i] to weight the corresponding function values from the input data z to compute the interpolated value. The interpolated surface is C1-continuous except at the nodes themselves across the convex hull of the input points. One can find the set of points that a given node will affect by computing the union of the areas covered by the circumcircles of each Delaunay triangle that node participates in. 
""" def __init__(self, triangulation, z, default_value=np.nan): self.triangulation = triangulation self.z = np.asarray(z, dtype=np.float64) self.default_value = default_value def __getitem__(self, key): x0, x1, xstep, y0, y1, ystep = slice2gridspec(key) grid = nn_interpolate_grid( x0, x1, xstep, y0, y1, ystep, self.default_value, self.triangulation.x, self.triangulation.y, self.z, self.triangulation.circumcenters, self.triangulation.triangle_nodes, self.triangulation.triangle_neighbors) return grid def __call__(self, intx, inty): intz = nn_interpolate_unstructured(intx, inty, self.default_value, self.triangulation.x, self.triangulation.y, self.z, self.triangulation.circumcenters, self.triangulation.triangle_nodes, self.triangulation.triangle_neighbors) return intz
apache-2.0
tobiasgehring/qudi
gui/colordefs.py
3
59164
# -*- coding: utf-8 -*- """ This file contains the Qudi GUI module utility classes. Qudi is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Qudi is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Qudi. If not, see <http://www.gnu.org/licenses/>. Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/> """ import numpy as np import pyqtgraph as pg class ColorScale(): """ Custom color scale for use in Qudi. You need to add two numpy arrays, COLORS and COLORS_INV when subclassing """ def __init__(self): color_positions = np.linspace(0.0, 1.0, num=len(self.COLORS)) self.colormap = pg.ColorMap(color_positions, self.COLORS.astype(int)) self.cmap_normed = pg.ColorMap(color_positions, self.COLORS_INV / 255) # get the LookUpTable (LUT), first two params should match the position # scale extremes passed to ColorMap(). # Return an RGB(A) lookup table (ndarray). Insert starting and stopping # value and the number of points in the returned lookup table: self.lut = self.colormap.getLookupTable(0, 1, 2000) class ColorScaleRainbow(ColorScale): """ Define the color map that goes from dark blue to bright red. Looks gay but is not preferable for a number of reasons: brightness linearity, visual banding, red-green colorblindness problems, etc. See the matplotlib discussion about their default color scale for reference. 
""" COLORS = np.array([ [ 0, 0, 132, 255], [ 0, 0, 241, 255], [ 0, 88, 255, 255], [ 0, 204, 255, 255], [ 66, 255, 149, 255], [160, 255, 86, 255], [254, 237, 0, 255], [255, 129, 0, 255], [255, 26, 0, 255] ], dtype=np.ubyte) COLORS_INV = COLORS[::-1] # Shamelessly stolen from https://bids.github.io/colormap/ # New matplotlib colormaps by Nathaniel J. Smith, Stefan van der Walt, # and (in the case of viridis) Eric Firing. # # This file and the colormaps in it are released under the CC0 license / # public domain dedication. We would appreciate credit if you use or # redistribute these colormaps, but do not impose any legal restrictions. # # To the extent possible under law, the persons who associated CC0 with # mpl-colormaps have waived all copyright and related or neighboring rights # to mpl-colormaps. # # You should have received a copy of the CC0 legalcode along with this # work. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. class ColorScaleMagma(ColorScale): """ Magma color scale proposal for matplotlib default color scale """ COLORS = np.array([ [0.001462, 0.000466, 0.013866, 1.0], [0.002258, 0.001295, 0.018331, 1.0], [0.003279, 0.002305, 0.023708, 1.0], [0.004512, 0.003490, 0.029965, 1.0], [0.005950, 0.004843, 0.037130, 1.0], [0.007588, 0.006356, 0.044973, 1.0], [0.009426, 0.008022, 0.052844, 1.0], [0.011465, 0.009828, 0.060750, 1.0], [0.013708, 0.011771, 0.068667, 1.0], [0.016156, 0.013840, 0.076603, 1.0], [0.018815, 0.016026, 0.084584, 1.0], [0.021692, 0.018320, 0.092610, 1.0], [0.024792, 0.020715, 0.100676, 1.0], [0.028123, 0.023201, 0.108787, 1.0], [0.031696, 0.025765, 0.116965, 1.0], [0.035520, 0.028397, 0.125209, 1.0], [0.039608, 0.031090, 0.133515, 1.0], [0.043830, 0.033830, 0.141886, 1.0], [0.048062, 0.036607, 0.150327, 1.0], [0.052320, 0.039407, 0.158841, 1.0], [0.056615, 0.042160, 0.167446, 1.0], [0.060949, 0.044794, 0.176129, 1.0], [0.065330, 0.047318, 0.184892, 1.0], [0.069764, 0.049726, 0.193735, 1.0], [0.074257, 0.052017, 
0.202660, 1.0], [0.078815, 0.054184, 0.211667, 1.0], [0.083446, 0.056225, 0.220755, 1.0], [0.088155, 0.058133, 0.229922, 1.0], [0.092949, 0.059904, 0.239164, 1.0], [0.097833, 0.061531, 0.248477, 1.0], [0.102815, 0.063010, 0.257854, 1.0], [0.107899, 0.064335, 0.267289, 1.0], [0.113094, 0.065492, 0.276784, 1.0], [0.118405, 0.066479, 0.286321, 1.0], [0.123833, 0.067295, 0.295879, 1.0], [0.129380, 0.067935, 0.305443, 1.0], [0.135053, 0.068391, 0.315000, 1.0], [0.140858, 0.068654, 0.324538, 1.0], [0.146785, 0.068738, 0.334011, 1.0], [0.152839, 0.068637, 0.343404, 1.0], [0.159018, 0.068354, 0.352688, 1.0], [0.165308, 0.067911, 0.361816, 1.0], [0.171713, 0.067305, 0.370771, 1.0], [0.178212, 0.066576, 0.379497, 1.0], [0.184801, 0.065732, 0.387973, 1.0], [0.191460, 0.064818, 0.396152, 1.0], [0.198177, 0.063862, 0.404009, 1.0], [0.204935, 0.062907, 0.411514, 1.0], [0.211718, 0.061992, 0.418647, 1.0], [0.218512, 0.061158, 0.425392, 1.0], [0.225302, 0.060445, 0.431742, 1.0], [0.232077, 0.059889, 0.437695, 1.0], [0.238826, 0.059517, 0.443256, 1.0], [0.245543, 0.059352, 0.448436, 1.0], [0.252220, 0.059415, 0.453248, 1.0], [0.258857, 0.059706, 0.457710, 1.0], [0.265447, 0.060237, 0.461840, 1.0], [0.271994, 0.060994, 0.465660, 1.0], [0.278493, 0.061978, 0.469190, 1.0], [0.284951, 0.063168, 0.472451, 1.0], [0.291366, 0.064553, 0.475462, 1.0], [0.297740, 0.066117, 0.478243, 1.0], [0.304081, 0.067835, 0.480812, 1.0], [0.310382, 0.069702, 0.483186, 1.0], [0.316654, 0.071690, 0.485380, 1.0], [0.322899, 0.073782, 0.487408, 1.0], [0.329114, 0.075972, 0.489287, 1.0], [0.335308, 0.078236, 0.491024, 1.0], [0.341482, 0.080564, 0.492631, 1.0], [0.347636, 0.082946, 0.494121, 1.0], [0.353773, 0.085373, 0.495501, 1.0], [0.359898, 0.087831, 0.496778, 1.0], [0.366012, 0.090314, 0.497960, 1.0], [0.372116, 0.092816, 0.499053, 1.0], [0.378211, 0.095332, 0.500067, 1.0], [0.384299, 0.097855, 0.501002, 1.0], [0.390384, 0.100379, 0.501864, 1.0], [0.396467, 0.102902, 0.502658, 1.0], [0.402548, 0.105420, 
0.503386, 1.0], [0.408629, 0.107930, 0.504052, 1.0], [0.414709, 0.110431, 0.504662, 1.0], [0.420791, 0.112920, 0.505215, 1.0], [0.426877, 0.115395, 0.505714, 1.0], [0.432967, 0.117855, 0.506160, 1.0], [0.439062, 0.120298, 0.506555, 1.0], [0.445163, 0.122724, 0.506901, 1.0], [0.451271, 0.125132, 0.507198, 1.0], [0.457386, 0.127522, 0.507448, 1.0], [0.463508, 0.129893, 0.507652, 1.0], [0.469640, 0.132245, 0.507809, 1.0], [0.475780, 0.134577, 0.507921, 1.0], [0.481929, 0.136891, 0.507989, 1.0], [0.488088, 0.139186, 0.508011, 1.0], [0.494258, 0.141462, 0.507988, 1.0], [0.500438, 0.143719, 0.507920, 1.0], [0.506629, 0.145958, 0.507806, 1.0], [0.512831, 0.148179, 0.507648, 1.0], [0.519045, 0.150383, 0.507443, 1.0], [0.525270, 0.152569, 0.507192, 1.0], [0.531507, 0.154739, 0.506895, 1.0], [0.537755, 0.156894, 0.506551, 1.0], [0.544015, 0.159033, 0.506159, 1.0], [0.550287, 0.161158, 0.505719, 1.0], [0.556571, 0.163269, 0.505230, 1.0], [0.562866, 0.165368, 0.504692, 1.0], [0.569172, 0.167454, 0.504105, 1.0], [0.575490, 0.169530, 0.503466, 1.0], [0.581819, 0.171596, 0.502777, 1.0], [0.588158, 0.173652, 0.502035, 1.0], [0.594508, 0.175701, 0.501241, 1.0], [0.600868, 0.177743, 0.500394, 1.0], [0.607238, 0.179779, 0.499492, 1.0], [0.613617, 0.181811, 0.498536, 1.0], [0.620005, 0.183840, 0.497524, 1.0], [0.626401, 0.185867, 0.496456, 1.0], [0.632805, 0.187893, 0.495332, 1.0], [0.639216, 0.189921, 0.494150, 1.0], [0.645633, 0.191952, 0.492910, 1.0], [0.652056, 0.193986, 0.491611, 1.0], [0.658483, 0.196027, 0.490253, 1.0], [0.664915, 0.198075, 0.488836, 1.0], [0.671349, 0.200133, 0.487358, 1.0], [0.677786, 0.202203, 0.485819, 1.0], [0.684224, 0.204286, 0.484219, 1.0], [0.690661, 0.206384, 0.482558, 1.0], [0.697098, 0.208501, 0.480835, 1.0], [0.703532, 0.210638, 0.479049, 1.0], [0.709962, 0.212797, 0.477201, 1.0], [0.716387, 0.214982, 0.475290, 1.0], [0.722805, 0.217194, 0.473316, 1.0], [0.729216, 0.219437, 0.471279, 1.0], [0.735616, 0.221713, 0.469180, 1.0], [0.742004, 0.224025, 
0.467018, 1.0], [0.748378, 0.226377, 0.464794, 1.0], [0.754737, 0.228772, 0.462509, 1.0], [0.761077, 0.231214, 0.460162, 1.0], [0.767398, 0.233705, 0.457755, 1.0], [0.773695, 0.236249, 0.455289, 1.0], [0.779968, 0.238851, 0.452765, 1.0], [0.786212, 0.241514, 0.450184, 1.0], [0.792427, 0.244242, 0.447543, 1.0], [0.798608, 0.247040, 0.444848, 1.0], [0.804752, 0.249911, 0.442102, 1.0], [0.810855, 0.252861, 0.439305, 1.0], [0.816914, 0.255895, 0.436461, 1.0], [0.822926, 0.259016, 0.433573, 1.0], [0.828886, 0.262229, 0.430644, 1.0], [0.834791, 0.265540, 0.427671, 1.0], [0.840636, 0.268953, 0.424666, 1.0], [0.846416, 0.272473, 0.421631, 1.0], [0.852126, 0.276106, 0.418573, 1.0], [0.857763, 0.279857, 0.415496, 1.0], [0.863320, 0.283729, 0.412403, 1.0], [0.868793, 0.287728, 0.409303, 1.0], [0.874176, 0.291859, 0.406205, 1.0], [0.879464, 0.296125, 0.403118, 1.0], [0.884651, 0.300530, 0.400047, 1.0], [0.889731, 0.305079, 0.397002, 1.0], [0.894700, 0.309773, 0.393995, 1.0], [0.899552, 0.314616, 0.391037, 1.0], [0.904281, 0.319610, 0.388137, 1.0], [0.908884, 0.324755, 0.385308, 1.0], [0.913354, 0.330052, 0.382563, 1.0], [0.917689, 0.335500, 0.379915, 1.0], [0.921884, 0.341098, 0.377376, 1.0], [0.925937, 0.346844, 0.374959, 1.0], [0.929845, 0.352734, 0.372677, 1.0], [0.933606, 0.358764, 0.370541, 1.0], [0.937221, 0.364929, 0.368567, 1.0], [0.940687, 0.371224, 0.366762, 1.0], [0.944006, 0.377643, 0.365136, 1.0], [0.947180, 0.384178, 0.363701, 1.0], [0.950210, 0.390820, 0.362468, 1.0], [0.953099, 0.397563, 0.361438, 1.0], [0.955849, 0.404400, 0.360619, 1.0], [0.958464, 0.411324, 0.360014, 1.0], [0.960949, 0.418323, 0.359630, 1.0], [0.963310, 0.425390, 0.359469, 1.0], [0.965549, 0.432519, 0.359529, 1.0], [0.967671, 0.439703, 0.359810, 1.0], [0.969680, 0.446936, 0.360311, 1.0], [0.971582, 0.454210, 0.361030, 1.0], [0.973381, 0.461520, 0.361965, 1.0], [0.975082, 0.468861, 0.363111, 1.0], [0.976690, 0.476226, 0.364466, 1.0], [0.978210, 0.483612, 0.366025, 1.0], [0.979645, 0.491014, 
0.367783, 1.0], [0.981000, 0.498428, 0.369734, 1.0], [0.982279, 0.505851, 0.371874, 1.0], [0.983485, 0.513280, 0.374198, 1.0], [0.984622, 0.520713, 0.376698, 1.0], [0.985693, 0.528148, 0.379371, 1.0], [0.986700, 0.535582, 0.382210, 1.0], [0.987646, 0.543015, 0.385210, 1.0], [0.988533, 0.550446, 0.388365, 1.0], [0.989363, 0.557873, 0.391671, 1.0], [0.990138, 0.565296, 0.395122, 1.0], [0.990871, 0.572706, 0.398714, 1.0], [0.991558, 0.580107, 0.402441, 1.0], [0.992196, 0.587502, 0.406299, 1.0], [0.992785, 0.594891, 0.410283, 1.0], [0.993326, 0.602275, 0.414390, 1.0], [0.993834, 0.609644, 0.418613, 1.0], [0.994309, 0.616999, 0.422950, 1.0], [0.994738, 0.624350, 0.427397, 1.0], [0.995122, 0.631696, 0.431951, 1.0], [0.995480, 0.639027, 0.436607, 1.0], [0.995810, 0.646344, 0.441361, 1.0], [0.996096, 0.653659, 0.446213, 1.0], [0.996341, 0.660969, 0.451160, 1.0], [0.996580, 0.668256, 0.456192, 1.0], [0.996775, 0.675541, 0.461314, 1.0], [0.996925, 0.682828, 0.466526, 1.0], [0.997077, 0.690088, 0.471811, 1.0], [0.997186, 0.697349, 0.477182, 1.0], [0.997254, 0.704611, 0.482635, 1.0], [0.997325, 0.711848, 0.488154, 1.0], [0.997351, 0.719089, 0.493755, 1.0], [0.997351, 0.726324, 0.499428, 1.0], [0.997341, 0.733545, 0.505167, 1.0], [0.997285, 0.740772, 0.510983, 1.0], [0.997228, 0.747981, 0.516859, 1.0], [0.997138, 0.755190, 0.522806, 1.0], [0.997019, 0.762398, 0.528821, 1.0], [0.996898, 0.769591, 0.534892, 1.0], [0.996727, 0.776795, 0.541039, 1.0], [0.996571, 0.783977, 0.547233, 1.0], [0.996369, 0.791167, 0.553499, 1.0], [0.996162, 0.798348, 0.559820, 1.0], [0.995932, 0.805527, 0.566202, 1.0], [0.995680, 0.812706, 0.572645, 1.0], [0.995424, 0.819875, 0.579140, 1.0], [0.995131, 0.827052, 0.585701, 1.0], [0.994851, 0.834213, 0.592307, 1.0], [0.994524, 0.841387, 0.598983, 1.0], [0.994222, 0.848540, 0.605696, 1.0], [0.993866, 0.855711, 0.612482, 1.0], [0.993545, 0.862859, 0.619299, 1.0], [0.993170, 0.870024, 0.626189, 1.0], [0.992831, 0.877168, 0.633109, 1.0], [0.992440, 0.884330, 
0.640099, 1.0], [0.992089, 0.891470, 0.647116, 1.0], [0.991688, 0.898627, 0.654202, 1.0], [0.991332, 0.905763, 0.661309, 1.0], [0.990930, 0.912915, 0.668481, 1.0], [0.990570, 0.920049, 0.675675, 1.0], [0.990175, 0.927196, 0.682926, 1.0], [0.989815, 0.934329, 0.690198, 1.0], [0.989434, 0.941470, 0.697519, 1.0], [0.989077, 0.948604, 0.704863, 1.0], [0.988717, 0.955742, 0.712242, 1.0], [0.988367, 0.962878, 0.719649, 1.0], [0.988033, 0.970012, 0.727077, 1.0], [0.987691, 0.977154, 0.734536, 1.0], [0.987387, 0.984288, 0.742002, 1.0], [0.987053, 0.991438, 0.749504, 1.0] ], dtype=np.float)*255 COLORS_INV = COLORS[::-1] class ColorScaleInferno(ColorScale): """ Inferno color scale proposal for matplotlib default color scale """ COLORS = np.array([ [0.001462, 0.000466, 0.013866, 1.0], [0.002267, 0.001270, 0.018570, 1.0], [0.003299, 0.002249, 0.024239, 1.0], [0.004547, 0.003392, 0.030909, 1.0], [0.006006, 0.004692, 0.038558, 1.0], [0.007676, 0.006136, 0.046836, 1.0], [0.009561, 0.007713, 0.055143, 1.0], [0.011663, 0.009417, 0.063460, 1.0], [0.013995, 0.011225, 0.071862, 1.0], [0.016561, 0.013136, 0.080282, 1.0], [0.019373, 0.015133, 0.088767, 1.0], [0.022447, 0.017199, 0.097327, 1.0], [0.025793, 0.019331, 0.105930, 1.0], [0.029432, 0.021503, 0.114621, 1.0], [0.033385, 0.023702, 0.123397, 1.0], [0.037668, 0.025921, 0.132232, 1.0], [0.042253, 0.028139, 0.141141, 1.0], [0.046915, 0.030324, 0.150164, 1.0], [0.051644, 0.032474, 0.159254, 1.0], [0.056449, 0.034569, 0.168414, 1.0], [0.061340, 0.036590, 0.177642, 1.0], [0.066331, 0.038504, 0.186962, 1.0], [0.071429, 0.040294, 0.196354, 1.0], [0.076637, 0.041905, 0.205799, 1.0], [0.081962, 0.043328, 0.215289, 1.0], [0.087411, 0.044556, 0.224813, 1.0], [0.092990, 0.045583, 0.234358, 1.0], [0.098702, 0.046402, 0.243904, 1.0], [0.104551, 0.047008, 0.253430, 1.0], [0.110536, 0.047399, 0.262912, 1.0], [0.116656, 0.047574, 0.272321, 1.0], [0.122908, 0.047536, 0.281624, 1.0], [0.129285, 0.047293, 0.290788, 1.0], [0.135778, 0.046856, 0.299776, 
1.0], [0.142378, 0.046242, 0.308553, 1.0], [0.149073, 0.045468, 0.317085, 1.0], [0.155850, 0.044559, 0.325338, 1.0], [0.162689, 0.043554, 0.333277, 1.0], [0.169575, 0.042489, 0.340874, 1.0], [0.176493, 0.041402, 0.348111, 1.0], [0.183429, 0.040329, 0.354971, 1.0], [0.190367, 0.039309, 0.361447, 1.0], [0.197297, 0.038400, 0.367535, 1.0], [0.204209, 0.037632, 0.373238, 1.0], [0.211095, 0.037030, 0.378563, 1.0], [0.217949, 0.036615, 0.383522, 1.0], [0.224763, 0.036405, 0.388129, 1.0], [0.231538, 0.036405, 0.392400, 1.0], [0.238273, 0.036621, 0.396353, 1.0], [0.244967, 0.037055, 0.400007, 1.0], [0.251620, 0.037705, 0.403378, 1.0], [0.258234, 0.038571, 0.406485, 1.0], [0.264810, 0.039647, 0.409345, 1.0], [0.271347, 0.040922, 0.411976, 1.0], [0.277850, 0.042353, 0.414392, 1.0], [0.284321, 0.043933, 0.416608, 1.0], [0.290763, 0.045644, 0.418637, 1.0], [0.297178, 0.047470, 0.420491, 1.0], [0.303568, 0.049396, 0.422182, 1.0], [0.309935, 0.051407, 0.423721, 1.0], [0.316282, 0.053490, 0.425116, 1.0], [0.322610, 0.055634, 0.426377, 1.0], [0.328921, 0.057827, 0.427511, 1.0], [0.335217, 0.060060, 0.428524, 1.0], [0.341500, 0.062325, 0.429425, 1.0], [0.347771, 0.064616, 0.430217, 1.0], [0.354032, 0.066925, 0.430906, 1.0], [0.360284, 0.069247, 0.431497, 1.0], [0.366529, 0.071579, 0.431994, 1.0], [0.372768, 0.073915, 0.432400, 1.0], [0.379001, 0.076253, 0.432719, 1.0], [0.385228, 0.078591, 0.432955, 1.0], [0.391453, 0.080927, 0.433109, 1.0], [0.397674, 0.083257, 0.433183, 1.0], [0.403894, 0.085580, 0.433179, 1.0], [0.410113, 0.087896, 0.433098, 1.0], [0.416331, 0.090203, 0.432943, 1.0], [0.422549, 0.092501, 0.432714, 1.0], [0.428768, 0.094790, 0.432412, 1.0], [0.434987, 0.097069, 0.432039, 1.0], [0.441207, 0.099338, 0.431594, 1.0], [0.447428, 0.101597, 0.431080, 1.0], [0.453651, 0.103848, 0.430498, 1.0], [0.459875, 0.106089, 0.429846, 1.0], [0.466100, 0.108322, 0.429125, 1.0], [0.472328, 0.110547, 0.428334, 1.0], [0.478558, 0.112764, 0.427475, 1.0], [0.484789, 0.114974, 0.426548, 
1.0], [0.491022, 0.117179, 0.425552, 1.0], [0.497257, 0.119379, 0.424488, 1.0], [0.503493, 0.121575, 0.423356, 1.0], [0.509730, 0.123769, 0.422156, 1.0], [0.515967, 0.125960, 0.420887, 1.0], [0.522206, 0.128150, 0.419549, 1.0], [0.528444, 0.130341, 0.418142, 1.0], [0.534683, 0.132534, 0.416667, 1.0], [0.540920, 0.134729, 0.415123, 1.0], [0.547157, 0.136929, 0.413511, 1.0], [0.553392, 0.139134, 0.411829, 1.0], [0.559624, 0.141346, 0.410078, 1.0], [0.565854, 0.143567, 0.408258, 1.0], [0.572081, 0.145797, 0.406369, 1.0], [0.578304, 0.148039, 0.404411, 1.0], [0.584521, 0.150294, 0.402385, 1.0], [0.590734, 0.152563, 0.400290, 1.0], [0.596940, 0.154848, 0.398125, 1.0], [0.603139, 0.157151, 0.395891, 1.0], [0.609330, 0.159474, 0.393589, 1.0], [0.615513, 0.161817, 0.391219, 1.0], [0.621685, 0.164184, 0.388781, 1.0], [0.627847, 0.166575, 0.386276, 1.0], [0.633998, 0.168992, 0.383704, 1.0], [0.640135, 0.171438, 0.381065, 1.0], [0.646260, 0.173914, 0.378359, 1.0], [0.652369, 0.176421, 0.375586, 1.0], [0.658463, 0.178962, 0.372748, 1.0], [0.664540, 0.181539, 0.369846, 1.0], [0.670599, 0.184153, 0.366879, 1.0], [0.676638, 0.186807, 0.363849, 1.0], [0.682656, 0.189501, 0.360757, 1.0], [0.688653, 0.192239, 0.357603, 1.0], [0.694627, 0.195021, 0.354388, 1.0], [0.700576, 0.197851, 0.351113, 1.0], [0.706500, 0.200728, 0.347777, 1.0], [0.712396, 0.203656, 0.344383, 1.0], [0.718264, 0.206636, 0.340931, 1.0], [0.724103, 0.209670, 0.337424, 1.0], [0.729909, 0.212759, 0.333861, 1.0], [0.735683, 0.215906, 0.330245, 1.0], [0.741423, 0.219112, 0.326576, 1.0], [0.747127, 0.222378, 0.322856, 1.0], [0.752794, 0.225706, 0.319085, 1.0], [0.758422, 0.229097, 0.315266, 1.0], [0.764010, 0.232554, 0.311399, 1.0], [0.769556, 0.236077, 0.307485, 1.0], [0.775059, 0.239667, 0.303526, 1.0], [0.780517, 0.243327, 0.299523, 1.0], [0.785929, 0.247056, 0.295477, 1.0], [0.791293, 0.250856, 0.291390, 1.0], [0.796607, 0.254728, 0.287264, 1.0], [0.801871, 0.258674, 0.283099, 1.0], [0.807082, 0.262692, 0.278898, 
1.0], [0.812239, 0.266786, 0.274661, 1.0], [0.817341, 0.270954, 0.270390, 1.0], [0.822386, 0.275197, 0.266085, 1.0], [0.827372, 0.279517, 0.261750, 1.0], [0.832299, 0.283913, 0.257383, 1.0], [0.837165, 0.288385, 0.252988, 1.0], [0.841969, 0.292933, 0.248564, 1.0], [0.846709, 0.297559, 0.244113, 1.0], [0.851384, 0.302260, 0.239636, 1.0], [0.855992, 0.307038, 0.235133, 1.0], [0.860533, 0.311892, 0.230606, 1.0], [0.865006, 0.316822, 0.226055, 1.0], [0.869409, 0.321827, 0.221482, 1.0], [0.873741, 0.326906, 0.216886, 1.0], [0.878001, 0.332060, 0.212268, 1.0], [0.882188, 0.337287, 0.207628, 1.0], [0.886302, 0.342586, 0.202968, 1.0], [0.890341, 0.347957, 0.198286, 1.0], [0.894305, 0.353399, 0.193584, 1.0], [0.898192, 0.358911, 0.188860, 1.0], [0.902003, 0.364492, 0.184116, 1.0], [0.905735, 0.370140, 0.179350, 1.0], [0.909390, 0.375856, 0.174563, 1.0], [0.912966, 0.381636, 0.169755, 1.0], [0.916462, 0.387481, 0.164924, 1.0], [0.919879, 0.393389, 0.160070, 1.0], [0.923215, 0.399359, 0.155193, 1.0], [0.926470, 0.405389, 0.150292, 1.0], [0.929644, 0.411479, 0.145367, 1.0], [0.932737, 0.417627, 0.140417, 1.0], [0.935747, 0.423831, 0.135440, 1.0], [0.938675, 0.430091, 0.130438, 1.0], [0.941521, 0.436405, 0.125409, 1.0], [0.944285, 0.442772, 0.120354, 1.0], [0.946965, 0.449191, 0.115272, 1.0], [0.949562, 0.455660, 0.110164, 1.0], [0.952075, 0.462178, 0.105031, 1.0], [0.954506, 0.468744, 0.099874, 1.0], [0.956852, 0.475356, 0.094695, 1.0], [0.959114, 0.482014, 0.089499, 1.0], [0.961293, 0.488716, 0.084289, 1.0], [0.963387, 0.495462, 0.079073, 1.0], [0.965397, 0.502249, 0.073859, 1.0], [0.967322, 0.509078, 0.068659, 1.0], [0.969163, 0.515946, 0.063488, 1.0], [0.970919, 0.522853, 0.058367, 1.0], [0.972590, 0.529798, 0.053324, 1.0], [0.974176, 0.536780, 0.048392, 1.0], [0.975677, 0.543798, 0.043618, 1.0], [0.977092, 0.550850, 0.039050, 1.0], [0.978422, 0.557937, 0.034931, 1.0], [0.979666, 0.565057, 0.031409, 1.0], [0.980824, 0.572209, 0.028508, 1.0], [0.981895, 0.579392, 0.026250, 
1.0], [0.982881, 0.586606, 0.024661, 1.0], [0.983779, 0.593849, 0.023770, 1.0], [0.984591, 0.601122, 0.023606, 1.0], [0.985315, 0.608422, 0.024202, 1.0], [0.985952, 0.615750, 0.025592, 1.0], [0.986502, 0.623105, 0.027814, 1.0], [0.986964, 0.630485, 0.030908, 1.0], [0.987337, 0.637890, 0.034916, 1.0], [0.987622, 0.645320, 0.039886, 1.0], [0.987819, 0.652773, 0.045581, 1.0], [0.987926, 0.660250, 0.051750, 1.0], [0.987945, 0.667748, 0.058329, 1.0], [0.987874, 0.675267, 0.065257, 1.0], [0.987714, 0.682807, 0.072489, 1.0], [0.987464, 0.690366, 0.079990, 1.0], [0.987124, 0.697944, 0.087731, 1.0], [0.986694, 0.705540, 0.095694, 1.0], [0.986175, 0.713153, 0.103863, 1.0], [0.985566, 0.720782, 0.112229, 1.0], [0.984865, 0.728427, 0.120785, 1.0], [0.984075, 0.736087, 0.129527, 1.0], [0.983196, 0.743758, 0.138453, 1.0], [0.982228, 0.751442, 0.147565, 1.0], [0.981173, 0.759135, 0.156863, 1.0], [0.980032, 0.766837, 0.166353, 1.0], [0.978806, 0.774545, 0.176037, 1.0], [0.977497, 0.782258, 0.185923, 1.0], [0.976108, 0.789974, 0.196018, 1.0], [0.974638, 0.797692, 0.206332, 1.0], [0.973088, 0.805409, 0.216877, 1.0], [0.971468, 0.813122, 0.227658, 1.0], [0.969783, 0.820825, 0.238686, 1.0], [0.968041, 0.828515, 0.249972, 1.0], [0.966243, 0.836191, 0.261534, 1.0], [0.964394, 0.843848, 0.273391, 1.0], [0.962517, 0.851476, 0.285546, 1.0], [0.960626, 0.859069, 0.298010, 1.0], [0.958720, 0.866624, 0.310820, 1.0], [0.956834, 0.874129, 0.323974, 1.0], [0.954997, 0.881569, 0.337475, 1.0], [0.953215, 0.888942, 0.351369, 1.0], [0.951546, 0.896226, 0.365627, 1.0], [0.950018, 0.903409, 0.380271, 1.0], [0.948683, 0.910473, 0.395289, 1.0], [0.947594, 0.917399, 0.410665, 1.0], [0.946809, 0.924168, 0.426373, 1.0], [0.946392, 0.930761, 0.442367, 1.0], [0.946403, 0.937159, 0.458592, 1.0], [0.946903, 0.943348, 0.474970, 1.0], [0.947937, 0.949318, 0.491426, 1.0], [0.949545, 0.955063, 0.507860, 1.0], [0.951740, 0.960587, 0.524203, 1.0], [0.954529, 0.965896, 0.540361, 1.0], [0.957896, 0.971003, 0.556275, 
1.0], [0.961812, 0.975924, 0.571925, 1.0], [0.966249, 0.980678, 0.587206, 1.0], [0.971162, 0.985282, 0.602154, 1.0], [0.976511, 0.989753, 0.616760, 1.0], [0.982257, 0.994109, 0.631017, 1.0], [0.988362, 0.998364, 0.644924, 1.0] ], dtype=np.float)*255 COLORS_INV = COLORS[::-1] class ColorScalePlasma(ColorScale): """ Plasma color scale proposal for matplotlib default color scale """ COLORS = np.array([ [0.050383, 0.029803, 0.527975, 1.0], [0.063536, 0.028426, 0.533124, 1.0], [0.075353, 0.027206, 0.538007, 1.0], [0.086222, 0.026125, 0.542658, 1.0], [0.096379, 0.025165, 0.547103, 1.0], [0.105980, 0.024309, 0.551368, 1.0], [0.115124, 0.023556, 0.555468, 1.0], [0.123903, 0.022878, 0.559423, 1.0], [0.132381, 0.022258, 0.563250, 1.0], [0.140603, 0.021687, 0.566959, 1.0], [0.148607, 0.021154, 0.570562, 1.0], [0.156421, 0.020651, 0.574065, 1.0], [0.164070, 0.020171, 0.577478, 1.0], [0.171574, 0.019706, 0.580806, 1.0], [0.178950, 0.019252, 0.584054, 1.0], [0.186213, 0.018803, 0.587228, 1.0], [0.193374, 0.018354, 0.590330, 1.0], [0.200445, 0.017902, 0.593364, 1.0], [0.207435, 0.017442, 0.596333, 1.0], [0.214350, 0.016973, 0.599239, 1.0], [0.221197, 0.016497, 0.602083, 1.0], [0.227983, 0.016007, 0.604867, 1.0], [0.234715, 0.015502, 0.607592, 1.0], [0.241396, 0.014979, 0.610259, 1.0], [0.248032, 0.014439, 0.612868, 1.0], [0.254627, 0.013882, 0.615419, 1.0], [0.261183, 0.013308, 0.617911, 1.0], [0.267703, 0.012716, 0.620346, 1.0], [0.274191, 0.012109, 0.622722, 1.0], [0.280648, 0.011488, 0.625038, 1.0], [0.287076, 0.010855, 0.627295, 1.0], [0.293478, 0.010213, 0.629490, 1.0], [0.299855, 0.009561, 0.631624, 1.0], [0.306210, 0.008902, 0.633694, 1.0], [0.312543, 0.008239, 0.635700, 1.0], [0.318856, 0.007576, 0.637640, 1.0], [0.325150, 0.006915, 0.639512, 1.0], [0.331426, 0.006261, 0.641316, 1.0], [0.337683, 0.005618, 0.643049, 1.0], [0.343925, 0.004991, 0.644710, 1.0], [0.350150, 0.004382, 0.646298, 1.0], [0.356359, 0.003798, 0.647810, 1.0], [0.362553, 0.003243, 0.649245, 1.0], 
[0.368733, 0.002724, 0.650601, 1.0], [0.374897, 0.002245, 0.651876, 1.0], [0.381047, 0.001814, 0.653068, 1.0], [0.387183, 0.001434, 0.654177, 1.0], [0.393304, 0.001114, 0.655199, 1.0], [0.399411, 0.000859, 0.656133, 1.0], [0.405503, 0.000678, 0.656977, 1.0], [0.411580, 0.000577, 0.657730, 1.0], [0.417642, 0.000564, 0.658390, 1.0], [0.423689, 0.000646, 0.658956, 1.0], [0.429719, 0.000831, 0.659425, 1.0], [0.435734, 0.001127, 0.659797, 1.0], [0.441732, 0.001540, 0.660069, 1.0], [0.447714, 0.002080, 0.660240, 1.0], [0.453677, 0.002755, 0.660310, 1.0], [0.459623, 0.003574, 0.660277, 1.0], [0.465550, 0.004545, 0.660139, 1.0], [0.471457, 0.005678, 0.659897, 1.0], [0.477344, 0.006980, 0.659549, 1.0], [0.483210, 0.008460, 0.659095, 1.0], [0.489055, 0.010127, 0.658534, 1.0], [0.494877, 0.011990, 0.657865, 1.0], [0.500678, 0.014055, 0.657088, 1.0], [0.506454, 0.016333, 0.656202, 1.0], [0.512206, 0.018833, 0.655209, 1.0], [0.517933, 0.021563, 0.654109, 1.0], [0.523633, 0.024532, 0.652901, 1.0], [0.529306, 0.027747, 0.651586, 1.0], [0.534952, 0.031217, 0.650165, 1.0], [0.540570, 0.034950, 0.648640, 1.0], [0.546157, 0.038954, 0.647010, 1.0], [0.551715, 0.043136, 0.645277, 1.0], [0.557243, 0.047331, 0.643443, 1.0], [0.562738, 0.051545, 0.641509, 1.0], [0.568201, 0.055778, 0.639477, 1.0], [0.573632, 0.060028, 0.637349, 1.0], [0.579029, 0.064296, 0.635126, 1.0], [0.584391, 0.068579, 0.632812, 1.0], [0.589719, 0.072878, 0.630408, 1.0], [0.595011, 0.077190, 0.627917, 1.0], [0.600266, 0.081516, 0.625342, 1.0], [0.605485, 0.085854, 0.622686, 1.0], [0.610667, 0.090204, 0.619951, 1.0], [0.615812, 0.094564, 0.617140, 1.0], [0.620919, 0.098934, 0.614257, 1.0], [0.625987, 0.103312, 0.611305, 1.0], [0.631017, 0.107699, 0.608287, 1.0], [0.636008, 0.112092, 0.605205, 1.0], [0.640959, 0.116492, 0.602065, 1.0], [0.645872, 0.120898, 0.598867, 1.0], [0.650746, 0.125309, 0.595617, 1.0], [0.655580, 0.129725, 0.592317, 1.0], [0.660374, 0.134144, 0.588971, 1.0], [0.665129, 0.138566, 0.585582, 1.0], 
[0.669845, 0.142992, 0.582154, 1.0], [0.674522, 0.147419, 0.578688, 1.0], [0.679160, 0.151848, 0.575189, 1.0], [0.683758, 0.156278, 0.571660, 1.0], [0.688318, 0.160709, 0.568103, 1.0], [0.692840, 0.165141, 0.564522, 1.0], [0.697324, 0.169573, 0.560919, 1.0], [0.701769, 0.174005, 0.557296, 1.0], [0.706178, 0.178437, 0.553657, 1.0], [0.710549, 0.182868, 0.550004, 1.0], [0.714883, 0.187299, 0.546338, 1.0], [0.719181, 0.191729, 0.542663, 1.0], [0.723444, 0.196158, 0.538981, 1.0], [0.727670, 0.200586, 0.535293, 1.0], [0.731862, 0.205013, 0.531601, 1.0], [0.736019, 0.209439, 0.527908, 1.0], [0.740143, 0.213864, 0.524216, 1.0], [0.744232, 0.218288, 0.520524, 1.0], [0.748289, 0.222711, 0.516834, 1.0], [0.752312, 0.227133, 0.513149, 1.0], [0.756304, 0.231555, 0.509468, 1.0], [0.760264, 0.235976, 0.505794, 1.0], [0.764193, 0.240396, 0.502126, 1.0], [0.768090, 0.244817, 0.498465, 1.0], [0.771958, 0.249237, 0.494813, 1.0], [0.775796, 0.253658, 0.491171, 1.0], [0.779604, 0.258078, 0.487539, 1.0], [0.783383, 0.262500, 0.483918, 1.0], [0.787133, 0.266922, 0.480307, 1.0], [0.790855, 0.271345, 0.476706, 1.0], [0.794549, 0.275770, 0.473117, 1.0], [0.798216, 0.280197, 0.469538, 1.0], [0.801855, 0.284626, 0.465971, 1.0], [0.805467, 0.289057, 0.462415, 1.0], [0.809052, 0.293491, 0.458870, 1.0], [0.812612, 0.297928, 0.455338, 1.0], [0.816144, 0.302368, 0.451816, 1.0], [0.819651, 0.306812, 0.448306, 1.0], [0.823132, 0.311261, 0.444806, 1.0], [0.826588, 0.315714, 0.441316, 1.0], [0.830018, 0.320172, 0.437836, 1.0], [0.833422, 0.324635, 0.434366, 1.0], [0.836801, 0.329105, 0.430905, 1.0], [0.840155, 0.333580, 0.427455, 1.0], [0.843484, 0.338062, 0.424013, 1.0], [0.846788, 0.342551, 0.420579, 1.0], [0.850066, 0.347048, 0.417153, 1.0], [0.853319, 0.351553, 0.413734, 1.0], [0.856547, 0.356066, 0.410322, 1.0], [0.859750, 0.360588, 0.406917, 1.0], [0.862927, 0.365119, 0.403519, 1.0], [0.866078, 0.369660, 0.400126, 1.0], [0.869203, 0.374212, 0.396738, 1.0], [0.872303, 0.378774, 0.393355, 1.0], 
[0.875376, 0.383347, 0.389976, 1.0], [0.878423, 0.387932, 0.386600, 1.0], [0.881443, 0.392529, 0.383229, 1.0], [0.884436, 0.397139, 0.379860, 1.0], [0.887402, 0.401762, 0.376494, 1.0], [0.890340, 0.406398, 0.373130, 1.0], [0.893250, 0.411048, 0.369768, 1.0], [0.896131, 0.415712, 0.366407, 1.0], [0.898984, 0.420392, 0.363047, 1.0], [0.901807, 0.425087, 0.359688, 1.0], [0.904601, 0.429797, 0.356329, 1.0], [0.907365, 0.434524, 0.352970, 1.0], [0.910098, 0.439268, 0.349610, 1.0], [0.912800, 0.444029, 0.346251, 1.0], [0.915471, 0.448807, 0.342890, 1.0], [0.918109, 0.453603, 0.339529, 1.0], [0.920714, 0.458417, 0.336166, 1.0], [0.923287, 0.463251, 0.332801, 1.0], [0.925825, 0.468103, 0.329435, 1.0], [0.928329, 0.472975, 0.326067, 1.0], [0.930798, 0.477867, 0.322697, 1.0], [0.933232, 0.482780, 0.319325, 1.0], [0.935630, 0.487712, 0.315952, 1.0], [0.937990, 0.492667, 0.312575, 1.0], [0.940313, 0.497642, 0.309197, 1.0], [0.942598, 0.502639, 0.305816, 1.0], [0.944844, 0.507658, 0.302433, 1.0], [0.947051, 0.512699, 0.299049, 1.0], [0.949217, 0.517763, 0.295662, 1.0], [0.951344, 0.522850, 0.292275, 1.0], [0.953428, 0.527960, 0.288883, 1.0], [0.955470, 0.533093, 0.285490, 1.0], [0.957469, 0.538250, 0.282096, 1.0], [0.959424, 0.543431, 0.278701, 1.0], [0.961336, 0.548636, 0.275305, 1.0], [0.963203, 0.553865, 0.271909, 1.0], [0.965024, 0.559118, 0.268513, 1.0], [0.966798, 0.564396, 0.265118, 1.0], [0.968526, 0.569700, 0.261721, 1.0], [0.970205, 0.575028, 0.258325, 1.0], [0.971835, 0.580382, 0.254931, 1.0], [0.973416, 0.585761, 0.251540, 1.0], [0.974947, 0.591165, 0.248151, 1.0], [0.976428, 0.596595, 0.244767, 1.0], [0.977856, 0.602051, 0.241387, 1.0], [0.979233, 0.607532, 0.238013, 1.0], [0.980556, 0.613039, 0.234646, 1.0], [0.981826, 0.618572, 0.231287, 1.0], [0.983041, 0.624131, 0.227937, 1.0], [0.984199, 0.629718, 0.224595, 1.0], [0.985301, 0.635330, 0.221265, 1.0], [0.986345, 0.640969, 0.217948, 1.0], [0.987332, 0.646633, 0.214648, 1.0], [0.988260, 0.652325, 0.211364, 1.0], 
[0.989128, 0.658043, 0.208100, 1.0], [0.989935, 0.663787, 0.204859, 1.0], [0.990681, 0.669558, 0.201642, 1.0], [0.991365, 0.675355, 0.198453, 1.0], [0.991985, 0.681179, 0.195295, 1.0], [0.992541, 0.687030, 0.192170, 1.0], [0.993032, 0.692907, 0.189084, 1.0], [0.993456, 0.698810, 0.186041, 1.0], [0.993814, 0.704741, 0.183043, 1.0], [0.994103, 0.710698, 0.180097, 1.0], [0.994324, 0.716681, 0.177208, 1.0], [0.994474, 0.722691, 0.174381, 1.0], [0.994553, 0.728728, 0.171622, 1.0], [0.994561, 0.734791, 0.168938, 1.0], [0.994495, 0.740880, 0.166335, 1.0], [0.994355, 0.746995, 0.163821, 1.0], [0.994141, 0.753137, 0.161404, 1.0], [0.993851, 0.759304, 0.159092, 1.0], [0.993482, 0.765499, 0.156891, 1.0], [0.993033, 0.771720, 0.154808, 1.0], [0.992505, 0.777967, 0.152855, 1.0], [0.991897, 0.784239, 0.151042, 1.0], [0.991209, 0.790537, 0.149377, 1.0], [0.990439, 0.796859, 0.147870, 1.0], [0.989587, 0.803205, 0.146529, 1.0], [0.988648, 0.809579, 0.145357, 1.0], [0.987621, 0.815978, 0.144363, 1.0], [0.986509, 0.822401, 0.143557, 1.0], [0.985314, 0.828846, 0.142945, 1.0], [0.984031, 0.835315, 0.142528, 1.0], [0.982653, 0.841812, 0.142303, 1.0], [0.981190, 0.848329, 0.142279, 1.0], [0.979644, 0.854866, 0.142453, 1.0], [0.977995, 0.861432, 0.142808, 1.0], [0.976265, 0.868016, 0.143351, 1.0], [0.974443, 0.874622, 0.144061, 1.0], [0.972530, 0.881250, 0.144923, 1.0], [0.970533, 0.887896, 0.145919, 1.0], [0.968443, 0.894564, 0.147014, 1.0], [0.966271, 0.901249, 0.148180, 1.0], [0.964021, 0.907950, 0.149370, 1.0], [0.961681, 0.914672, 0.150520, 1.0], [0.959276, 0.921407, 0.151566, 1.0], [0.956808, 0.928152, 0.152409, 1.0], [0.954287, 0.934908, 0.152921, 1.0], [0.951726, 0.941671, 0.152925, 1.0], [0.949151, 0.948435, 0.152178, 1.0], [0.946602, 0.955190, 0.150328, 1.0], [0.944152, 0.961916, 0.146861, 1.0], [0.941896, 0.968590, 0.140956, 1.0], [0.940015, 0.975158, 0.131326, 1.0] ], dtype=np.float)*255 COLORS_INV = COLORS[::-1] class ColorScaleViridis(ColorScale): """ Viridis color scale 
proposal for matplotlib and winner (because it is green!) """ COLORS = np.array([ [0.267004, 0.004874, 0.329415, 1.0], [0.268510, 0.009605, 0.335427, 1.0], [0.269944, 0.014625, 0.341379, 1.0], [0.271305, 0.019942, 0.347269, 1.0], [0.272594, 0.025563, 0.353093, 1.0], [0.273809, 0.031497, 0.358853, 1.0], [0.274952, 0.037752, 0.364543, 1.0], [0.276022, 0.044167, 0.370164, 1.0], [0.277018, 0.050344, 0.375715, 1.0], [0.277941, 0.056324, 0.381191, 1.0], [0.278791, 0.062145, 0.386592, 1.0], [0.279566, 0.067836, 0.391917, 1.0], [0.280267, 0.073417, 0.397163, 1.0], [0.280894, 0.078907, 0.402329, 1.0], [0.281446, 0.084320, 0.407414, 1.0], [0.281924, 0.089666, 0.412415, 1.0], [0.282327, 0.094955, 0.417331, 1.0], [0.282656, 0.100196, 0.422160, 1.0], [0.282910, 0.105393, 0.426902, 1.0], [0.283091, 0.110553, 0.431554, 1.0], [0.283197, 0.115680, 0.436115, 1.0], [0.283229, 0.120777, 0.440584, 1.0], [0.283187, 0.125848, 0.444960, 1.0], [0.283072, 0.130895, 0.449241, 1.0], [0.282884, 0.135920, 0.453427, 1.0], [0.282623, 0.140926, 0.457517, 1.0], [0.282290, 0.145912, 0.461510, 1.0], [0.281887, 0.150881, 0.465405, 1.0], [0.281412, 0.155834, 0.469201, 1.0], [0.280868, 0.160771, 0.472899, 1.0], [0.280255, 0.165693, 0.476498, 1.0], [0.279574, 0.170599, 0.479997, 1.0], [0.278826, 0.175490, 0.483397, 1.0], [0.278012, 0.180367, 0.486697, 1.0], [0.277134, 0.185228, 0.489898, 1.0], [0.276194, 0.190074, 0.493001, 1.0], [0.275191, 0.194905, 0.496005, 1.0], [0.274128, 0.199721, 0.498911, 1.0], [0.273006, 0.204520, 0.501721, 1.0], [0.271828, 0.209303, 0.504434, 1.0], [0.270595, 0.214069, 0.507052, 1.0], [0.269308, 0.218818, 0.509577, 1.0], [0.267968, 0.223549, 0.512008, 1.0], [0.266580, 0.228262, 0.514349, 1.0], [0.265145, 0.232956, 0.516599, 1.0], [0.263663, 0.237631, 0.518762, 1.0], [0.262138, 0.242286, 0.520837, 1.0], [0.260571, 0.246922, 0.522828, 1.0], [0.258965, 0.251537, 0.524736, 1.0], [0.257322, 0.256130, 0.526563, 1.0], [0.255645, 0.260703, 0.528312, 1.0], [0.253935, 0.265254, 0.529983, 
1.0], [0.252194, 0.269783, 0.531579, 1.0], [0.250425, 0.274290, 0.533103, 1.0], [0.248629, 0.278775, 0.534556, 1.0], [0.246811, 0.283237, 0.535941, 1.0], [0.244972, 0.287675, 0.537260, 1.0], [0.243113, 0.292092, 0.538516, 1.0], [0.241237, 0.296485, 0.539709, 1.0], [0.239346, 0.300855, 0.540844, 1.0], [0.237441, 0.305202, 0.541921, 1.0], [0.235526, 0.309527, 0.542944, 1.0], [0.233603, 0.313828, 0.543914, 1.0], [0.231674, 0.318106, 0.544834, 1.0], [0.229739, 0.322361, 0.545706, 1.0], [0.227802, 0.326594, 0.546532, 1.0], [0.225863, 0.330805, 0.547314, 1.0], [0.223925, 0.334994, 0.548053, 1.0], [0.221989, 0.339161, 0.548752, 1.0], [0.220057, 0.343307, 0.549413, 1.0], [0.218130, 0.347432, 0.550038, 1.0], [0.216210, 0.351535, 0.550627, 1.0], [0.214298, 0.355619, 0.551184, 1.0], [0.212395, 0.359683, 0.551710, 1.0], [0.210503, 0.363727, 0.552206, 1.0], [0.208623, 0.367752, 0.552675, 1.0], [0.206756, 0.371758, 0.553117, 1.0], [0.204903, 0.375746, 0.553533, 1.0], [0.203063, 0.379716, 0.553925, 1.0], [0.201239, 0.383670, 0.554294, 1.0], [0.199430, 0.387607, 0.554642, 1.0], [0.197636, 0.391528, 0.554969, 1.0], [0.195860, 0.395433, 0.555276, 1.0], [0.194100, 0.399323, 0.555565, 1.0], [0.192357, 0.403199, 0.555836, 1.0], [0.190631, 0.407061, 0.556089, 1.0], [0.188923, 0.410910, 0.556326, 1.0], [0.187231, 0.414746, 0.556547, 1.0], [0.185556, 0.418570, 0.556753, 1.0], [0.183898, 0.422383, 0.556944, 1.0], [0.182256, 0.426184, 0.557120, 1.0], [0.180629, 0.429975, 0.557282, 1.0], [0.179019, 0.433756, 0.557430, 1.0], [0.177423, 0.437527, 0.557565, 1.0], [0.175841, 0.441290, 0.557685, 1.0], [0.174274, 0.445044, 0.557792, 1.0], [0.172719, 0.448791, 0.557885, 1.0], [0.171176, 0.452530, 0.557965, 1.0], [0.169646, 0.456262, 0.558030, 1.0], [0.168126, 0.459988, 0.558082, 1.0], [0.166617, 0.463708, 0.558119, 1.0], [0.165117, 0.467423, 0.558141, 1.0], [0.163625, 0.471133, 0.558148, 1.0], [0.162142, 0.474838, 0.558140, 1.0], [0.160665, 0.478540, 0.558115, 1.0], [0.159194, 0.482237, 0.558073, 
1.0], [0.157729, 0.485932, 0.558013, 1.0], [0.156270, 0.489624, 0.557936, 1.0], [0.154815, 0.493313, 0.557840, 1.0], [0.153364, 0.497000, 0.557724, 1.0], [0.151918, 0.500685, 0.557587, 1.0], [0.150476, 0.504369, 0.557430, 1.0], [0.149039, 0.508051, 0.557250, 1.0], [0.147607, 0.511733, 0.557049, 1.0], [0.146180, 0.515413, 0.556823, 1.0], [0.144759, 0.519093, 0.556572, 1.0], [0.143343, 0.522773, 0.556295, 1.0], [0.141935, 0.526453, 0.555991, 1.0], [0.140536, 0.530132, 0.555659, 1.0], [0.139147, 0.533812, 0.555298, 1.0], [0.137770, 0.537492, 0.554906, 1.0], [0.136408, 0.541173, 0.554483, 1.0], [0.135066, 0.544853, 0.554029, 1.0], [0.133743, 0.548535, 0.553541, 1.0], [0.132444, 0.552216, 0.553018, 1.0], [0.131172, 0.555899, 0.552459, 1.0], [0.129933, 0.559582, 0.551864, 1.0], [0.128729, 0.563265, 0.551229, 1.0], [0.127568, 0.566949, 0.550556, 1.0], [0.126453, 0.570633, 0.549841, 1.0], [0.125394, 0.574318, 0.549086, 1.0], [0.124395, 0.578002, 0.548287, 1.0], [0.123463, 0.581687, 0.547445, 1.0], [0.122606, 0.585371, 0.546557, 1.0], [0.121831, 0.589055, 0.545623, 1.0], [0.121148, 0.592739, 0.544641, 1.0], [0.120565, 0.596422, 0.543611, 1.0], [0.120092, 0.600104, 0.542530, 1.0], [0.119738, 0.603785, 0.541400, 1.0], [0.119512, 0.607464, 0.540218, 1.0], [0.119423, 0.611141, 0.538982, 1.0], [0.119483, 0.614817, 0.537692, 1.0], [0.119699, 0.618490, 0.536347, 1.0], [0.120081, 0.622161, 0.534946, 1.0], [0.120638, 0.625828, 0.533488, 1.0], [0.121380, 0.629492, 0.531973, 1.0], [0.122312, 0.633153, 0.530398, 1.0], [0.123444, 0.636809, 0.528763, 1.0], [0.124780, 0.640461, 0.527068, 1.0], [0.126326, 0.644107, 0.525311, 1.0], [0.128087, 0.647749, 0.523491, 1.0], [0.130067, 0.651384, 0.521608, 1.0], [0.132268, 0.655014, 0.519661, 1.0], [0.134692, 0.658636, 0.517649, 1.0], [0.137339, 0.662252, 0.515571, 1.0], [0.140210, 0.665859, 0.513427, 1.0], [0.143303, 0.669459, 0.511215, 1.0], [0.146616, 0.673050, 0.508936, 1.0], [0.150148, 0.676631, 0.506589, 1.0], [0.153894, 0.680203, 0.504172, 
1.0], [0.157851, 0.683765, 0.501686, 1.0], [0.162016, 0.687316, 0.499129, 1.0], [0.166383, 0.690856, 0.496502, 1.0], [0.170948, 0.694384, 0.493803, 1.0], [0.175707, 0.697900, 0.491033, 1.0], [0.180653, 0.701402, 0.488189, 1.0], [0.185783, 0.704891, 0.485273, 1.0], [0.191090, 0.708366, 0.482284, 1.0], [0.196571, 0.711827, 0.479221, 1.0], [0.202219, 0.715272, 0.476084, 1.0], [0.208030, 0.718701, 0.472873, 1.0], [0.214000, 0.722114, 0.469588, 1.0], [0.220124, 0.725509, 0.466226, 1.0], [0.226397, 0.728888, 0.462789, 1.0], [0.232815, 0.732247, 0.459277, 1.0], [0.239374, 0.735588, 0.455688, 1.0], [0.246070, 0.738910, 0.452024, 1.0], [0.252899, 0.742211, 0.448284, 1.0], [0.259857, 0.745492, 0.444467, 1.0], [0.266941, 0.748751, 0.440573, 1.0], [0.274149, 0.751988, 0.436601, 1.0], [0.281477, 0.755203, 0.432552, 1.0], [0.288921, 0.758394, 0.428426, 1.0], [0.296479, 0.761561, 0.424223, 1.0], [0.304148, 0.764704, 0.419943, 1.0], [0.311925, 0.767822, 0.415586, 1.0], [0.319809, 0.770914, 0.411152, 1.0], [0.327796, 0.773980, 0.406640, 1.0], [0.335885, 0.777018, 0.402049, 1.0], [0.344074, 0.780029, 0.397381, 1.0], [0.352360, 0.783011, 0.392636, 1.0], [0.360741, 0.785964, 0.387814, 1.0], [0.369214, 0.788888, 0.382914, 1.0], [0.377779, 0.791781, 0.377939, 1.0], [0.386433, 0.794644, 0.372886, 1.0], [0.395174, 0.797475, 0.367757, 1.0], [0.404001, 0.800275, 0.362552, 1.0], [0.412913, 0.803041, 0.357269, 1.0], [0.421908, 0.805774, 0.351910, 1.0], [0.430983, 0.808473, 0.346476, 1.0], [0.440137, 0.811138, 0.340967, 1.0], [0.449368, 0.813768, 0.335384, 1.0], [0.458674, 0.816363, 0.329727, 1.0], [0.468053, 0.818921, 0.323998, 1.0], [0.477504, 0.821444, 0.318195, 1.0], [0.487026, 0.823929, 0.312321, 1.0], [0.496615, 0.826376, 0.306377, 1.0], [0.506271, 0.828786, 0.300362, 1.0], [0.515992, 0.831158, 0.294279, 1.0], [0.525776, 0.833491, 0.288127, 1.0], [0.535621, 0.835785, 0.281908, 1.0], [0.545524, 0.838039, 0.275626, 1.0], [0.555484, 0.840254, 0.269281, 1.0], [0.565498, 0.842430, 0.262877, 
1.0], [0.575563, 0.844566, 0.256415, 1.0], [0.585678, 0.846661, 0.249897, 1.0], [0.595839, 0.848717, 0.243329, 1.0], [0.606045, 0.850733, 0.236712, 1.0], [0.616293, 0.852709, 0.230052, 1.0], [0.626579, 0.854645, 0.223353, 1.0], [0.636902, 0.856542, 0.216620, 1.0], [0.647257, 0.858400, 0.209861, 1.0], [0.657642, 0.860219, 0.203082, 1.0], [0.668054, 0.861999, 0.196293, 1.0], [0.678489, 0.863742, 0.189503, 1.0], [0.688944, 0.865448, 0.182725, 1.0], [0.699415, 0.867117, 0.175971, 1.0], [0.709898, 0.868751, 0.169257, 1.0], [0.720391, 0.870350, 0.162603, 1.0], [0.730889, 0.871916, 0.156029, 1.0], [0.741388, 0.873449, 0.149561, 1.0], [0.751884, 0.874951, 0.143228, 1.0], [0.762373, 0.876424, 0.137064, 1.0], [0.772852, 0.877868, 0.131109, 1.0], [0.783315, 0.879285, 0.125405, 1.0], [0.793760, 0.880678, 0.120005, 1.0], [0.804182, 0.882046, 0.114965, 1.0], [0.814576, 0.883393, 0.110347, 1.0], [0.824940, 0.884720, 0.106217, 1.0], [0.835270, 0.886029, 0.102646, 1.0], [0.845561, 0.887322, 0.099702, 1.0], [0.855810, 0.888601, 0.097452, 1.0], [0.866013, 0.889868, 0.095953, 1.0], [0.876168, 0.891125, 0.095250, 1.0], [0.886271, 0.892374, 0.095374, 1.0], [0.896320, 0.893616, 0.096335, 1.0], [0.906311, 0.894855, 0.098125, 1.0], [0.916242, 0.896091, 0.100717, 1.0], [0.926106, 0.897330, 0.104071, 1.0], [0.935904, 0.898570, 0.108131, 1.0], [0.945636, 0.899815, 0.112838, 1.0], [0.955300, 0.901065, 0.118128, 1.0], [0.964894, 0.902323, 0.123941, 1.0], [0.974417, 0.903590, 0.130215, 1.0], [0.983868, 0.904867, 0.136897, 1.0], [0.993248, 0.906157, 0.143936, 1.0] ], dtype=np.float)*255 COLORS_INV = COLORS[::-1] class QudiPalette(): """ Qudi saturated color palette """ blue = pg.mkColor(34, 23, 244) c1 = blue orange = pg.mkColor(255, 164, 14) c2 = orange magenta = pg.mkColor(255, 52, 135) c3 = magenta green = pg.mkColor(0, 139, 0) c4 = green cyan = pg.mkColor(23, 190, 207) c5 = cyan purple = pg.mkColor(133, 0, 133) c6 = purple class QudiPalettePale(): """ Qudi desaturated color palette """ blue = 
pg.mkColor(102, 94, 252) c1 = blue orange = pg.mkColor(255, 175, 43) c2 = orange magenta = pg.mkColor(255, 81, 152) c3 = magenta green = pg.mkColor(0, 179, 0) c4 = green cyan = pg.mkColor(59, 217, 233) c5 = cyan purple = pg.mkColor(188, 0, 188) c6 = purple
gpl-3.0
hposborn/Namaste
namaste/run.py
1
66119
class Settings():
    """Container for the Namaste model settings.

    Defaults target a K2 monotransit fit. Any field can be overridden via
    keyword arguments at construction time or later through :meth:`update`;
    unknown keywords are silently ignored (matching the historical
    ``kwargs.pop`` behaviour of ``update``).
    """

    # Single source of truth for every tunable field and its default value.
    _DEFAULTS = dict(
        GP=True,                    # GP on/off
        nopt=25,                    # Number of optimisation steps for each GP and mono before mcmc
        nsteps=12500,               # Number of MCMC steps
        npdf=6000,                  # Number of samples in the distributions from which to do calcs
        nwalkers=24,                # Number of emcee walkers
        nthreads=8,                 # Number of emcee threads
        timecut=6,                  # Number of Tdurs either side of the transit to be fitted
        anomcut=3.5,                # Number of sigma above/below to count as an outlier
        fitsloc='./FitsFiles/',     # Storage location to load stuff
        outfilesloc='./Outputs/',   # Storage location to save stuff
        cadence=0.0204318,          # Cadence. Defaults to K2
        kernel='quasi',             # Kernel for use in GPs
        verbose=True,               # Print statements or not...
        mission='K2',               # Mission
    )

    def __init__(self, **kwargs):
        # Start from the defaults, then apply overrides.  Fix: the original
        # accepted **kwargs but silently discarded them; they are now honoured.
        for key, val in self._DEFAULTS.items():
            setattr(self, key, val)
        self.update(**kwargs)

    def update(self, **kwargs):
        """Override any known setting in place; unknown keywords are ignored."""
        for key in self._DEFAULTS:
            setattr(self, key, kwargs.pop(key, getattr(self, key)))

    def printall(self):
        # Dump the current settings as a plain dict.
        print(vars(self))
class Star():
    """A target star: stellar parameters, lightcurve, GP noise model and the
    transit models fitted against it."""

    def __init__(self, name, settings):
        self.objname = name
        self.settings = settings
        # Mean (transit) models to fit, and the parameter PDFs seeding emcee.
        self.meanmodels = []
        self.fitdict = {}

    def exofop_dat(self):
        """Populate stellar parameters from ExoFOP for this object.

        Radius, Teff and mass are required (ValueError if absent); logg,
        [Fe/H] and density are optional, density falling back to a value
        derived from mass and radius.
        """
        sdic = ExoFop(int(self.objname))
        if 'radius' in sdic.columns:
            # Error floor on the down-error: at least 80% of the radius itself.
            self.addRad(sdic['radius'], sdic['radius_err'],
                        np.max([sdic['radius'] * 0.8, sdic['radius_err']]))
        else:
            raise ValueError("No radius")
        if 'teff' in sdic.columns:
            self.addTeff(sdic['teff'], sdic['teff_err'], sdic['teff_err'])
        else:
            raise ValueError("No teff")
        if 'mass' in sdic.columns:
            self.addMass(sdic['mass'], sdic['mass_err'], sdic['mass_err'])
        else:
            raise ValueError("No mass")
        if 'logg' in sdic.columns:
            self.addlogg(sdic['logg'], sdic['logg_err'], sdic['logg_err'])
        if 'feh' in sdic.columns:
            self.addfeh(sdic['feh'], sdic['feh_err'], sdic['feh_err'])
        if 'density' in sdic.columns:
            self.addDens(sdic['density'], sdic['density_err'], sdic['density_err'])
        else:
            self.addDens()

    def csvfile_dat(self, file):
        """Collect stellar parameters from a CSV table (e.g.
        Best_Stellar_Params_nogriz), matching on its ``epic`` column."""
        # NOTE(review): pd.DataFrame.from_csv is deprecated/removed in modern
        # pandas -- migrate to pd.read_csv(file, index_col=0) when upgrading.
        df = pd.DataFrame.from_csv(file)
        row = df.loc[df.epic == int(self.objname)]
        csvname = row.index.values[0]
        row = row.T.to_dict()[csvname]
        self.addRad(row['rad'], row['radep'], row['radem'])
        self.addTeff(row['teff'], row['teffep'], row['teffem'])
        self.addMass(row['mass'], row['massep'], row['massem'])
        self.addlogg(row['logg'], row['loggep'], row['loggem'])
        self.addfeh(row['feh'], row['fehep'], row['fehem'])
        if not pd.isnull(row['rho']):
            self.addDens(row['rho'], row['rhoep'], row['rhoem'])
        else:
            self.addDens()

    def _set_asym(self, prefix, val, uerr, derr):
        # Shared setter for an asymmetric-error quantity: stores <prefix>,
        # <prefix>uerr and <prefix>derr (down-error defaults to the up-error).
        # Replaces six copy-pasted setter bodies.
        setattr(self, prefix, val)
        setattr(self, prefix + 'uerr', uerr)
        setattr(self, prefix + 'derr', uerr if derr is None else derr)

    def addTeff(self, val, uerr, derr=None):
        self._set_asym('steff', val, uerr, derr)

    def addRad(self, val, uerr, derr=None):
        self._set_asym('srad', val, uerr, derr)

    def addMass(self, val, uerr, derr=None):
        self._set_asym('smass', val, uerr, derr)

    def addlogg(self, val, uerr, derr=None):
        self._set_asym('slogg', val, uerr, derr)

    def addfeh(self, val, uerr, derr=None):
        self._set_asym('sfeh', val, uerr, derr)

    def addDens(self, val=None, uerr=None, derr=None):
        """Set the stellar density in solar units; derive it from the other
        stellar parameters when no value is supplied."""
        if val is None:
            val, uerr, derr = CalcDens(self)
        elif val > 200:
            # Values this large must be kg/m^3 -- normalise to solar density.
            val /= 1410.0
            uerr /= 1410.0
            if derr is not None:
                # Fix: the original divided derr unconditionally and crashed
                # with TypeError when derr was left as None.
                derr /= 1410.0
        self._set_asym('sdens', val, uerr, derr)

    def addLightcurve(self, file):
        """Attach a Lightcurve object and estimate the white-noise level."""
        self.Lcurve = Lightcurve(file, self.objname)
        self.mag = self.Lcurve.mag
        if self.settings.mission == "kepler" or self.settings.mission == 'k2':
            # 2.42e-4 is the white noise at 14th magnitude for Kepler.
            self.wn = 2.42e-4 / np.sqrt(10 ** ((14 - self.mag) / 2.514))
        else:
            # Fall back on the 40th percentile of the absolute point-to-point scatter.
            self.wn = np.percentile(abs(np.diff(self.Lcurve.lc[:, 1])), 40)
        return self.Lcurve

    def EstLimbDark(self):
        """Estimate quadratic limb-darkening coefficient distributions."""
        LDs = getKeplerLDs(self.steffs, logg=self.sloggs, FeH=self.sfeh)
        # +/- 1-sigma percentiles.
        sigs = [15.865525393145707, 50.0, 84.13447460685429]
        # NOTE(review): the percentiles are taken over LDs[0]/LDs[1] (rows)
        # while the stored samples are LDs[:,0]/LDs[:,1] (columns) -- kept
        # as-is, but confirm the intended orientation of getKeplerLDs output.
        pctns = np.percentile(LDs[0], sigs)
        self.LD1s = LDs[:, 0]
        self.LD1 = pctns[1]
        self.LD1uerr = pctns[2] - pctns[1]
        self.LD1derr = pctns[1] - pctns[0]
        pctns = np.percentile(LDs[1], sigs)
        self.LD2s = LDs[:, 1]
        self.LD2 = pctns[1]
        self.LD2uerr = pctns[2] - pctns[1]
        self.LD2derr = pctns[1] - pctns[0]
        self.initLD()

    def PDFs(self):
        """Draw npdf-sized sample distributions for each stellar parameter."""
        nd = self.settings.npdf
        self.steffs = noneg_GetAssymDist(self.steff, self.steffuerr, self.steffderr, nd=nd)
        self.srads = noneg_GetAssymDist(self.srad, self.sraduerr, self.sradderr, nd=nd)
        self.smasss = noneg_GetAssymDist(self.smass, self.smassuerr, self.smassderr, nd=nd)
        self.sloggs = GetAssymDist(self.slogg, self.slogguerr, self.sloggderr, nd=nd, returndist=True)
        self.sdenss = noneg_GetAssymDist(self.sdens, self.sdensuerr, self.sdensderr, nd=nd)
        self.EstLimbDark()
return {'steffs':self.steffs,'srads':self.srads,'smasss':self.smasss,'sloggs':self.sloggs,'sdenss':self.sdenss,\ 'LD1s':self.LD1s,'LD2s':self.LD2s} def addGP(self,vector=None): if not hasattr(self, 'kern'): if self.settings.kernel=='Real': self.kern=celerite.terms.RealTerm(log_a=np.log(np.var(self.Lcurve.lc[:,1])), log_c=-np.log(3.0))+\ celerite.terms.JitterTerm(np.log(self.wn),bounds=dict(log_sigma=(np.log(self.wn)-0.1,np.log(self.wn)+0.1))) if vector is not None: self.kern.set_parameter_vector(vector) self.kern.freeze_parameter('terms[1]:log_sigma') # and freezing white noise elif self.settings.kernel=='quasi': self.kern= RotationTerm(np.log(np.var(self.Lcurve.lc[:,1])), np.log(0.25*self.Lcurve.range), np.log(2.5), 0.0, bounds=dict( log_amp=(-20.0, -2.0), log_timescale=(np.log(1.5), np.log(5*self.Lcurve.range)), log_period=(np.log(1.2), np.log(2*self.Lcurve.range)), log_factor=(-5.0, 5.0), ) )+\ celerite.terms.JitterTerm(np.log(self.wn), bounds=dict( log_sigma=(np.log(self.wn)-0.1,np.log(self.wn)+0.1) ) ) if vector is not None: self.kern.set_parameter_vector(vector) #self.initgp={'log_amp':,'log_timescale':,'log_period':,'log_factor':,'log_sigma':} self.kern.freeze_parameter('terms[0]:log_factor') # freezing log factor self.kern.freeze_parameter('terms[1]:log_sigma') # and freezing white noise self.initgp={itrm:self.kern.get_parameter(itrm) for itrm in self.kern.get_parameter_names()} else: self.initgp={itrm:self.kern.get_parameter(itrm) for itrm in self.kern.get_parameter_names()} def Optimize_GP(self): #Optimizing initial GP parameters, depending on GP supplied... ''' #This optimizes the gaussian process on out-of-transit data. This is then held with a gaussian prior during modelling ''' import scipy.optimize as op #Cutting transits from lc self.Lcurve.calc_mask(self.meanmodels) lc_trn=self.Lcurve.lc[self.Lcurve.lcmask] lc_trn[:,1]/=np.nanmedian(lc_trn[:,1])#This half may be different in median from the full lc, so adjusting for this... 
if not hasattr(self,'gp'): self.addGP() self.kern.thaw_all_parameters() gp_notr=celerite.GP(kernel=self.kern,mean=1.0,fit_mean=True) gp_notr.compute(lc_trn[:,0],lc_trn[:,2]) #Initial results: init_res=op.minimize(neg_log_like, list(gp_notr.get_parameter_vector()), args=(lc_trn[:,1],gp_notr), method="L-BFGS-B")#jac=grad_neg_log_like fails=0 if self.settings.kernel=='quasi': suc_res=np.zeros(7) # Run the optimization routine for a grid of size self.settings.nopt #log_amp, log_timescale, log_period, log_factor, log_sigma, mean = params iterparams= np.column_stack((np.random.normal(gp_notr.kernel.get_parameter_vector()[0],3.0,self.settings.nopt), np.random.uniform(1.2,np.log(0.75*self.Lcurve.range),self.settings.nopt), np.random.uniform(np.log(6*self.settings.cadence),np.log(0.75*self.Lcurve.range),self.settings.nopt), np.tile(0.0,self.settings.nopt))) elif self.settings.kernel=='Real': suc_res=np.zeros(4) #log_a, log_c = params iterparams=np.column_stack((np.random.normal(gp_notr.kernel.get_parameter_vector()[0],np.sqrt(abs(gp_notr.kernel.get_parameter_vector()[0])),self.settings.nopt), np.random.normal(gp_notr.kernel.get_parameter_vector()[1],np.sqrt(abs(gp_notr.kernel.get_parameter_vector()[1])),self.settings.nopt))) for n_p in np.arange(self.settings.nopt): vect=np.hstack((iterparams[n_p],np.log(self.wn),1.0)) #gp_notr.kernel.set_parameter_vector(vect) try: result = op.minimize(neg_log_like, vect, args=(lc_trn[:,1], gp_notr), method="L-BFGS-B")# jac=grad_nll,") if result.success: #print("success,",result.fun) suc_res=np.vstack((suc_res,np.hstack((result.x,result.fun)))) else: fails+=1 except: #print("fail,",vect) fails+=1 print(suc_res) if self.settings.verbose else 0 print(str(fails)+" failed attempts out of "+str(self.settings.nopt)) if self.settings.verbose else 0 if len(np.shape(suc_res))==1: raise ValueError("No successful GP minimizations") else: suc_res=suc_res[1:,:] bestres=suc_res[np.argmin(suc_res[:,-1])] gp_notr.set_parameter_vector(bestres[:-1]) 
self.optimised_gp=gp_notr wn_factor = bestres[4]-np.log(self.wn) self.optgp={itrm:gp_notr.get_parameter(itrm) for itrm in gp_notr.get_parameter_names()} # Update the kernel and print the final log-likelihood. for itrm in gp_notr.kernel.get_parameter_names()[:-1]: self.kern.set_parameter(itrm,gp_notr.kernel.get_parameter(itrm)) if self.settings.kernel=='quasi': self.kern.freeze_parameter('terms[0]:log_factor') # re-freezing log factor self.kern.freeze_parameter('terms[1]:log_sigma') # and re-freezing white noise #Adding self.fitdict.update({'kernel:'+nm:np.random.normal(gp_notr.get_parameter('kernel:'+nm),1.5,self.settings.nwalkers) for nm in self.kern.get_parameter_names()}) print("white noise changed by a factor of "+str(np.exp(wn_factor))[:4]) if self.settings.verbose else 0 print("GP improved from ",init_res.fun," to ",bestres[-1]) if self.settings.verbose else 0 '''return bestres[:-2] #mean_shift... is this indicative of the whole lightcurve or just this half of it?''' def AddMonotransit(self, tcen, tdur, depth, b=0.41,replace=True): if not hasattr(self,'steffs'): self.PDFs() #Adding monotransit classes to the star class... Up to four possible. 
if not hasattr(self,'mono1') or replace: #self.LD1s, self.LD2s, self.sdenss,self.Lcurve.lc, self.mono1 = Monotransit(tcen, tdur, depth, self.settings, name=self.objname+'.1') self.mono1.calcmaxvel(self.Lcurve.lc,self.sdenss) self.mono1.Optimize_mono(self.Lcurve.flatten(),self.LDprior.copy()) self.mono1.SaveInput(self.pdflist()) self.meanmodels+=[self.mono1] ''' while a<=5 and len(self.meanmodels)==initlenmonos: if not hasattr(self,'mono'+str(a)) or replace: setattr(self,'mono'+str(a)) = Monotransit(tcen, tdur, depth, self.settings, self.LD1s, self.LD2s, self.denss,self.lc, name=self.objname+'.'+str(a), b=b) exec("self.mono"+str(a)+".Optimize_mono(Lcurve.flaten())") exec("self.mono"+str(a)+".calcmaxvel(self.Lcurve.lc,self.sdenss)") exec("self.meanmodels+=[self.mono"+str(a)+"]") a+=1 ''' def AddNormalPlanet(self, tcen, tdur, depth, Period, b=0.41,replace=False): if not hasattr(self,'steffs'): self.PDFs() #Adding transiting planet classes to the star class using dfm's "transit"... Up to four possible. 
if not hasattr(self,'norm1') or replace: self.norm1 = Multtransit(tcen, tdur, depth, self.settings, name=self.objname+'.5', b=b) self.meanmodels+=[self.norm1] ''' while a<=5 and len(self.meanmodels)==initlenmonos: if not hasattr(self,'mono'+str(a)) or replace: setattr(self,'mono'+str(a)) = Monotransit(tcen, tdur, depth, self.settings, self.LD1s, self.LD2s, self.denss,self.lc, name=self.objname+'.'+str(a), b=b) exec("self.mono"+str(a)+".Optimize_mono(Lcurve.flaten())") exec("self.mono"+str(a)+".calcmaxvel(self.Lcurve.lc,self.sdenss)") exec("self.meanmodels+=[self.mono"+str(a)+"]") a+=1 ''' def initLD(self): if not hasattr(self,'steffs'): self.PDFs() #Getting LD parameters for transit modelling: self.LDprior={'LD1':[0,1.0,'gaussian',np.median(self.LD1s),np.std(self.LD1s)], 'LD2':[0,1.0,'gaussian',np.median(self.LD2s),np.std(self.LD2s)]} def BuildMeanModel(self): #for model in self.meanmodels: #<<<TBD self.meanmodel_comb=MonotransitModel(tcen=self.mono1.tcen, b=self.mono1.b, vel=self.mono1.vel, RpRs=self.mono1.RpRs, LD1=np.median(self.LD1s), LD2=np.median(self.LD2s)) def BuildMeanPriors(self): #Building mean model priors if not hasattr(self, 'LDprior'): self.initLD() self.meanmodel_priors=self.mono1.priors.copy() self.meanmodel_priors.update({'mean:'+ldp:self.LDprior[ldp] for ldp in self.LDprior}) def BuildAllPriors(self,keylist=None): #Building priors from both GP and mean model and ordering by if not hasattr(self, 'meanmodel_priors'): self.BuildMeanPriors() self.priors=self.meanmodel_priors.copy()#{key:self.meanmodel_priors[key] for key in self.meanmodel_priors.keys()} self.priors.update({'kernel:'+self.kern.get_parameter_names()[keyn]:[self.kern.get_parameter_bounds()[keyn][0], self.kern.get_parameter_bounds()[keyn][1]] for keyn in range(len(self.kern.get_parameter_names())) }) self.priors['kernel:terms[0]:log_amp']=self.priors['kernel:terms[0]:log_amp']+['evans',0.25*len(self.Lcurve.lc[:,0])] print(self.priors) if keylist is not None: #Sorting to match parameter 
vector: newprior={key:self.priors[key] for key in keylist} #print(str(len(keylist))+" keys in vector leading to "+str(len(newprior))+" new keys in priors, from "+str(len(self.priors))+" initially") if self.settings.verbose else 0 self.priors=newprior print(self.priors) def RunModel(self): self.BuildMeanPriors() self.BuildMeanModel() self.gp=celerite.GP(kernel=self.kern,mean=self.meanmodel_comb,fit_mean=True) self.BuildAllPriors(self.gp.get_parameter_names()) #Returning monotransit model from information. chx=np.random.choice(self.settings.npdf,self.settings.nwalkers,replace=False) self.fitdict.update({'mean:'+nm:getattr(self.mono1,nm+'s')[chx] for nm in ['tcen','b','vel','RpRs']}) self.fitdict.update({'mean:'+nm:getattr(self,nm+'s')[chx] for nm in ['LD1','LD2']}) #Removing medians: for row in self.fitdict: self.fitdict[row][np.isnan(self.fitdict[row])]=np.nanmedian(np.isnan(self.fitdict[row])) dists=[self.fitdict[cname] for cname in self.gp.get_parameter_names()] self.init_mcmc_params=np.column_stack(dists) print(np.shape(self.init_mcmc_params)) #[,:]) mask=abs(self.Lcurve.lc[:,0]-self.gp.get_parameter('mean:tcen'))<2.75 PlotModel(self.Lcurve.lc[mask,:], self.gp, np.median(self.init_mcmc_params,axis=0), fname=self.settings.outfilesloc+self.objname+'_initfit.png') #dists=[np.random.normal(self.gp.get_parameter(nm),abs(self.gp.get_parameter(nm))**0.25,len(chx)) for nm in ['kernel:terms[0]:log_amp', 'kernel:terms[0]:log_timescale', 'kernel:terms[0]:log_period']]+\ # [self.tcens[chx],self.bs[chx],self.vels[~np.isnan(self.vels)][chx],self.RpRss[chx],self.LD1s[chx],self.LD2s[chx]] #'kernel:terms[0]:log_factor', 'kernel:terms[1]:log_sigma' <- frozen and not used #print(len(pos[0,:])) #[np.array(list(initparams.values())) *(1+ 1.5e-4*np.random.normal()) for i in range(nwalkers)] print("EMCEE HAPPENING. 
INIT DISTS:") print(self.init_mcmc_params[0,:]) print(self.gp.get_parameter_names()) print(self.priors.keys()) #print(' , '.join( [str(list(self.priors.keys())[nk]) [-8:]+' - '+str(abs(self.priors[list(self.priors.keys())[nk]]-self.init_mcmc_params[nk]))[:5] for nk in range(len(self.priors.keys()))]) ) print(' \n '.join([str(list(self.priors.keys())[nk])+' - '+str(self.priors[list(self.priors.keys())[nk]][0])+" > "+str(np.median(self.init_mcmc_params[nk]))+\ " < "+str(self.priors[list(self.priors.keys())[nk]][1]) for nk in range(len(self.priors.keys()))]\ )) if self.settings.verbose else 0 self.sampler = emcee.EnsembleSampler(self.settings.nwalkers, len(self.gp.get_parameter_vector()), MonoLogProb, args=(self.Lcurve.lc,self.priors,self.gp), threads=self.settings.nthreads) self.sampler.run_mcmc(self.init_mcmc_params, 1, rstate0=np.random.get_state()) self.sampler.run_mcmc(self.init_mcmc_params, self.settings.nsteps, rstate0=np.random.get_state()) #Trimming samples: ncut=np.min([int(self.settings.nsteps*0.25),3000]) lnprobs=self.sampler.lnprobability[:,ncut:]#.reshape(-1) prcnt=np.percentile(lnprobs,[50,95],axis=1) #"Failed" walkers are where the 97th percentile is below the median of the rest good_wlkrs=(prcnt[1]>np.median(prcnt[0])) self.sampleheaders=self.gp.get_parameter_names()+['logprob'] self.samples = self.sampler.chain[good_wlkrs, ncut:, :].reshape((-1, len(self.gp.get_parameter_vector()))) self.samples = np.column_stack((self.samples,self.sampler.lnprobability[good_wlkrs,ncut:].reshape(-1))) #Making impact parameter always positive: self.samples[:,1]=abs(self.amples[:,1]) self.SaveMCMC() def SaveMCMC(self): np.save(self.settings.outfilesloc+self.objname+'_MCMCsamples',self.samples) def MonoFinalPars(self,model=None): if model is None and hasattr(self,'mono1'): model=self.mono1 #Taking random Nsamples from samples to put through calculations #Need to form assymetric gaussians of Star Dat parameters if not equal #Rstardist2=np.hstack((np.sort(Rstardist[:, 
0])[0:int(nsamp/2)], np.sort(Rstardist[:, 1])[int(nsamp/2):] )) modelmeanvals=[col.find('mean:')!=-1 for col in self.sampleheaders] model.gen_PDFs({modelmeanvals[nmmv].split(":")[-1]+'s':self.samples[:,modelmeanvals][:,nmmv] for nmmv in modelmeanvals}) rn=np.random.choice(len(self.samples[:,0]),self.settings.npdf,replace=False) #for model in meanmodels: setattr(model,Rps,(self.samples[rn,self.sampleheaders=='mean:RpRs']*695500000*self.Rss)/6.371e6)#inearths setattr(model,'Prob_pl',len(model.Rps[model.Rps<(1.5*11.2)])/len(model.Rps)) aest,Pest=VelToOrbit(self.samples[rn,self.sampleheaders=='mean:vel'], self.sdenss, self.Mss) setattr(model,smas,aest) setattr(model,Ps,Pest) setattr(model,Mps,PlanetRtoM(model.Rps)) setattr(model,Krvs,((2.*np.pi*6.67e-11)/(model.Ps*86400))**(1./3.)*(model.Mps*5.96e24/((1.96e30*self.Mss)**(2./3.)))) #sigs=np.array([2.2750131948178987, , 97.7249868051821]) sigs=[15.865525393145707, 50.0, 84.13447460685429] for val in ['Rps','smas','Ps','Mps','Krvs']: percnts=np.percentile(np.array(getattr(model,val)), sigs) setattr(model,val[:-1],percnts[1]) setattr(model,val[:-1]+'uerr',(percnts[2]-percnts[1])) setattr(model,val[:-1]+'derr',(percnts[1]-percnts[0])) def PlotMCMC(usecols=None): import corner newnames={'kernel:terms[0]:log_amp':'$\log{a}$', 'kernel:terms[0]:log_timescale':'$\log{\tau}$', 'kernel:terms[0]:log_period':'$\log{P}$', 'mean:tcen':'$t_{\rm cen}$', 'mean:b':'$b$', 'mean:vel':'$v\'$', 'mean:RpRs':'$R_p/R_s$', 'mean:LD1':'LD$_1$', 'mean:LD2':'LD$_2$'} if usecols is None: #Plotting corner with all parameter names usecols=self.gp.get_parameter_names() plt.figure(1) Npars=len(samples[0]-1) tobeplotted=np.in1d(gp.get_parameter_names(),usecols) #Clipping extreme values (top.bottom 0.1 percentiles) toclip=np.array([(np.percentile(self.samples[:,t],99.9)>self.samples[:,t]) // (self.samples[:,t]>np.percentile(self.samples[:,t],0.1)) for t in range(Npars)[tobeplotted]]).all(axis=0) clipsamples=self.samples[toclip] #Earmarking the difference 
between GP and non labs = [newnames[key] for key in gp.get_parameter_names() if key in usecols] #This plots the corner: fig = corner.corner(clipsamples[:,tobeplotted], labels=labs, quantiles=[0.16, 0.5, 0.84], plot_datapoints=False,range=np.tile(0.985,Npars)) #Making sure the lightcurve plot doesnt overstep the corner ndim=np.sum(tobeplotted) rows=(ndim-1)/2 cols=(ndim-1)/2 #Printing Kepler name on plot plt.subplot(ndim,ndim,ndim+3).axis('off') plt.title(str(self.objname), fontsize=22) #This plots the model on the same plot as the corner ax = plt.subplot2grid((ndim,ndim), (0, ndim-cols), rowspan=rows-1-int(GP), colspan=cols) modelfits=PlotModel(self.Lcurve.lc, self.gp, np.nanmedian(self.samples,axis=0)) #(lc, samples, scale=1.0, GP=GP) #If we do a Gaussian Process fit, plotting both the transit-subtractedGP model and the residuals ax=plt.subplot2grid((ndim,ndim), (rows-2, ndim-cols), rowspan=1, colspan=cols) _=PlotModel(self.Lcurve.lc, self.gp, np.nanmedian(self.samples,axis=0), prevmodels=modelfits, subGP=True) #plotting residuals beneath: ax = plt.subplot2grid((ndim,ndim), (rows-1, ndim-cols), rowspan=1, colspan=cols) _=PlotModel(self.Lcurve.lc, self.gp, np.nanmedian(self.samples,axis=0), prevmodels=modelfits, residuals=True) #Adding text values to MCMC pdf #Plotting text wrt to residuals plot... 
xlims=ax.get_xlim() x0=(xlims[0]+0.5*(xlims[1]-xlims[0])) #Left of box in x xwid=0.5*(xlims[1]-xlims[0]) #Total width of ybox ylims=ax.get_ylim() y1=(ylims[0]-0.5*(ylims[1]-ylims[0])) #Top of y box yheight=-2.5*(ylims[1]-ylims[0]) #Total height of ybox from matplotlib import rc;rc('text', usetex=True) #matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"] matplotlib.rcParams['text.latex.preamble'] = [r'\boldmath']#Needed for latex commands here: plt.text(x0+0.14*xwid,y1,"EPIC"+str(self.objname),fontsize=20) n_textpos=0 txt=["EPIC"+str(self.objname)] sigs=[2.2750131948178987, 15.865525393145707, 50.0, 84.13447460685429, 97.7249868051821] for lab in labs: xloc=x0+0.05*xwid yloc=y1+0.02*yheight+(0.05*yheight*(n+1)) if lab=='$t_{\rm cen}$':#Need larger float size for Tcen... txt+=[r"\textbf{"+lab+":} "+('%s' % float('%.8g' % (np.median(self.samples[:,n]))))+" +"+('%s' % float('%.2g' % (np.percentile(self.samples[:,n_textpos],sigs[3])-np.median(self.samples[:,n_textpos]))))+" -"+\ ('%s' % float('%.2g' % (np.median(self.samples[:,n_textpos])-np.percentile(self.samples[:,n_textpos],sigs[1]))))] plt.text(xloc,yloc,txt[-1]) else: txt+=[r"\textbf{"+lab+":} "+('%s' % float('%.3g' % (np.median(self.samples[:,n]))))+" +"+('%s' % float('%.2g' % (np.percentile(self.samples[:,n_textpos],sigs[3])-np.median(self.samples[:,n_textpos]))))+" -"+\ ('%s' % float('%.2g' % (np.median(self.samples[:,n_textpos])-np.percentile(self.samples[:,n_textpos],sigs[1]))))] plt.text(xloc,yloc,txt[-1]) n_textpos+=1 info={'Rps':'$R_p (R_{\oplus})$','Ps':'Per (d)','smas':'A (au)','Mps':'$M_p (M_{\oplus})$','Krvs':'K$_{\rm rv}$(ms$^{-1}$)','Prob_pl':'ProbPl ($\%$)',\ 'steffs':'Teff (K)','srads':'Rs ($R_{\odot}$)','smasss':'Ms ($M_{\odot}$)','sloggs':'logg','sdenss':'$\\rho_s (\\rho_{\odot})$'} pdfs=self.pdflist() for ival in pdfs: if ival[:2]!='LD': xloc=x0+0.05*xwid yloc=y1+0.02*yheight+(0.05*yheight*(n_textpos+2)) vals=np.percentile(pdfs[ival],sigs) txt+=[(r"\textbf{%s:} " % 
info[ival])+('%s' % float('%.3g' % vals[2]))+" +"+\ ('%s' % float('%.2g' % (vals[3]-vals[2])))+" -"+('%s' % float('%.2g' % (vals[2]-vals[1])))] plt.text(xloc,yloc,txt[-1]) n_textpos+=1 self.MonoFinalPars() for ival in ['Rps','smas','Ps','Mps','Krvs']: xloc=x0+0.05*xwid yloc=y1+0.02*yheight+(0.05*yheight*(n_textpos+2)) vals=np.percentile(getattr(self.mono1,ival),sigs) txt+=[(r"\textbf{%s:} " % info[ival])+('%s' % float('%.3g' % vals[2]))+" +"+\ ('%s' % float('%.2g' % (vals[3]-vals[2])))+" -"+('%s' % float('%.2g' % (vals[2]-vals[1])))] plt.text(xloc,yloc,txt[-1]) n_textpos+=1 xloc=x0+0.05*xwid yloc=y1+0.02*yheight+(0.05*yheight*(2+n_textpos+1)) txt+=[(r"\textbf{%s:} " % info['Prob_pl'])+('%s' % float('%.3g' % getattr(self.mono1,'Prob_pl')))] plt.text(xloc,yloc,txt[-1]) with open(self.settings.outfilesloc+'latextable.tex','ra') as latextable: latextable.write(' & '.join(txt)+'/n') #Saving as pdf. Will save up to 3 unique files. fname='';n=0 while fname=='': if os.path.exists(self.settings.outfilesloc+'Corner_'+str(self.objname)+'_'+str(int(n))+'.pdf'): n+=1 else: fname=self.settings.outfilesloc+'/Corner_'+str(EPIC)+'_'+str(int(n))+'.pdf' plt.savefig(fname,Transparent=True,dpi=300) plt.savefig(fname.replace('pdf','png'),Transparent=True,dpi=300) class Monotransit(): #Monotransit detection to analyse def __init__(self, tcen, tdur, depth, settings, name, b=0.4, RpRs=None, vel=None,\ tcenuerr=None,tduruerr=None,depthuerr=None,buerr=None,RpRsuerr=None, veluerr=None,\ tcenderr=None,tdurderr=None,depthderr=None,bderr=None,RpRsderr=None,velderr=None): self.settings=settings self.mononame = name self.starname = name.split('.')[0] self.pdfs = {} self.update_pars(tcen, tdur, depth, b=b, RpRs=RpRs, vel=vel, tcenuerr=tcenuerr,tduruerr=tduruerr,depthuerr=depthuerr,buerr=buerr, RpRsuerr=RpRsuerr,veluerr=veluerr, tcenderr=tcenderr,tdurderr=tdurderr,depthderr=depthderr,bderr=bderr,RpRsderr=RpRsderr, velderr=velderr) self.gen_PDFs() ''' def addStarDat(self,pdflist): 
self.LD1s=pdflist['LD1s'] self.LD2s=pdflist['LD2s'] self.srads=pdflist['srads'] self.sdenss=pdflist['sdenss'] self.smasss=pdflist['smasss'] self.steffs=pdflist['steffs'] self.pdfs.update({'LD1s':self.LD1s,'LD2s':self.LD2s,'srads':self.srads,'sdenss':self.sdenss,'smasss':self.smasss,'steffs':self.steffs}) ''' def update_pars(self, tcen=None, tdur=None, depth=None, b=0.4, RpRs=None, vel=None, tcenuerr=None,tduruerr=None,depthuerr=None,buerr=None,RpRsuerr=None, tcenderr=None,tdurderr=None,depthderr=None,bderr=None,RpRsderr=None,veluerr=None,velderr=None): if tcen is not None: self.tcen = float(tcen) # detected transit centre self.tcenuerr = 0.15*tdur if type(tcenuerr)==type(None) else tcenuerr # estimated transit centre errors (default = 0.1*dur) self.tcenderr = 0.15*tdur if type(tcenderr)==type(None) else tcenderr # estimated transit centre errors (default = 0.1*dur) if tdur is not None: self.tdur = float(tdur) # detected transit duration self.tduruerr = 0.33*tdur if type(tcenuerr)==type(None) else tcenuerr # estimated transit duration errors (default = 0.2*dur) self.tdurderr = 0.33*tdur if type(tcenuerr)==type(None) else tcenuerr # estimated transit duration errors (default = 0.2*dur) if depth is not None: self.depth = float(depth) # detected transit depth self.depthuerr = 0.33*depth if type(tcenuerr)==type(None) else tcenuerr # estimated transit depth errors (default = 0.1*depth) self.depthderr = 0.33*depth if type(tcenuerr)==type(None) else tcenuerr # estimated transit depth errors (default = 0.1*depth) self.b = 0.5 if type(b)==type(None) else b # estimated impact parameter (default = 0.5) self.buerr = 0.5 if type(buerr)==type(None) else buerr # estimated impact parameter errors (default = 0.5) self.bderr = 0.5 if type(bderr)==type(None) else bderr # estimated impact parameter errors (default = 0.5) self.RpRs = self.depth**0.5 if type(RpRs)==type(None) else RpRs # Ratio of planet to star radius self.RpRsuerr = 0.5*self.RpRs if type(RpRsuerr)==type(None) else 
RpRsuerr # Ratio of planet to star radius errors (default = 25%) self.RpRsderr = 0.5*self.RpRs if type(RpRsderr)==type(None) else RpRsderr # Ratio of planet to star radius errors (default = 25%) #self.vel = CalcVel() # Velocity of planet relative to stellar radius if vel is not None: self.vel = vel # Velocity scaled to stellar radius elif not hasattr(self,'vel'): self.vel = None # Velocity scaled to stellar radius def gen_PDFs(self,paramdict=None): #Turns params into PDFs if paramdict is None: self.tcens=GetAssymDist(self.tcen,self.tcenuerr,self.tcenderr,nd=self.settings.npdf,returndist=True) #self.depths=GetAssymDist(self.depth,self.depthuerr,self.depthderr,nd=self.settings.npdf,returndist=True) self.bs=abs(GetAssymDist(self.b,self.buerr,self.bderr,nd=self.settings.npdf,returndist=True)) self.RpRss=GetAssymDist(self.RpRs,self.RpRsuerr,self.RpRsderr,nd=self.settings.npdf,returndist=True) #Velocity tends to get "Nan"-y, so looping to avoid that: nanvels=np.tile(True,self.settings.npdf) v=np.zeros(self.settings.npdf) while np.sum(nanvels)>self.settings.npdf*0.002: v[nanvels]=CalcVel(np.random.normal(self.tdur,self.tdur*0.15,nanvels.sum()), self.bs[np.random.choice(self.settings.npdf,np.sum(nanvels))], self.RpRss[np.random.choice(self.settings.npdf,np.sum(nanvels))]) nanvels=(np.isnan(v))*(v<0.0)*(v>100.0) self.vels=v prcnts=np.diff(np.percentile(self.vels[~np.isnan(self.vels)],[15.865525393145707, 50.0, 84.13447460685429])) self.veluerr=prcnts[1] self.velderr=prcnts[0] if self.vel is not None and ~np.isnan(self.vel): #Velocity pre-defined. 
Distribution is not, however, so we'll use the scaled distribution of the "derived" velocity dist to give the vel errors velrat=self.vel/np.nanmedian(self.vels) self.vels*=velrat self.veluerr*=velrat self.velderr*=velrat else: self.vel=np.nanmedian(self.vels) else: #included dictionary of new "samples" sigs=[15.865525393145707, 50.0, 84.13447460685429] for colname in ['tcens','bs','vels','RpRss']: setattr(self,colname,paramdict[colname]) percnts=np.percentile(np.array(getattr(model,colname)), sigs) setattr(self,colname[:-1],percnts[1]) setattr(self,colname[:-1]+'uerr',percnts[2]-percnts[1]) setattr(self,colname[:-1]+'derr',percnts[1]-percnts[0]) self.pdfs.update({'tcens':self.tcens,'bs':self.bs,'RpRss':self.RpRss,'vels':self.vels}) #if StarPDFs is not None: # self.pdflist.update(StarPDFs) def calcmaxvel(self,lc,sdenss): #Estimate maximum velocity given lightcurve duration without transit. self.calcminp(lc) maxvels=np.array([((18226*rho)/self.minp)**(1/3.0) for rho in abs(sdenss)]) prcnts=np.percentile(maxvels,[15.865525393145707, 50.0, 84.13447460685429]) self.maxvelderr=prcnts[1]-prcnts[0] self.maxvel=prcnts[1] self.maxveluerr=prcnts[2]-prcnts[1] def calcminp(self,lc): #finding tdur-wide jumps in folded LC dur_jumps=np.where(np.diff(abs(lc[:,0]-self.tcen))>self.tdur)[0] if len(dur_jumps)==0: #No tdur-wide jumps until end of lc - using the maximum difference to a point in the lc self.minp=np.max(abs(lc[:,0]-self.tcen))+self.tdur*0.33 else: #Taking the first Tdur-wide jump in the folded lightcurve where a transit could be hiding. self.minp=abs(lc[:,0]-self.tcen)[dur_jumps[0]]+self.tdur*0.33 ''' def CalcOrbit(self,denss): #Calculating orbital information #VelToOrbit(Vel, Rs, Ms, ecc=0, omega=0): SMA,P=Vel2Per(denss,self.vels) self.SMAs=SMA self.PS=P def update(self, **kwargs): #Modify detection parameters... 
self.tcen = kwargs.pop('tcen', self.tcen) # detected transit centre self.tdur = kwargs.pop('tdur', self.tdur) # detected transit duration self.depth = kwargs.pop('dep', self.depth) # detected transit depth self.b = kwargs.pop('b', self.b) # estimated impact parameter (default = 0.4) self.RpRs = kwargs.pop('RpRs', self.RpRs) # Ratio of planet to star radius self.vel = kwargs.pop('vel', self.vel) # Velocity of planet relative to stellar radius def FitParams(self,star,info): #Returns params array needed for fitting if info.GP: return np.array([self.tcen,self.b,self.vel,self.RpRs]) else: return np.array([self.tcen,self.b,self.vel,self.RpRs]) def InitialiseGP(self,settings,star,Lcurve): import george self.gp, res, self.lnlikfit = TrainGP(Lcurve.lc,self.tcen,star.wn) self.newmean,self.newwn,self.a,self.tau=res def FitPriors(self,star,settings): #Returns priros array needed for fitting if settings.GP: if not self.hasattr('tau'): self.InitialiseGP() return np.array([[self.tcen,self.tcen,self.tcen,self.tcen], [-1.2,1.2,0,0], [0.0,100.0,self.vmax,self.vmaxerr], [0.0,0.3,0,0], [0.0,1.0,star.LD1,np.average(star.LD1uerr,star.LD1derr)], [0.0,1.0,star.LD2,np.average(star.LD2uerr,star.LD2derr)], [np.log(star.wn)-1.5,np.log(star.wn)+1.5,np.log(star.wn),0.3], [self.tau-10,self.tau+10,self.tau,np.sqrt(np.abs(self.tau))], [self.a-10,self.a+10,self.a,np.sqrt(np.abs(self.a))]]) else: return np.array([[self.tcen,self.tcen,self.tcen,self.tcen], [-1.2,1.2,0,0], [0.0,100.0,self.vmax,self.vmaxerr], [0.0,0.3,0,0], [0.0,1.0,star.LD1,np.average(star.LD1uerr,star.LD1derr)], [0.0,1.0,star.LD2,np.average(star.LD2uerr,star.LD2derr)]] ''' def Optimize_mono(self,flatlc,LDprior,nopt=20): if not hasattr(self,'priors'): self.monoPriors() #Cutting to short area around transit: flatlc=flatlc[abs(flatlc[:,0]-self.tcen)<5*self.tdur] #Optimizing initial transit parameters opt_monomodel=MonotransitModel(tcen=self.tcen, b=self.b, vel=self.vel, RpRs=self.RpRs, LD1=LDprior['LD1'][3], LD2=LDprior['LD2'][3]) 
print(self.priors) temp_priors=self.priors.copy() temp_priors['mean:LD1'] = LDprior['LD1'] temp_priors['mean:LD2'] = LDprior['LD2'] print("monopriors:",temp_priors) if self.settings.verbose else 0 init_neglogprob=MonoOnlyNegLogProb(opt_monomodel.get_parameter_vector(),flatlc,temp_priors,opt_monomodel) print("nll init",init_neglogprob) if self.settings.verbose else 0 suc_res=np.zeros(7) LD1s=np.random.normal(LDprior['LD1'][3],LDprior['LD1'][4],self.settings.npdf) LD2s=np.random.normal(LDprior['LD2'][3],LDprior['LD2'][4],self.settings.npdf) #Running multiple optimizations using rough grid of important model paramsself. for n_par in np.random.choice(self.settings.npdf,nopt,replace=False): initpars=np.array([self.tcens[n_par],self.bs[n_par],self.vels[n_par],self.RpRss[n_par],LD1s[n_par],LD2s[n_par]]) result = opt.minimize(MonoOnlyNegLogProb, initpars, args=(flatlc, temp_priors, opt_monomodel), method="L-BFGS-B") if result.success: suc_res=np.vstack((suc_res,np.hstack((result.x,result.fun)))) if len(np.shape(suc_res))==1: raise ValueError("No successful Monotransit minimizations") else: #Ordering successful optimizations by neglogprob... 
suc_res=suc_res[1:,:] print("All_Results:",suc_res) if self.settings.verbose else 0 suc_res=suc_res[~np.isnan(suc_res[:,-1]),:] bestres=suc_res[np.argmin(suc_res[:,-1])] self.bestres=bestres print("Best_Result:",bestres) if self.settings.verbose else 0 #tcen, tdur, depth, b=0.4, RpRs=None, vel=None self.update_pars(bestres[0], CalcTdur(bestres[2], bestres[1], bestres[3]), bestres[3]**2, b=bestres[1], RpRs=bestres[3],vel=bestres[2]) print("initial fit nll: ",init_neglogprob," to new fit nll: ",bestres[-1]) if self.settings.verbose else 0 #for nn,name in enumerate(['mean:tcen', 'mean:b', 'mean:vel', 'mean:RpRs', 'mean:LD1', 'mean:LD2']): # self.gp.set_parameter(name,bestres[nn]) PlotBestMono(flatlc, opt_monomodel, bestres[:-1], fname=self.settings.outfilesloc+self.mononame+'_init_monoonly_fit.png') def monoPriors(self,name='mean'): self.priors={} self.priors.update({name+':tcen':[self.tcen-self.tdur*0.3,self.tcen+self.tdur*0.3], name+':b':[0.0,1.25], name+':vel':[0,self.maxvel+5*self.maxveluerr,'normlim',self.maxvel,self.maxveluerr], name+':RpRs':[0.02,0.25] }) return self.priors ''' def RunModel(self,lc,gp): self.modelPriors() #Returning monotransit model from information. 
sampler = emcee.EnsembleSampler(self.settings.nwalkers, len(gp.get_parameter_vector()), MonoLogProb, args=(lc,self.priors,gp), threads=self.settings.nthreads) chx=np.random.choice(np.sum(~np.isnan(self.vels)),self.settings.nwalkers,replace=False) dists=[np.random.normal(gp.get_parameter(nm),abs(gp.get_parameter(nm))**0.25,len(chx)) for nm in ['kernel:terms[0]:log_amp', 'kernel:terms[0]:log_timescale', 'kernel:terms[0]:log_period']]+\ [self.tcens[chx],self.bs[chx],self.vels[~np.isnan(self.vels)][chx],self.RpRss[chx],self.LD1s[chx],self.LD2s[chx]] #'kernel:terms[0]:log_factor', 'kernel:terms[1]:log_sigma' <- frozen and not used col=['kernel:terms[0]:log_amp', 'kernel:terms[0]:log_timescale', 'kernel:terms[0]:log_period',\ 'mean:tcen','mean:b','mean:vel','mean:RpRs','mean:LD1','mean:LD2'] pos=np.column_stack(dists) self.init_mcmc_params=pos #print(len(pos[0,:])) #[np.array(list(initparams.values())) *(1+ 1.5e-4*np.random.normal()) for i in range(nwalkers)] Nsteps = 30000 sampler.run_mcmc(pos, 1, rstate0=np.random.get_state()) sampler.run_mcmc(pos, self.settings.nsteps, rstate0=np.random.get_state()) self.samples = sampler.chain[:, 3000:, :].reshape((-1, ndim)) return self.samples #.light_curve(np.arange(0,40,0.024),texp=0.024)) ''' def SaveInput(self,stellarpdfs): np.save(self.settings.fitsloc+self.mononame+'_inputsamples',np.column_stack(([self.pdfs[ipdf] for ipdf in self.pdfs.keys()]+[stellarpdfs[ipdf] for ipdf in stellarpdfs.keys()]))) class Lightcurve(): # Lightcurve class - contains all lightcurve information def __init__(self, file, epic): self.fileloc=file self.lc,self.mag=OpenLC(self.fileloc) try: self.mag=k2_quickdat(epic)['k2_kepmag'] except: self.mag=self.mag self.lc=self.lc[~np.isnan(np.sum(self.lc,axis=1))] self.fluxmed=np.nanmedian(self.lc[:,1]) self.lc[:,1:]/=self.fluxmed self.lc=self.lc[AnomCutDiff(self.lc[:,1])] self.range=self.lc[-1,0]-self.lc[self.lc[:,0]!=0.0,0][0] self.cadence=np.nanmedian(np.diff(self.lc[:,0])) 
self.lcmask=np.tile(True,len(self.lc[:,0])) def BinLC(self, binsize,gap=0.4): #Bins lightcurve to some time interval. Finds gaps in the lightcurve using the threshold "gap" spl_ts=np.array_split(self.lc[:,0],np.where(np.diff(self.lc[:,0])>gap)[0]+1) bins=np.hstack([np.arange(s[0],s[-1],binsize) for s in spl_ts]) digitized = np.digitize(self.lc[:,0], bins) ws=(self.lc[:,2])**-2.0 ws=np.where(ws==0.0,np.median(ws[ws!=0.0]),ws) bin_means = np.array([np.ma.average(self.lc[digitized==i,2],weights=ws[digitized==i]) for i in range(np.max(digitized))]) bin_stds = np.array([np.ma.average((self.lc[digitized==i,2]-bin_means[i])**2, weights=ws[digitized==i]) for i in range(np.max(digitized))]) whok=(~np.isnan(bin_means))&(bin_means!=0.0) self.binlc=np.column_stack((bins,bin_means,bin_stds))[whok,:] self.binsize=binsize return self.binlc ''' def keys(self): return ['NPTS','SKY_TILE','RA_OBJ','DEC_OBJ','BMAG','VMAG','JMAG','KMAG','HMAG','PMRA','PMDEC','PMRAERR','PMDECERR','NFIELDS'] def keyvals(self,*args): #Returns values for the given key list arr=[] for ke in args[0]: exec('arr+=[self.%s]' % ke) return arr ''' def savelc(self): np.save(self.settings.fitsloc+self.OBJNAME.replace(' ','')+'_bin.npy',self.get_binlc()) np.save(self.settings.fitsloc+self.OBJNAME.replace(' ','')+'.npy',self.get_lc()) def flatten(self,winsize=4.5,stepsize=0.125): import k2flatten return k2flatten.ReduceNoise(self.lc,winsize=winsize,stepsize=stepsize) def calc_mask(self,meanmodels): for model in meanmodels: if hasattr(model,'P'): self.lcmask[((abs(self.lc[:,0]-model.tcen)%model.P)<(self.cadence+model.tdur*0.5))+((abs(self.lc[:,0]-model.tcen)%model.P)>(model.P-(model.tdur*0.5+self.cadence)))]=False else: #Mono self.lcmask[abs(self.lc[:,0]-model.tcen)<(self.cadence+model.tdur*0.5)]=False class MonotransitModel(celerite.modeling.Model): parameter_names = ("tcen", "b","vel","RpRs","LD1","LD2") def get_value(self,t): #Getting fine cadence (cad/100). 
Integrating later: cad=np.median(np.diff(t)) oversamp=10#Oversampling finetime=np.empty(0) for i in range(len(t)): finetime=np.hstack((finetime, np.linspace(t[i]-(1-1/oversamp)*(cad/2.), t[i]+(1-1/oversamp)*(cad/2.), oversamp) )) finetime=np.sort(finetime) z = np.sqrt(self.b**2+(self.vel*(finetime - self.tcen))**2) #Removed the flux component below. Can be done by GP model = occultquad(z, self.RpRs, np.array((self.LD1,self.LD2))) return np.average(np.resize(model, (len(t), oversamp)), axis=1) ''' #TBD: class MonotransitModel_x2(celerite.modeling.Model): nmodels=2 parameter_names = tuple([item for sublist in [["tcen"+str(n), "b"+str(n),"vel"+str(n),"RpRs"+str(n)] for n in range(1,nmodels+1)]+[["LD1","LD2"]] for item in sublist ]) def get_value(self,t): cad=np.median(np.diff(t)) oversamp=10#Oversampling finetime=np.empty(0) for i in range(len(t)): finetime=np.hstack((finetime, np.linspace(t[i]-(1-1/oversamp)*(cad/2.), t[i]+(1-1/oversamp)*(cad/2.), oversamp) )) finetime=np.sort(finetime) model=np.zeros((len(finetime))) for nmod in range(nmodels): z = np.sqrt(getattr(self,'b'+str(nmod))**2+(getattr(self,'vel'+str(nmod))*(finetime - getattr(self,'tcen'+str(nmod))))**2) #Removed the flux component below. 
Can be done by GP model *= occultquad(z, getattr(self,'RpRs'+str(nmod)), np.array((self.LD1,self.LD2))) return np.average(np.resize(model, (len(t), oversamp)), axis=1) class MonotransitModel_x3(celerite.modeling.Model): nmodels =3 parameter_names = tuple([item for sublist in [["tcen"+str(n), "b"+str(n),"vel"+str(n),"RpRs"+str(n)] for n in range(1,nmodels+1)]+[["LD1","LD2"]] for item in sublist ]) def get_value(self,t): cad=np.median(np.diff(t)) oversamp=10#Oversampling finetime=np.empty(0) for i in range(len(t)): finetime=np.hstack((finetime, np.linspace(t[i]-(1-1/oversamp)*(cad/2.), t[i]+(1-1/oversamp)*(cad/2.), oversamp) )) finetime=np.sort(finetime) model=np.zeros((len(finetime))) for nmod in range(nmodels): z = np.sqrt(getattr(self,'b'+str(nmod))**2+(getattr(self,'vel'+str(nmod))*(finetime - getattr(self,'tcen'+str(nmod))))**2) #Removed the flux component below. Can be done by GP model *= occultquad(z, getattr(self,'RpRs'+str(nmod)), np.array((self.LD1,self.LD2))) return np.average(np.resize(model, (len(t), oversamp)), axis=1) class MonotransitModel_plus_pl(celerite.modeling.Model): parameter_names = ("monotcen", "monob","monovel","monoRpRs","multitcen", "multib","multiP","multiRpRs","multia_Rs","LD1","LD2") def get_value(self,t): cad=np.median(np.diff(t)) oversamp=10#Oversampling finetime=np.empty(0) for i in range(len(t)): finetime=np.hstack((finetime, np.linspace(t[i]-(1-1/oversamp)*(cad/2.), t[i]+(1-1/oversamp)*(cad/2.), oversamp) )) finetime=np.sort(finetime) model=np.zeros((len(finetime))) for nmod in range(nmodels): z = np.sqrt(getattr(self,'b'+str(nmod))**2+(getattr(self,'vel'+str(nmod))*(finetime - getattr(self,'tcen'+str(nmod))))**2) #Removed the flux component below. 
Can be done by GP model *= occultquad(z, getattr(self,'RpRs'+str(nmod)), np.array((self.LD1,self.LD2))) return np.average(np.resize(model, (len(t), oversamp)), axis=1) class MonotransitModel_plus_plx2(celerite.modeling.Model): parameter_names = tuple(["monotcen", "monob","monovel","monoRpRs"]+ ['multi'+item for sublist in [["tcen"+str(n), "b"+str(n),"P"+str(n),"RpRs"+str(n),"a_Rs"+str(n)] for n in range(1,3)]+ [["LD1","LD2"]] for item in sublist ]) def get_value(self,t): cad=np.median(np.diff(t)) oversamp=10#Oversampling finetime=np.empty(0) for i in range(len(t)): finetime=np.hstack((finetime, np.linspace(t[i]-(1-1/oversamp)*(cad/2.), t[i]+(1-1/oversamp)*(cad/2.), oversamp) )) finetime=np.sort(finetime) model=np.zeros((len(finetime))) for nmod in range(nmodels): z = np.sqrt(getattr(self,'b'+str(nmod))**2+(getattr(self,'vel'+str(nmod))*(finetime - getattr(self,'tcen'+str(nmod))))**2) #Removed the flux component below. Can be done by GP model *= occultquad(z, getattr(self,'RpRs'+str(nmod)), np.array((self.LD1,self.LD2))) return np.average(np.resize(model, (len(t), oversamp)), axis=1) class MonotransitModelx3_plus_plx2(celerite.modeling.Model): parameter_names = tuple(['mono'+item for sublist in [["tcen"+str(n), "b"+str(n),"vel"+str(n),"RpRs"+str(n)] for n in range(1,4)]]+\ ['multi'+item for sublist in [["tcen"+str(n), "b"+str(n),"P"+str(n),"RpRs"+str(n),"a_Rs"+str(n)] for n in range(4,6)]]+\ ["LD1","LD2"]]) def get_value(self,t): cad=np.median(np.diff(t)) oversamp=10#Oversampling finetime=np.empty(0) for i in range(len(t)): finetime=np.hstack((finetime, np.linspace(t[i]-(1-1/oversamp)*(cad/2.), t[i]+(1-1/oversamp)*(cad/2.), oversamp) )) finetime=np.sort(finetime) model=np.zeros((len(finetime))) for nmod in range(nmodels): z = np.sqrt(getattr(self,'b'+str(nmod))**2+(getattr(self,'vel'+str(nmod))*(finetime - getattr(self,'tcen'+str(nmod))))**2) #Removed the flux component below. 
Can be done by GP model *= occultquad(z, getattr(self,'RpRs'+str(nmod)), np.array((self.LD1,self.LD2))) return np.average(np.resize(model, (len(t), oversamp)), axis=1) ''' def k2_quickdat(kic): kicdat=pd.DataFrame.from_csv("https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=k2targets&where=epic_number=%27"+str(int(kic))+"%27") if len(kicdat.shape)>1: print("Multiple entries - ",str(kicdat.shape)) kicdat=kicdat.iloc[0] return kicdat ''' def MonoLnPriorDict(params,priors): lp=0 for key in priors.keys(): #print(params[n],key,priors[key]) if params[key]<priors[key][0] or params[key]>priors[key][1]: #hidesously low number that still has gradient towards "mean" of the uniform priors. lp-=1e15 * (params[key]-(0.5*(priors[key][0]+priors[key][1])))**2 #print(key," over prior limit") if len(priors[key])>2: if priors[key][2]=='gaussian': lp+=stats.norm(priors[key][3],priors[key][4]).pdf(params[key]) elif priors[key][2]=='normlim': #Special velocity prior from min period & density: lp+=((1.0-stats.norm.cdf(params[key],priors[key][3],priors[key][4]))*params[key])**2 return lp ''' def MonoLnPrior(params,priors): lp=0 for n,key in enumerate(priors.keys()): #print(params[n],key,priors[key]) if params[n]<priors[key][0] or params[n]>priors[key][1]: #hideously low number that still has gradient towards "mean" of the uniform priors. Scaled by prior width lp-=1e20 * (((params[n]-0.5*(priors[key][0]+priors[key][1])))/(priors[key][1]-priors[key][0]))**2 #print(key," over prior limit") if len(priors[key])>2: if priors[key][2]=='gaussian': lp+=stats.norm(priors[key][3],priors[key][4]).pdf(params[n]) elif priors[key][2]=='normlim': #Special velocity prior from min period & density: lp+=((1.0-stats.norm.cdf(params[n],priors[key][3],priors[key][4]))*params[n])**2 elif priors[key][2]=='evans': #Evans 2015 limit on amplitude - same as p(Ai) = Gam(1, 100) (apparently, although I cant see it.) 
#Basically prior is ln lp-=priors[key][3]*np.exp(params[n])# return lp def MonoLogProb(params,lc,priors,gp): gp.set_parameter_vector(params) gp.compute(lc[:,0],lc[:,2]) ll = gp.log_likelihood(lc[:,1]) lp = MonoLnPrior(params,priors) return (ll+lp) def MonoNegLogProb(params,lc,priors,gp): return -1*MonoLogProb(params,lc,priors,gp) def MonoOnlyLogProb(params,lc,priors,monomodel): monomodel.set_parameter_vector(params) ll = np.sum(-2*lc[:,2]**-2*(lc[:,1]-monomodel.get_value(lc[:,0]))**2)#-(0.5*len(lc[:,0]))*np.log(2*np.pi*lc[:,2]**2) #Constant not neceaary for gradient descent lp = MonoLnPrior(params,priors) return (ll+lp) def MonoOnlyNegLogProb(params,lc,priors,monomodel): return -1*MonoOnlyLogProb(params,lc,priors,monomodel) class RotationTerm(celerite.terms.Term): parameter_names = ("log_amp", "log_timescale", "log_period", "log_factor") def get_real_coefficients(self, params): log_amp, log_timescale, log_period, log_factor = params f = np.exp(log_factor) return ( np.exp(log_amp) * (1.0 + f) / (2.0 + f), np.exp(-log_timescale), ) def get_complex_coefficients(self, params): log_amp, log_timescale, log_period, log_factor = params f = np.exp(log_factor) return ( np.exp(log_amp) / (2.0 + f), 0.0, np.exp(-log_timescale), 2*np.pi*np.exp(-log_period), ) def neg_log_like(params, y, gp): gp.set_parameter_vector(params) return -gp.log_likelihood(y) def grad_neg_log_like(params, y, gp): gp.set_parameter_vector(params) return -gp.grad_log_likelihood(y)[1] ''' def grad_nll(p,gp,y,prior=[]): #Gradient of the objective function for TrainGP gp.set_parameter_vector(p) return -gp.grad_log_likelihood(y, quiet=True) def nll_gp(p,gp,y,prior=[]): #inverted LnLikelihood function for GP training gp.set_parameter_vector(p) if prior!=[]: prob=MonoLogProb(p,y,prior,gp) return -prob if np.isfinite(prob) else 1e25 else: ll = gp.log_likelihood(y, quiet=True) return -ll if np.isfinite(ll) else 1e25''' def VelToOrbit(vel, Rs, Ms, ecc=0, omega=0,timebin=86400.0): '''Takes in velocity (in units of 
stellar radius), Stellar radius estimate and (later) eccentricity & angle of periastron. Returns Semi major axis (AU) and period (days)''' Rs=Rs*695500000# if Rs<5 else Rs Ms=Ms*1.96e30# if Ms<5 else Ms SMA=(6.67e-11*Ms)/((vel*Rs/86400.)**2) Per=(2*np.pi*SMA)/(vel*Rs/86400) return SMA/1.49e11, Per/86400 def getKeplerLDs(Ts,logg=4.43812,FeH=0.0,how='2'): #Get Kepler Limb darkening coefficients. from scipy.interpolate import CloughTocher2DInterpolator as ct2d #print(label) types={'1':[3],'2':[4, 5],'3':[6, 7, 8],'4':[9, 10, 11, 12]} if how in types: checkint = types[how] #print(checkint) else: print("no key...") arr = np.genfromtxt("KeplerLDlaws.txt",skip_header=2) FeHarr=np.unique(arr[:, 2]) #Just using a single value of FeH if not (type(FeH)==float) and not (type(FeH)==int): FeH=np.nanmedian(FeH) FeH=FeHarr[find_nearest(FeHarr,FeH)] feh_ix=arr[:,2]==FeH feh_ix=arr[:,2]==FeH Tlen=1 if type(Ts)==float or type(Ts)==int else len(Ts) outarr=np.zeros((Tlen,len(checkint))) for n,i in enumerate(checkint): u_interp=ct2d(np.column_stack((arr[feh_ix,0],arr[feh_ix,1])),arr[feh_ix,i]) if (type(Ts)==float)+(type(Ts)==int) and ((Ts<50000.)*(Ts>=2000)): outarr[0,n]=u_interp(Ts,logg) elif ((Ts<50000.)*(Ts>=2000)).all(): if type(logg)==float: outarr[:,n]=np.array([u_interp(T,logg) for T in Ts]) else: outarr[:,n]=np.array([u_interp(Ts[t],logg[t]) for t in range(len(Ts))]) else: print('Temperature outside limits') outarr=None break return outarr def nonan(lc): return np.logical_not(np.isnan(np.sum(lc,axis=1))) def AnomCutDiff(flux,thresh=4.2): #Uses differences between points to establish anomalies. #Only removes single points with differences to both neighbouring points greater than threshold above median difference (ie ~rms) #Fast: 0.05s for 1 million-point array. 
#Must be nan-cut first diffarr=np.vstack((np.diff(flux[1:]),np.diff(flux[:-1]))) diffarr/=np.median(abs(diffarr[0,:])) anoms=np.hstack((True,((diffarr[0,:]*diffarr[1,:])>0)+(abs(diffarr[0,:])<thresh)+(abs(diffarr[1,:])<thresh),True)) return anoms def CalcTdur(vel, b, p): '''Caculates a velocity (in v/Rs) from the input transit duration Tdur, impact parameter b and planet-to-star ratio p''' # In Rs per day return (2*(1+p)*np.sqrt(1-(b/(1+p))**2))/vel def CalcVel(Tdur, b, p): '''Caculates a velocity (in v/Rs) from the input transit duration Tdur, impact parameter b and planet-to-star ratio p''' # In Rs per day return (2*(1+p)*np.sqrt(1-(b/(1+p))**2))/Tdur def PlotBestMono(lc, monomodel, vector, fname=None): cad=np.median(np.diff(lc[:,0])) t=np.arange(lc[0,0],lc[-1,0]+0.01,cad) monomodel.set_parameter_vector(vector) ypred=monomodel.get_value(t) plt.errorbar(lc[:, 0], lc[:,1], yerr=lc[:, 2], fmt='.',color='#999999') plt.plot(lc[:, 0], lc[:,1], '.',color='#333399') plt.plot(t,ypred,'--',color='#003333',linewidth=2.0,label='Median transit model fit') if fname is not None: plt.savefig(fname) def PlotModel(lc, model, vector, prevmodels=[], plot=True, residuals=False, scale=1, nx=10000, GP=False, subGP=False, monomodel=0, verbose=True,fname=None): # - lc : lightcurve # - model : celerite-style model (eg gp) to apply the vector to # - vector : best-fit parameters to use cad=np.median(np.diff(lc[:,0])) t=np.arange(lc[0,0],lc[-1,0]+0.01,cad) if len(prevmodels)==0: model.set_parameter_vector(vector) model.compute(lc[:,0],lc[:,2]) ypreds,varpreds=model.predict(lc[:,1], t) stds=np.sqrt(np.diag(varpreds)) model.mean.get_value(t) modelfits=np.column_stack((ypreds-stds*2,ypreds-stds,ypreds,ypreds+stds,ypreds+stds*2,model.mean.get_value(t))) else: modelfits=prevmodels if residuals: #Subtracting bestfit model from both flux and model to give residuals newmodelfits=np.column_stack((modelfits[:,:5]-np.tile(modelfits[:,2], (5, 1)).swapaxes(0, 1),modelfits[:,5])) #subtracting median fit 
Ploty=lc[:,1]-modelfits[:, 2][(np.round((lc[:,0]-lc[0,0])/0.020431700249901041)).astype(int)] #p.xlim([t[np.where(redfits[:,2]==np.min(redfits[:,2]))]-1.6, t[np.where(redfits[:,2]==np.min(redfits[:,2]))]+1.6]) nomys=None #Not plotting the non-GP model. elif subGP: newmodelfits=np.column_stack((modelfits[:,:5]-np.tile(nomys, (5, 1)).swapaxes(0, 1),modelfits[:,5])) #subtracting median fit Ploty=lc[:,1]-modelfits[:,5] #p.xlim([t[np.where(redfits[:,2]==np.min(redfits[:,2]))]-1.6, t[np.where(redfits[:,2]==np.min(redfits[:,2]))]+1.6]) nomys=None #Not plotting the non-GP model. else: Ploty=lc[:,1] newmodelfits=np.copy(modelfits) if plot: plt.errorbar(lc[:, 0], lc[:,1], yerr=lc[:, 2], fmt=',',color='#999999',alpha=0.8,zorder=-100) plt.plot(lc[:, 0], lc[:,1], '.',color='#333399') #Plotting 1-sigma error region and models print(np.shape(newmodelfits)) plt.fill(np.hstack((t,t[::-1])), np.hstack((newmodelfits[:,2]-(newmodelfits[:,2]-newmodelfits[:,1]),(newmodelfits[:,2]+(newmodelfits[:,3]-newmodelfits[:,2]))[::-1])), '#3399CC', linewidth=0,label='$1-\sigma$ region ('+str(scale*100)+'% scaled)',alpha=0.5) plt.fill(np.hstack((t,t[::-1])), np.hstack(((1.0+newmodelfits[:,2]-newmodelfits[:,5])-(newmodelfits[:,2]-newmodelfits[:,1]),((1.0+newmodelfits[:,2]-newmodelfits[:,5])+(newmodelfits[:,3]-newmodelfits[:,2]))[::-1])), '#66BBCC', linewidth=0,label='$1-\sigma$ region without transit',alpha=0.5) plt.plot(t,modelfits[:,2],'-',color='#003333',linewidth=2.0,label='Median model fit') if not residuals and not subGP: plt.plot(t,model.mean.get_value(t),'--',color='#003333',linewidth=2.0,label='Median transit model fit') if not residuals and not subGP: #Putting title on upper (non-residuals) graph plt.title('Best fit model') plt.legend(loc=3,fontsize=9) if fname is not None: plt.savefig(fname) return modelfits
mit
phoebe-project/phoebe2-docs
development/examples/distribution_constraints.py
2
1988
#!/usr/bin/env python # coding: utf-8 # # Propagating Distributions through Constraints # # In this example script, we'll reproduce Figure 4 from the fitting release paper ([Conroy et al. 2020](http://phoebe-project.org/publications/2020Conroy+)). # # <img src="http://phoebe-project.org/images/figures/2020Conroy+_fig4.png" alt="Figure 4" width="800px"/> # # Setup # Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab). # In[1]: #!pip install -I "phoebe>=2.3,<2.4" # In[2]: import matplotlib.pyplot as plt plt.rc('font', family='serif', size=14, serif='STIXGeneral') plt.rc('mathtext', fontset='stix') # In[3]: import phoebe logger = phoebe.logger('warning') # In[4]: b = phoebe.default_binary() # In[5]: b.set_value('latex_repr', component='binary', value='orb') b.set_value('latex_repr', component='primary', value='1') b.set_value('latex_repr', component='secondary', value='2') # In[6]: b.add_distribution({'sma@binary': phoebe.uniform(5,8), 'incl@binary': phoebe.gaussian(75,10)}, distribution='mydist') # # Plotting Distributions # In[7]: dist = b.get_parameter('sma', component='binary', context='component').get_distribution('mydist') plt.clf() figure = plt.figure(figsize=(4,4)) _ = dist.plot(plot_uncertainties=False) _ = plt.tight_layout() _ = plt.savefig('figure_priors_sma.pdf') # In[8]: dist = b.get_parameter('incl', component='binary', context='component').get_distribution('mydist') plt.clf() figure = plt.figure(figsize=(4,4)) _ = dist.plot(plot_uncertainties=False) _ =plt.tight_layout() _ = plt.savefig('figure_priors_incl.pdf') # In[9]: dist = b.get_parameter('asini', component='binary', context='component').get_distribution('mydist') plt.clf() figure = plt.figure(figsize=(4,4)) _ = dist.plot(plot_uncertainties=False) _ = plt.tight_layout() _ = plt.savefig('figure_priors_asini.pdf') # In[ ]:
gpl-3.0
mbayon/TFG-MachineLearning
venv/lib/python3.6/site-packages/pandas/tests/indexes/datetimes/test_datetime.py
3
31691
import pytest import numpy as np from datetime import date, timedelta, time import pandas as pd import pandas.util.testing as tm from pandas.compat import lrange from pandas.compat.numpy import np_datetime64_compat from pandas import (DatetimeIndex, Index, date_range, Series, DataFrame, Timestamp, datetime, offsets, _np_version_under1p8) from pandas.util.testing import assert_series_equal, assert_almost_equal randn = np.random.randn class TestDatetimeIndex(object): def test_get_loc(self): idx = pd.date_range('2000-01-01', periods=3) for method in [None, 'pad', 'backfill', 'nearest']: assert idx.get_loc(idx[1], method) == 1 assert idx.get_loc(idx[1].to_pydatetime(), method) == 1 assert idx.get_loc(str(idx[1]), method) == 1 if method is not None: assert idx.get_loc(idx[1], method, tolerance=pd.Timedelta('0 days')) == 1 assert idx.get_loc('2000-01-01', method='nearest') == 0 assert idx.get_loc('2000-01-01T12', method='nearest') == 1 assert idx.get_loc('2000-01-01T12', method='nearest', tolerance='1 day') == 1 assert idx.get_loc('2000-01-01T12', method='nearest', tolerance=pd.Timedelta('1D')) == 1 assert idx.get_loc('2000-01-01T12', method='nearest', tolerance=np.timedelta64(1, 'D')) == 1 assert idx.get_loc('2000-01-01T12', method='nearest', tolerance=timedelta(1)) == 1 with tm.assert_raises_regex(ValueError, 'must be convertible'): idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo') with pytest.raises(KeyError): idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours') assert idx.get_loc('2000', method='nearest') == slice(0, 3) assert idx.get_loc('2000-01', method='nearest') == slice(0, 3) assert idx.get_loc('1999', method='nearest') == 0 assert idx.get_loc('2001', method='nearest') == 2 with pytest.raises(KeyError): idx.get_loc('1999', method='pad') with pytest.raises(KeyError): idx.get_loc('2001', method='backfill') with pytest.raises(KeyError): idx.get_loc('foobar') with pytest.raises(TypeError): idx.get_loc(slice(2)) idx = 
pd.to_datetime(['2000-01-01', '2000-01-04']) assert idx.get_loc('2000-01-02', method='nearest') == 0 assert idx.get_loc('2000-01-03', method='nearest') == 1 assert idx.get_loc('2000-01', method='nearest') == slice(0, 2) # time indexing idx = pd.date_range('2000-01-01', periods=24, freq='H') tm.assert_numpy_array_equal(idx.get_loc(time(12)), np.array([12]), check_dtype=False) tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)), np.array([]), check_dtype=False) with pytest.raises(NotImplementedError): idx.get_loc(time(12, 30), method='pad') def test_get_indexer(self): idx = pd.date_range('2000-01-01', periods=3) exp = np.array([0, 1, 2], dtype=np.intp) tm.assert_numpy_array_equal(idx.get_indexer(idx), exp) target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour']) tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'), np.array([-1, 0, 1], dtype=np.intp)) tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'), np.array([0, 1, 2], dtype=np.intp)) tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'), np.array([0, 1, 1], dtype=np.intp)) tm.assert_numpy_array_equal( idx.get_indexer(target, 'nearest', tolerance=pd.Timedelta('1 hour')), np.array([0, -1, 1], dtype=np.intp)) with pytest.raises(ValueError): idx.get_indexer(idx[[0]], method='nearest', tolerance='foo') def test_reasonable_keyerror(self): # GH #1062 index = DatetimeIndex(['1/3/2000']) try: index.get_loc('1/1/2000') except KeyError as e: assert '2000' in str(e) def test_roundtrip_pickle_with_tz(self): # GH 8367 # round-trip of timezone index = date_range('20130101', periods=3, tz='US/Eastern', name='foo') unpickled = tm.round_trip_pickle(index) tm.assert_index_equal(index, unpickled) def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self): # GH7774 index = date_range('20130101', periods=3, tz='US/Eastern') assert str(index.reindex([])[0].tz) == 'US/Eastern' assert str(index.reindex(np.array([]))[0].tz) == 'US/Eastern' def test_time_loc(self): # GH8667 from 
datetime import time from pandas._libs.index import _SIZE_CUTOFF ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64) key = time(15, 11, 30) start = key.hour * 3600 + key.minute * 60 + key.second step = 24 * 3600 for n in ns: idx = pd.date_range('2014-11-26', periods=n, freq='S') ts = pd.Series(np.random.randn(n), index=idx) i = np.arange(start, n, step) tm.assert_numpy_array_equal(ts.index.get_loc(key), i, check_dtype=False) tm.assert_series_equal(ts[key], ts.iloc[i]) left, right = ts.copy(), ts.copy() left[key] *= -10 right.iloc[i] *= -10 tm.assert_series_equal(left, right) def test_time_overflow_for_32bit_machines(self): # GH8943. On some machines NumPy defaults to np.int32 (for example, # 32-bit Linux machines). In the function _generate_regular_range # found in tseries/index.py, `periods` gets multiplied by `strides` # (which has value 1e9) and since the max value for np.int32 is ~2e9, # and since those machines won't promote np.int32 to np.int64, we get # overflow. periods = np.int_(1000) idx1 = pd.date_range(start='2000', periods=periods, freq='S') assert len(idx1) == periods idx2 = pd.date_range(end='2000', periods=periods, freq='S') assert len(idx2) == periods def test_nat(self): assert DatetimeIndex([np.nan])[0] is pd.NaT def test_ufunc_coercions(self): idx = date_range('2011-01-01', periods=3, freq='2D', name='x') delta = np.timedelta64(1, 'D') for result in [idx + delta, np.add(idx, delta)]: assert isinstance(result, DatetimeIndex) exp = date_range('2011-01-02', periods=3, freq='2D', name='x') tm.assert_index_equal(result, exp) assert result.freq == '2D' for result in [idx - delta, np.subtract(idx, delta)]: assert isinstance(result, DatetimeIndex) exp = date_range('2010-12-31', periods=3, freq='2D', name='x') tm.assert_index_equal(result, exp) assert result.freq == '2D' delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'), np.timedelta64(3, 'D')]) for result in [idx + delta, np.add(idx, delta)]: assert isinstance(result, DatetimeIndex) 
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'], freq='3D', name='x') tm.assert_index_equal(result, exp) assert result.freq == '3D' for result in [idx - delta, np.subtract(idx, delta)]: assert isinstance(result, DatetimeIndex) exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'], freq='D', name='x') tm.assert_index_equal(result, exp) assert result.freq == 'D' def test_week_of_month_frequency(self): # GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise d1 = date(2002, 9, 1) d2 = date(2013, 10, 27) d3 = date(2012, 9, 30) idx1 = DatetimeIndex([d1, d2]) idx2 = DatetimeIndex([d3]) result_append = idx1.append(idx2) expected = DatetimeIndex([d1, d2, d3]) tm.assert_index_equal(result_append, expected) result_union = idx1.union(idx2) expected = DatetimeIndex([d1, d3, d2]) tm.assert_index_equal(result_union, expected) # GH 5115 result = date_range("2013-1-1", periods=4, freq='WOM-1SAT') dates = ['2013-01-05', '2013-02-02', '2013-03-02', '2013-04-06'] expected = DatetimeIndex(dates, freq='WOM-1SAT') tm.assert_index_equal(result, expected) def test_hash_error(self): index = date_range('20010101', periods=10) with tm.assert_raises_regex(TypeError, "unhashable type: %r" % type(index).__name__): hash(index) def test_stringified_slice_with_tz(self): # GH2658 import datetime start = datetime.datetime.now() idx = DatetimeIndex(start=start, freq="1d", periods=10) df = DataFrame(lrange(10), index=idx) df["2013-01-14 23:44:34.437768-05:00":] # no exception here def test_append_join_nondatetimeindex(self): rng = date_range('1/1/2000', periods=10) idx = Index(['a', 'b', 'c', 'd']) result = rng.append(idx) assert isinstance(result[0], Timestamp) # it works rng.join(idx, how='outer') def test_to_period_nofreq(self): idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04']) pytest.raises(ValueError, idx.to_period) idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'], freq='infer') assert idx.freqstr == 'D' expected = 
pd.PeriodIndex(['2000-01-01', '2000-01-02', '2000-01-03'], freq='D') tm.assert_index_equal(idx.to_period(), expected) # GH 7606 idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03']) assert idx.freqstr is None tm.assert_index_equal(idx.to_period(), expected) def test_comparisons_coverage(self): rng = date_range('1/1/2000', periods=10) # raise TypeError for now pytest.raises(TypeError, rng.__lt__, rng[3].value) result = rng == list(rng) exp = rng == rng tm.assert_numpy_array_equal(result, exp) def test_comparisons_nat(self): fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0]) fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0]) didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT, '2014-05-01', '2014-07-01']) didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT, '2014-06-01', '2014-07-01']) darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'), np_datetime64_compat('2014-03-01 00:00Z'), np_datetime64_compat('nat'), np.datetime64('nat'), np_datetime64_compat('2014-06-01 00:00Z'), np_datetime64_compat('2014-07-01 00:00Z')]) if _np_version_under1p8: # cannot test array because np.datetime('nat') returns today's date cases = [(fidx1, fidx2), (didx1, didx2)] else: cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)] # Check pd.NaT is handles as the same as np.nan with tm.assert_produces_warning(None): for idx1, idx2 in cases: result = idx1 < idx2 expected = np.array([True, False, False, False, True, False]) tm.assert_numpy_array_equal(result, expected) result = idx2 > idx1 expected = np.array([True, False, False, False, True, False]) tm.assert_numpy_array_equal(result, expected) result = idx1 <= idx2 expected = np.array([True, False, False, False, True, True]) tm.assert_numpy_array_equal(result, expected) result = idx2 >= idx1 expected = np.array([True, False, False, False, True, True]) tm.assert_numpy_array_equal(result, expected) result = idx1 == idx2 expected = np.array([False, False, False, False, False, True]) 
tm.assert_numpy_array_equal(result, expected) result = idx1 != idx2 expected = np.array([True, True, True, True, True, False]) tm.assert_numpy_array_equal(result, expected) with tm.assert_produces_warning(None): for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]: result = idx1 < val expected = np.array([False, False, False, False, False, False]) tm.assert_numpy_array_equal(result, expected) result = idx1 > val tm.assert_numpy_array_equal(result, expected) result = idx1 <= val tm.assert_numpy_array_equal(result, expected) result = idx1 >= val tm.assert_numpy_array_equal(result, expected) result = idx1 == val tm.assert_numpy_array_equal(result, expected) result = idx1 != val expected = np.array([True, True, True, True, True, True]) tm.assert_numpy_array_equal(result, expected) # Check pd.NaT is handles as the same as np.nan with tm.assert_produces_warning(None): for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]: result = idx1 < val expected = np.array([True, False, False, False, False, False]) tm.assert_numpy_array_equal(result, expected) result = idx1 > val expected = np.array([False, False, False, False, True, True]) tm.assert_numpy_array_equal(result, expected) result = idx1 <= val expected = np.array([True, False, True, False, False, False]) tm.assert_numpy_array_equal(result, expected) result = idx1 >= val expected = np.array([False, False, True, False, True, True]) tm.assert_numpy_array_equal(result, expected) result = idx1 == val expected = np.array([False, False, True, False, False, False]) tm.assert_numpy_array_equal(result, expected) result = idx1 != val expected = np.array([True, True, False, True, True, True]) tm.assert_numpy_array_equal(result, expected) def test_map(self): rng = date_range('1/1/2000', periods=10) f = lambda x: x.strftime('%Y%m%d') result = rng.map(f) exp = Index([f(x) for x in rng], dtype='<U8') tm.assert_index_equal(result, exp) def test_iteration_preserves_tz(self): tm._skip_if_no_dateutil() # GH 8890 import dateutil index 
= date_range("2012-01-01", periods=3, freq='H', tz='US/Eastern') for i, ts in enumerate(index): result = ts expected = index[i] assert result == expected index = date_range("2012-01-01", periods=3, freq='H', tz=dateutil.tz.tzoffset(None, -28800)) for i, ts in enumerate(index): result = ts expected = index[i] assert result._repr_base == expected._repr_base assert result == expected # 9100 index = pd.DatetimeIndex(['2014-12-01 03:32:39.987000-08:00', '2014-12-01 04:12:34.987000-08:00']) for i, ts in enumerate(index): result = ts expected = index[i] assert result._repr_base == expected._repr_base assert result == expected def test_misc_coverage(self): rng = date_range('1/1/2000', periods=5) result = rng.groupby(rng.day) assert isinstance(list(result.values())[0][0], Timestamp) idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02']) assert not idx.equals(list(idx)) non_datetime = Index(list('abc')) assert not idx.equals(list(non_datetime)) def test_string_index_series_name_converted(self): # #1644 df = DataFrame(np.random.randn(10, 4), index=date_range('1/1/2000', periods=10)) result = df.loc['1/3/2000'] assert result.name == df.index[2] result = df.T['1/3/2000'] assert result.name == df.index[2] def test_overflow_offset(self): # xref https://github.com/statsmodels/statsmodels/issues/3374 # ends up multiplying really large numbers which overflow t = Timestamp('2017-01-13 00:00:00', freq='D') offset = 20169940 * pd.offsets.Day(1) def f(): t + offset pytest.raises(OverflowError, f) def f(): offset + t pytest.raises(OverflowError, f) def f(): t - offset pytest.raises(OverflowError, f) def test_get_duplicates(self): idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02', '2000-01-03', '2000-01-03', '2000-01-04']) result = idx.get_duplicates() ex = DatetimeIndex(['2000-01-02', '2000-01-03']) tm.assert_index_equal(result, ex) def test_argmin_argmax(self): idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02']) assert idx.argmin() == 1 assert 
idx.argmax() == 0 def test_sort_values(self): idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02']) ordered = idx.sort_values() assert ordered.is_monotonic ordered = idx.sort_values(ascending=False) assert ordered[::-1].is_monotonic ordered, dexer = idx.sort_values(return_indexer=True) assert ordered.is_monotonic tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp)) ordered, dexer = idx.sort_values(return_indexer=True, ascending=False) assert ordered[::-1].is_monotonic tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp)) def test_take(self): dates = [datetime(2010, 1, 1, 14), datetime(2010, 1, 1, 15), datetime(2010, 1, 1, 17), datetime(2010, 1, 1, 21)] for tz in [None, 'US/Eastern', 'Asia/Tokyo']: idx = DatetimeIndex(start='2010-01-01 09:00', end='2010-02-01 09:00', freq='H', tz=tz, name='idx') expected = DatetimeIndex(dates, freq=None, name='idx', tz=tz) taken1 = idx.take([5, 6, 8, 12]) taken2 = idx[[5, 6, 8, 12]] for taken in [taken1, taken2]: tm.assert_index_equal(taken, expected) assert isinstance(taken, DatetimeIndex) assert taken.freq is None assert taken.tz == expected.tz assert taken.name == expected.name def test_take_fill_value(self): # GH 12631 idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'], name='xxx') result = idx.take(np.array([1, 0, -1])) expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], name='xxx') tm.assert_index_equal(result, expected) # fill_value result = idx.take(np.array([1, 0, -1]), fill_value=True) expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'], name='xxx') tm.assert_index_equal(result, expected) # allow_fill=False result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], name='xxx') tm.assert_index_equal(result, expected) msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') with tm.assert_raises_regex(ValueError, 
msg): idx.take(np.array([1, 0, -2]), fill_value=True) with tm.assert_raises_regex(ValueError, msg): idx.take(np.array([1, 0, -5]), fill_value=True) with pytest.raises(IndexError): idx.take(np.array([1, -5])) def test_take_fill_value_with_timezone(self): idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'], name='xxx', tz='US/Eastern') result = idx.take(np.array([1, 0, -1])) expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], name='xxx', tz='US/Eastern') tm.assert_index_equal(result, expected) # fill_value result = idx.take(np.array([1, 0, -1]), fill_value=True) expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'], name='xxx', tz='US/Eastern') tm.assert_index_equal(result, expected) # allow_fill=False result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'], name='xxx', tz='US/Eastern') tm.assert_index_equal(result, expected) msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') with tm.assert_raises_regex(ValueError, msg): idx.take(np.array([1, 0, -2]), fill_value=True) with tm.assert_raises_regex(ValueError, msg): idx.take(np.array([1, 0, -5]), fill_value=True) with pytest.raises(IndexError): idx.take(np.array([1, -5])) def test_map_bug_1677(self): index = DatetimeIndex(['2012-04-25 09:30:00.393000']) f = index.asof result = index.map(f) expected = Index([f(index[0])]) tm.assert_index_equal(result, expected) def test_groupby_function_tuple_1677(self): df = DataFrame(np.random.rand(100), index=date_range("1/1/2000", periods=100)) monthly_group = df.groupby(lambda x: (x.year, x.month)) result = monthly_group.mean() assert isinstance(result.index[0], tuple) def test_append_numpy_bug_1681(self): # another datetime64 bug dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI') a = DataFrame() c = DataFrame({'A': 'foo', 'B': dr}, index=dr) result = a.append(c) assert (result['B'] == dr).all() def 
test_isin(self): index = tm.makeDateIndex(4) result = index.isin(index) assert result.all() result = index.isin(list(index)) assert result.all() assert_almost_equal(index.isin([index[2], 5]), np.array([False, False, True, False])) def test_time(self): rng = pd.date_range('1/1/2000', freq='12min', periods=10) result = pd.Index(rng).time expected = [t.time() for t in rng] assert (result == expected).all() def test_date(self): rng = pd.date_range('1/1/2000', freq='12H', periods=10) result = pd.Index(rng).date expected = [t.date() for t in rng] assert (result == expected).all() def test_does_not_convert_mixed_integer(self): df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs: randn(), r_idx_type='i', c_idx_type='dt') cols = df.columns.join(df.index, how='outer') joined = cols.join(df.columns) assert cols.dtype == np.dtype('O') assert cols.dtype == joined.dtype tm.assert_numpy_array_equal(cols.values, joined.values) def test_slice_keeps_name(self): # GH4226 st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles') et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles') dr = pd.date_range(st, et, freq='H', name='timebucket') assert dr[1:].name == dr.name def test_join_self(self): index = date_range('1/1/2000', periods=10) kinds = 'outer', 'inner', 'left', 'right' for kind in kinds: joined = index.join(index, how=kind) assert index is joined def assert_index_parameters(self, index): assert index.freq == '40960N' assert index.inferred_freq == '40960N' def test_ns_index(self): nsamples = 400 ns = int(1e9 / 24414) dtstart = np.datetime64('2012-09-20T00:00:00') dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns') freq = ns * offsets.Nano() index = pd.DatetimeIndex(dt, freq=freq, name='time') self.assert_index_parameters(index) new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq) self.assert_index_parameters(new_index) def test_join_with_period_index(self): df = tm.makeCustomDataframe( 10, 10, 
data_gen_f=lambda *args: np.random.randint(2), c_idx_type='p', r_idx_type='dt') s = df.iloc[:5, 0] joins = 'left', 'right', 'inner', 'outer' for join in joins: with tm.assert_raises_regex(ValueError, 'can only call with other ' 'PeriodIndex-ed objects'): df.columns.join(s.index, how=join) def test_factorize(self): idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02', '2014-02', '2014-03', '2014-03']) exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp) exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03']) arr, idx = idx1.factorize() tm.assert_numpy_array_equal(arr, exp_arr) tm.assert_index_equal(idx, exp_idx) arr, idx = idx1.factorize(sort=True) tm.assert_numpy_array_equal(arr, exp_arr) tm.assert_index_equal(idx, exp_idx) # tz must be preserved idx1 = idx1.tz_localize('Asia/Tokyo') exp_idx = exp_idx.tz_localize('Asia/Tokyo') arr, idx = idx1.factorize() tm.assert_numpy_array_equal(arr, exp_arr) tm.assert_index_equal(idx, exp_idx) idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01', '2014-03', '2014-01']) exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp) exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03']) arr, idx = idx2.factorize(sort=True) tm.assert_numpy_array_equal(arr, exp_arr) tm.assert_index_equal(idx, exp_idx) exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp) exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01']) arr, idx = idx2.factorize() tm.assert_numpy_array_equal(arr, exp_arr) tm.assert_index_equal(idx, exp_idx) # freq must be preserved idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo') exp_arr = np.array([0, 1, 2, 3], dtype=np.intp) arr, idx = idx3.factorize() tm.assert_numpy_array_equal(arr, exp_arr) tm.assert_index_equal(idx, idx3) def test_factorize_tz(self): # GH 13750 for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']: base = pd.date_range('2016-11-05', freq='H', periods=100, tz=tz) idx = base.repeat(5) exp_arr = np.arange(100, dtype=np.intp).repeat(5) for obj in [idx, pd.Series(idx)]: arr, 
res = obj.factorize() tm.assert_numpy_array_equal(arr, exp_arr) tm.assert_index_equal(res, base) def test_factorize_dst(self): # GH 13750 idx = pd.date_range('2016-11-06', freq='H', periods=12, tz='US/Eastern') for obj in [idx, pd.Series(idx)]: arr, res = obj.factorize() tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp)) tm.assert_index_equal(res, idx) idx = pd.date_range('2016-06-13', freq='H', periods=12, tz='US/Eastern') for obj in [idx, pd.Series(idx)]: arr, res = obj.factorize() tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp)) tm.assert_index_equal(res, idx) def test_slice_with_negative_step(self): ts = Series(np.arange(20), date_range('2014-01-01', periods=20, freq='MS')) SLC = pd.IndexSlice def assert_slices_equivalent(l_slc, i_slc): assert_series_equal(ts[l_slc], ts.iloc[i_slc]) assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc]) assert_series_equal(ts.loc[l_slc], ts.iloc[i_slc]) assert_slices_equivalent(SLC[Timestamp('2014-10-01')::-1], SLC[9::-1]) assert_slices_equivalent(SLC['2014-10-01'::-1], SLC[9::-1]) assert_slices_equivalent(SLC[:Timestamp('2014-10-01'):-1], SLC[:8:-1]) assert_slices_equivalent(SLC[:'2014-10-01':-1], SLC[:8:-1]) assert_slices_equivalent(SLC['2015-02-01':'2014-10-01':-1], SLC[13:8:-1]) assert_slices_equivalent(SLC[Timestamp('2015-02-01'):Timestamp( '2014-10-01'):-1], SLC[13:8:-1]) assert_slices_equivalent(SLC['2015-02-01':Timestamp('2014-10-01'):-1], SLC[13:8:-1]) assert_slices_equivalent(SLC[Timestamp('2015-02-01'):'2014-10-01':-1], SLC[13:8:-1]) assert_slices_equivalent(SLC['2014-10-01':'2015-02-01':-1], SLC[:0]) def test_slice_with_zero_step_raises(self): ts = Series(np.arange(20), date_range('2014-01-01', periods=20, freq='MS')) tm.assert_raises_regex(ValueError, 'slice step cannot be zero', lambda: ts[::0]) tm.assert_raises_regex(ValueError, 'slice step cannot be zero', lambda: ts.loc[::0]) tm.assert_raises_regex(ValueError, 'slice step cannot be zero', lambda: ts.loc[::0]) def 
test_slice_bounds_empty(self): # GH 14354 empty_idx = DatetimeIndex(freq='1H', periods=0, end='2015') right = empty_idx._maybe_cast_slice_bound('2015-01-02', 'right', 'loc') exp = Timestamp('2015-01-02 23:59:59.999999999') assert right == exp left = empty_idx._maybe_cast_slice_bound('2015-01-02', 'left', 'loc') exp = Timestamp('2015-01-02 00:00:00') assert left == exp def test_slice_duplicate_monotonic(self): # https://github.com/pandas-dev/pandas/issues/16515 idx = pd.DatetimeIndex(['2017', '2017']) result = idx._maybe_cast_slice_bound('2017-01-01', 'left', 'loc') expected = Timestamp('2017-01-01') assert result == expected
mit
EderSantana/deeplivecoding
demo-rehearsal.py
3
7905
# coding: utf-8 ## Theano Basics # In[1]: from __future__ import print_function import theano import numpy as np from theano import tensor as T floatX = theano.config.floatX # In[2]: # Convention: # uppercase: symbolic theano element or function # lowercase: numpy array W = T.vector('w') X = T.matrix('X') Y = X.dot(W) F = theano.function([W,X], Y) w = np.ones(4) x = np.ones((10,4)) y = F(w,x) print(y) # In[3]: # The most underused tool in machine learning # AUTODIFF grad_w = T.grad(Y.sum(), W) F_grad = theano.function([W,X], grad_w) g = F_grad(w,x) # this should be equal to the sum of the columns of X (do you know how to matrix calculus?) print(g) # In[4]: # An easier example B = T.scalar('E') R = T.sqr(B) A = T.grad(R, B) Z = theano.function([B], A) i = 2 l = Z(i) print(l) # In[5]: # If that didn't blow your mind, well, it should have. def sharedX(X): return theano.shared(X.astype(floatX)) B = sharedX(np.ones(2)) R = T.sqr(B).sum() A = T.grad(R, B) Z = theano.function([], R, updates={B: B - .1*A}) for i in range(10): print('cost function = {}'.format(Z())) print('parameters = {}'.format(B.get_value())) # Try to change range to 100 to see what happens ## Neural Nets # In[6]: """ Now that we now how to sum, we have enough to Deep Learn ... I should say something in the board about the Model-View-Controller way we usually deep learn with Theano. 
Model : Neural net parameters and dataset generator View : Logging, graph updates, saving cross-validated best parameters Controller : Update algorithm that follows gradient directions to optimize paramters Download this dataset : http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz """ get_ipython().magic(u'matplotlib inline') import cPickle from pylab import imshow train_set, valid_set, test_set = cPickle.load(file('mnist.pkl', 'r')) print(len(train_set)) train_x, train_y = train_set test_x , test_y = test_set print(train_x.shape) print(train_y.shape) _ = imshow(train_x[0].reshape((28,28)), cmap='gray') # In[7]: def batch_iterator(x, y, batch_size): num_batches = x.shape[0] // batch_size for i in xrange(0,num_batches): # TODO: use random integers instead of consecutive # values to avoid biased gradients first = i * batch_size last = (i+1) * batch_size x_batch = x[first:last].astype(floatX) y_pre = y[first:last] y_batch = np.zeros((batch_size, 10)) for row, col in enumerate(y_pre): y_batch[row, col] = 1 yield (x_batch, y_batch.astype(floatX)) for x,y in batch_iterator(train_x, train_y, 10000): print('{}, {}'.format(x.shape, y.shape)) print(y[0]) _ = imshow(x[0].reshape((28,28)), cmap='gray') # In[13]: # Define layers def rectifier(input_dim, output_dim, X): W = sharedX(np.random.normal(0, .001, size=(input_dim, output_dim))) b = sharedX(np.zeros((output_dim,))) Z = T.dot(X,W) + b.dimshuffle('x',0) O = T.switch(Z>0, Z, 0) return W,b,O def softmax(input_dim, output_dim, X, Y): W = sharedX(np.random.normal(0, .001, size=(input_dim, output_dim))) b = sharedX(np.zeros((output_dim,))) Z = T.dot(X,W) + b.dimshuffle('x',0) O = T.nnet.softmax(Z) cost = T.nnet.binary_crossentropy(O, Y).sum(axis=-1).mean() return W,b,O,cost X = T.matrix('X') Y = T.matrix('Y') W0, b0, O0 = rectifier(784, 100, X) W1, b1, O1 = rectifier(100, 100, O0) W2, b2, O2, cost = softmax(100, 10, O1, Y) # Always write tests F = theano.function([X,Y], [cost, O2]) x = 
np.zeros((100,784)).astype(floatX) y = np.ones((100,10)).astype(floatX) c, z = F(x,y) assert c>0 assert z.shape == (100,10) print(z[0]) # In[14]: from collections import OrderedDict params = [W0, b0, W1, b1, W2, b2] updates = dict() for p in params: updates[p] = p - .01 * T.grad(cost, p) updates = OrderedDict(updates) trainer = theano.function([X,Y], cost, updates=updates) # In[15]: num_epochs = 100 for i in range(num_epochs): print('-'*10) print('Epoch: {}'.format(i)) for iter,b in enumerate(batch_iterator(train_x, train_y, 128)): x = b[0] y = b[1] last_cost = trainer(x,y) print('cost: {}'.format(trainer(x,y))) # In[16]: w0 = W0.get_value() _ = imshow(w0[:,0].reshape((28,28)), cmap='gray') # In[17]: ERR = T.neq(O2.argmax(axis=-1), Y.argmax(axis=-1)) Ferr = theano.function([X,Y], ERR) def testnet(x, y): testerr = 0. for b1,b2 in batch_iterator(x, y, 500): testerr += Ferr(b1,b2) return testerr.sum() print('test error: {}, test acc: {}'.format(testnet(test_x, test_y), 1 - testnet(test_x, test_y) / 10000.)) ## Convolutional Nets # In[19]: """ We can do much better than this with more hidden neurons and dropout. Watch Alec Radford's presentation to see how to do that with Python/Theano: https://www.youtube.com/watch?v=S75EdAcXHKk For now, lets move on to convnets. 
""" from theano.tensor.nnet.conv import conv2d from theano.tensor.signal.downsample import max_pool_2d def conv_rectifier(input_channels, output_channels, filter_dim, X): W = sharedX(np.random.normal(0, .001, size=(output_channels, input_channels, filter_dim, filter_dim))) b = sharedX(np.zeros((output_channels,))) Z = conv2d(X,W) + b.dimshuffle('x',0,'x','x') DS = max_pool_2d(Z, ds=[2,2]) O = T.switch(DS>0, DS, 0) return W,b,O # test X = T.tensor4('X') W, b, O = conv_rectifier(1, 9, 5, X) F = theano.function([X], O) x = np.ones((5, 1, 28, 28)) print(x.shape) o = F(x) o.shape # In[21]: Y = T.matrix('Y') W0, b0, O0 = conv_rectifier(1, 20, 5, X) W1, b1, O1 = conv_rectifier(20, 50, 5, O0) # test F = theano.function([X], O1) o = F(x) print(o.shape) # In[22]: W2, b2, O2 = rectifier(50*4*4, 500, O1.flatten(2)) W3, b3, O3, cost = softmax(500, 10, O2, Y) # Teeeeeest x = np.ones((128,1,28,28)).astype(floatX) y = np.ones((128,10)).astype(floatX) F = theano.function([X, Y], [O3, cost]) z, c = F(x,y) assert c>0 assert z.shape == (128,10) # In[23]: # We need to modify the batch_iterator slightly to serve formated images def batch_iterator(x, y, batch_size): num_batches = x.shape[0] // batch_size for i in xrange(0,num_batches): # TODO: use random integers instead of consecutive # values to avoid biased gradients first = i * batch_size last = (i+1) * batch_size x_batch = x[first:last].reshape((batch_size,1,28,28)) y_pre = y[first:last] y_batch = np.zeros((batch_size, 10)) for row, col in enumerate(y_pre): y_batch[row, col] = 1 yield (x_batch, y_batch) for x,y in batch_iterator(train_x, train_y, 10000): print('{}, {}'.format(x.shape, y.shape)) print(y[0]) _ = imshow(x[0].reshape((28,28)), cmap='gray') # In[24]: params = [W0, b0, W1, b1, W2, b2, W3, b3] updates = dict() for p in params: updates[p] = p - .01 * T.grad(cost, p) updates = OrderedDict(updates) trainer = theano.function([X,Y], cost, updates=updates) # In[ ]: num_epochs = 100 for i in range(num_epochs): print('-'*10) 
print('Epoch: {}'.format(i)) for iter,b in enumerate(batch_iterator(train_x, train_y, 128)): x = b[0] y = b[1] last_cost = trainer(x,y) print('cost: {}'.format(trainer(x,y))) # In[ ]: w0 = W0.get_value() _ = imshow(w0[0,0,:,:].reshape((5,5)), cmap='gray') # In[ ]: ERR = T.neq(O3.argmax(axis=-1), Y.argmax(axis=-1)) Ferr = theano.function([X,Y], ERR) def testnet(x, y): testerr = 0. for b1,b2 in batch_iterator(x, y, 500): testerr += Ferr(b1,b2) return testerr.sum() print('test error: {}, test acc: {}'.format(testnet(test_x, test_y), 1 - testnet(test_x, test_y) / 10000.))
mit
PmagPy/PmagPy
programs/irmaq_magic.py
2
6781
#!/usr/bin/env python import sys import os import matplotlib if matplotlib.get_backend() != "TKAgg": matplotlib.use("TKAgg") import pmagpy.pmag as pmag from pmagpy import contribution_builder as cb import pmagpy.pmagplotlib as pmagplotlib def main(): """ NAME irmaq_magic.py DESCRIPTION plots IRM acquisition curves from measurements file SYNTAX irmaq_magic [command line options] INPUT takes magic formatted magic_measurements.txt files OPTIONS -h prints help message and quits -f FILE: specify input file, default is: magic_measurements.txt/measurements.txt -obj OBJ: specify object [loc, sit, sam, spc] for plot, default is by location -N ; do not normalize by last point - use original units -fmt [png,jpg,eps,pdf] set plot file format [default is svg] -sav save plot[s] and quit -DM MagIC data model number, default is 3 NOTE loc: location (study); sit: site; sam: sample; spc: specimen """ FIG = {} # plot dictionary FIG['exp'] = 1 # exp is figure 1 dir_path = './' plot, fmt = 0, 'svg' units = 'T', XLP = [] norm = 1 LP = "LP-IRM" if len(sys.argv) > 1: if '-h' in sys.argv: print(main.__doc__) sys.exit() data_model = int(pmag.get_named_arg("-DM", 3)) if '-N' in sys.argv: norm = 0 if '-sav' in sys.argv: plot = 1 if '-fmt' in sys.argv: ind = sys.argv.index("-fmt") fmt = sys.argv[ind + 1] if data_model == 3: in_file = pmag.get_named_arg("-f", 'measurements.txt') else: in_file = pmag.get_named_arg("-f", 'magic_measurements.txt') if '-WD' in sys.argv: ind = sys.argv.index('-WD') dir_path = sys.argv[ind + 1] dir_path = os.path.realpath(dir_path) in_file = pmag.resolve_file_name(in_file, dir_path) if '-WD' not in sys.argv: dir_path = os.path.split(in_file)[0] plot_by = pmag.get_named_arg("-obj", "loc") if data_model == 3: plot_key = 'location' if plot_by == 'sit': plot_key = 'site' if plot_by == 'sam': plot_key = 'sample' if plot_by == 'spc': plot_key = 'specimen' else: plot_key = 'er_location_name' if plot_by == 'sit': plot_key = 'er_site_name' if plot_by == 'sam': plot_key = 
'er_sample_name' if plot_by == 'spc': plot_key = 'er_specimen_name' # set defaults and get more information if needed if data_model == 3: dmag_key = 'treat_dc_field' else: dmag_key = 'treatment_dc_field' # if data_model == 3 and plot_key != 'specimen': # gonna need to read in more files print('-W- You are trying to plot measurements by {}'.format(plot_key)) print(' By default, this information is not available in your measurement file.') print(' Trying to acquire this information from {}'.format(dir_path)) con = cb.Contribution(dir_path) meas_df = con.propagate_location_to_measurements() if meas_df is None: print('-W- No data found in {}'.format(dir_path)) return if plot_key not in meas_df.columns: print('-W- Could not find required data.') print(' Try a different plot key.') return else: print('-I- Found {} information, continuing with plotting'.format(plot_key)) # need to take the data directly from the contribution here, to keep # location/site/sample columns in the measurements table data = con.tables['measurements'].convert_to_pmag_data_list() file_type = "measurements" else: data, file_type = pmag.magic_read(in_file) # read in data sids = pmag.get_specs(data) pmagplotlib.plot_init(FIG['exp'], 6, 6) # # # find desired intensity data # # get plotlist # plotlist = [] if data_model == 3: intlist = ['magn_moment', 'magn_volume', 'magn_mass', 'magnitude'] else: intlist = ['measurement_magnitude', 'measurement_magn_moment', 'measurement_magn_volume', 'measurement_magn_mass'] IntMeths = [] # get all the records with this lab protocol #print('data', len(data)) #print('data[0]', data[0]) if data_model == 3: data = pmag.get_dictitem(data, 'method_codes', LP, 'has') else: data = pmag.get_dictitem(data, 'magic_method_codes', LP, 'has') Ints = {} NoInts, int_key = 1, "" for key in intlist: # get all non-blank data for intensity type Ints[key] = pmag.get_dictitem(data, key, '', 'F') if len(Ints[key]) > 0: NoInts = 0 if int_key == "": int_key = key if NoInts == 1: print('No 
intensity information found') sys.exit() for rec in Ints[int_key]: if rec[plot_key] not in plotlist: plotlist.append(rec[plot_key]) plotlist.sort() for plt in plotlist: print(plt) INTblock = [] # get data with right intensity info whose plot_key matches plot data = pmag.get_dictitem(Ints[int_key], plot_key, plt, 'T') # get a list of specimens with appropriate data sids = pmag.get_specs(data) if len(sids) > 0: title = data[0][plot_key] for s in sids: INTblock = [] # get data for each specimen if data_model == 3: sdata = pmag.get_dictitem(data, 'specimen', s, 'T') else: sdata = pmag.get_dictitem(data, 'er_specimen_name', s, 'T') for rec in sdata: INTblock.append([float(rec[dmag_key]), 0, 0, float(rec[int_key]), 1, 'g']) pmagplotlib.plot_mag(FIG['exp'], INTblock, title, 0, units, norm) files = {} for key in list(FIG.keys()): files[key] = title + '_' + LP + '.' + fmt if plot == 0: pmagplotlib.draw_figs(FIG) ans = input(" S[a]ve to save plot, [q]uit, Return to continue: ") if ans == 'q': sys.exit() if ans == "a": pmagplotlib.save_plots(FIG, files) if plt != plotlist[-1]: # if it isn't the last plot, init the next one pmagplotlib.plot_init(FIG['exp'], 6, 6) else: pmagplotlib.save_plots(FIG, files) pmagplotlib.clearFIG(FIG['exp']) if __name__ == "__main__": main()
bsd-3-clause
riveridea/gnuradio
gr-digital/examples/example_timing.py
5
9211
#!/usr/bin/env python # # Copyright 2011-2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, digital, filter from gnuradio import blocks from gnuradio import channels from gnuradio import eng_notation from gnuradio.eng_option import eng_option from optparse import OptionParser try: import scipy except ImportError: print "Error: could not import scipy (http://www.scipy.org/)" sys.exit(1) try: import pylab except ImportError: print "Error: could not import pylab (http://matplotlib.sourceforge.net/)" sys.exit(1) from scipy import fftpack class example_timing(gr.top_block): def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset, mode=0): gr.top_block.__init__(self) rrc_taps = filter.firdes.root_raised_cosine( sps, sps, 1.0, rolloff, ntaps) gain = bw nfilts = 32 rrc_taps_rx = filter.firdes.root_raised_cosine( nfilts, sps*nfilts, 1.0, rolloff, ntaps*nfilts) data = 2.0*scipy.random.randint(0, 2, N) - 1.0 data = scipy.exp(1j*poffset) * data self.src = blocks.vector_source_c(data.tolist(), False) self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps) self.chn = channels.channel_model(noise, foffset, toffset) self.off = filter.fractional_resampler_cc(0.20, 1.0) if mode == 0: self.clk = digital.pfb_clock_sync_ccf(sps, gain, 
rrc_taps_rx, nfilts, nfilts//2, 1) self.taps = self.clk.taps() self.dtaps = self.clk.diff_taps() self.delay = int(scipy.ceil(((len(rrc_taps)-1)/2 + (len(self.taps[0])-1)/2)/float(sps))) + 1 self.vsnk_err = blocks.vector_sink_f() self.vsnk_rat = blocks.vector_sink_f() self.vsnk_phs = blocks.vector_sink_f() self.connect((self.clk,1), self.vsnk_err) self.connect((self.clk,2), self.vsnk_rat) self.connect((self.clk,3), self.vsnk_phs) else: # mode == 1 mu = 0.5 gain_mu = bw gain_omega = 0.25*gain_mu*gain_mu omega_rel_lim = 0.02 self.clk = digital.clock_recovery_mm_cc(sps, gain_omega, mu, gain_mu, omega_rel_lim) self.vsnk_err = blocks.vector_sink_f() self.connect((self.clk,1), self.vsnk_err) self.vsnk_src = blocks.vector_sink_c() self.vsnk_clk = blocks.vector_sink_c() self.connect(self.src, self.rrc, self.chn, self.off, self.clk, self.vsnk_clk) self.connect(self.src, self.vsnk_src) def main(): parser = OptionParser(option_class=eng_option, conflict_handler="resolve") parser.add_option("-N", "--nsamples", type="int", default=2000, help="Set the number of samples to process [default=%default]") parser.add_option("-S", "--sps", type="int", default=4, help="Set the samples per symbol [default=%default]") parser.add_option("-r", "--rolloff", type="eng_float", default=0.35, help="Set the rolloff factor [default=%default]") parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0, help="Set the loop bandwidth (PFB) or gain (M&M) [default=%default]") parser.add_option("-n", "--ntaps", type="int", default=45, help="Set the number of taps in the filters [default=%default]") parser.add_option("", "--noise", type="eng_float", default=0.0, help="Set the simulation noise voltage [default=%default]") parser.add_option("-f", "--foffset", type="eng_float", default=0.0, help="Set the simulation's normalized frequency offset (in Hz) [default=%default]") parser.add_option("-t", "--toffset", type="eng_float", default=1.0, help="Set the simulation's timing offset 
[default=%default]") parser.add_option("-p", "--poffset", type="eng_float", default=0.0, help="Set the simulation's phase offset [default=%default]") parser.add_option("-M", "--mode", type="int", default=0, help="Set the recovery mode (0: polyphase, 1: M&M) [default=%default]") (options, args) = parser.parse_args () # Adjust N for the interpolation by sps options.nsamples = options.nsamples // options.sps # Set up the program-under-test put = example_timing(options.nsamples, options.sps, options.rolloff, options.ntaps, options.bandwidth, options.noise, options.foffset, options.toffset, options.poffset, options.mode) put.run() if options.mode == 0: data_src = scipy.array(put.vsnk_src.data()[20:]) data_clk = scipy.array(put.vsnk_clk.data()[20:]) data_err = scipy.array(put.vsnk_err.data()[20:]) data_rat = scipy.array(put.vsnk_rat.data()[20:]) data_phs = scipy.array(put.vsnk_phs.data()[20:]) f1 = pylab.figure(1, figsize=(12,10), facecolor='w') # Plot the IQ symbols s1 = f1.add_subplot(2,2,1) s1.plot(data_src.real, data_src.imag, "bo") s1.plot(data_clk.real, data_clk.imag, "ro") s1.set_title("IQ") s1.set_xlabel("Real part") s1.set_ylabel("Imag part") s1.set_xlim([-2, 2]) s1.set_ylim([-2, 2]) # Plot the symbols in time delay = put.delay m = len(data_clk.real) s2 = f1.add_subplot(2,2,2) s2.plot(data_src.real, "bs", markersize=10, label="Input") s2.plot(data_clk.real[delay:], "ro", label="Recovered") s2.set_title("Symbols") s2.set_xlabel("Samples") s2.set_ylabel("Real Part of Signals") s2.legend() # Plot the clock recovery loop's error s3 = f1.add_subplot(2,2,3) s3.plot(data_err, label="Error") s3.plot(data_rat, 'r', label="Update rate") s3.set_title("Clock Recovery Loop Error") s3.set_xlabel("Samples") s3.set_ylabel("Error") s3.set_ylim([-0.5, 0.5]) s3.legend() # Plot the clock recovery loop's error s4 = f1.add_subplot(2,2,4) s4.plot(data_phs) s4.set_title("Clock Recovery Loop Filter Phase") s4.set_xlabel("Samples") s4.set_ylabel("Filter Phase") diff_taps = put.dtaps 
ntaps = len(diff_taps[0]) nfilts = len(diff_taps) t = scipy.arange(0, ntaps*nfilts) f3 = pylab.figure(3, figsize=(12,10), facecolor='w') s31 = f3.add_subplot(2,1,1) s32 = f3.add_subplot(2,1,2) s31.set_title("Differential Filters") s32.set_title("FFT of Differential Filters") for i,d in enumerate(diff_taps): D = 20.0*scipy.log10(1e-20+abs(fftpack.fftshift(fftpack.fft(d, 10000)))) s31.plot(t[i::nfilts].real, d, "-o") s32.plot(D) s32.set_ylim([-120, 10]) # If testing the M&M clock recovery loop else: data_src = scipy.array(put.vsnk_src.data()[20:]) data_clk = scipy.array(put.vsnk_clk.data()[20:]) data_err = scipy.array(put.vsnk_err.data()[20:]) f1 = pylab.figure(1, figsize=(12,10), facecolor='w') # Plot the IQ symbols s1 = f1.add_subplot(2,2,1) s1.plot(data_src.real, data_src.imag, "o") s1.plot(data_clk.real, data_clk.imag, "ro") s1.set_title("IQ") s1.set_xlabel("Real part") s1.set_ylabel("Imag part") s1.set_xlim([-2, 2]) s1.set_ylim([-2, 2]) # Plot the symbols in time s2 = f1.add_subplot(2,2,2) s2.plot(data_src.real, "bs", markersize=10, label="Input") s2.plot(data_clk.real, "ro", label="Recovered") s2.set_title("Symbols") s2.set_xlabel("Samples") s2.set_ylabel("Real Part of Signals") s2.legend() # Plot the clock recovery loop's error s3 = f1.add_subplot(2,2,3) s3.plot(data_err) s3.set_title("Clock Recovery Loop Error") s3.set_xlabel("Samples") s3.set_ylabel("Error") pylab.show() if __name__ == "__main__": try: main() except KeyboardInterrupt: pass
gpl-3.0
Srisai85/scikit-learn
sklearn/cluster/tests/test_birch.py
342
5603
""" Tests for the birch clustering algorithm. """ from scipy import sparse import numpy as np from sklearn.cluster.tests.common import generate_clustered_data from sklearn.cluster.birch import Birch from sklearn.cluster.hierarchical import AgglomerativeClustering from sklearn.datasets import make_blobs from sklearn.linear_model import ElasticNet from sklearn.metrics import pairwise_distances_argmin, v_measure_score from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns def test_n_samples_leaves_roots(): # Sanity check for the number of samples in leaves and roots X, y = make_blobs(n_samples=10) brc = Birch() brc.fit(X) n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_]) n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves() for sc in leaf.subclusters_]) assert_equal(n_samples_leaves, X.shape[0]) assert_equal(n_samples_root, X.shape[0]) def test_partial_fit(): # Test that fit is equivalent to calling partial_fit multiple times X, y = make_blobs(n_samples=100) brc = Birch(n_clusters=3) brc.fit(X) brc_partial = Birch(n_clusters=None) brc_partial.partial_fit(X[:50]) brc_partial.partial_fit(X[50:]) assert_array_equal(brc_partial.subcluster_centers_, brc.subcluster_centers_) # Test that same global labels are obtained after calling partial_fit # with None brc_partial.set_params(n_clusters=3) brc_partial.partial_fit(None) assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_) def test_birch_predict(): # Test the predict method predicts the nearest centroid. 
rng = np.random.RandomState(0) X = generate_clustered_data(n_clusters=3, n_features=3, n_samples_per_cluster=10) # n_samples * n_samples_per_cluster shuffle_indices = np.arange(30) rng.shuffle(shuffle_indices) X_shuffle = X[shuffle_indices, :] brc = Birch(n_clusters=4, threshold=1.) brc.fit(X_shuffle) centroids = brc.subcluster_centers_ assert_array_equal(brc.labels_, brc.predict(X_shuffle)) nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids) assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0) def test_n_clusters(): # Test that n_clusters param works properly X, y = make_blobs(n_samples=100, centers=10) brc1 = Birch(n_clusters=10) brc1.fit(X) assert_greater(len(brc1.subcluster_centers_), 10) assert_equal(len(np.unique(brc1.labels_)), 10) # Test that n_clusters = Agglomerative Clustering gives # the same results. gc = AgglomerativeClustering(n_clusters=10) brc2 = Birch(n_clusters=gc) brc2.fit(X) assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_) assert_array_equal(brc1.labels_, brc2.labels_) # Test that the wrong global clustering step raises an Error. clf = ElasticNet() brc3 = Birch(n_clusters=clf) assert_raises(ValueError, brc3.fit, X) # Test that a small number of clusters raises a warning. brc4 = Birch(threshold=10000.) 
assert_warns(UserWarning, brc4.fit, X) def test_sparse_X(): # Test that sparse and dense data give same results X, y = make_blobs(n_samples=100, centers=10) brc = Birch(n_clusters=10) brc.fit(X) csr = sparse.csr_matrix(X) brc_sparse = Birch(n_clusters=10) brc_sparse.fit(csr) assert_array_equal(brc.labels_, brc_sparse.labels_) assert_array_equal(brc.subcluster_centers_, brc_sparse.subcluster_centers_) def check_branching_factor(node, branching_factor): subclusters = node.subclusters_ assert_greater_equal(branching_factor, len(subclusters)) for cluster in subclusters: if cluster.child_: check_branching_factor(cluster.child_, branching_factor) def test_branching_factor(): # Test that nodes have at max branching_factor number of subclusters X, y = make_blobs() branching_factor = 9 # Purposefully set a low threshold to maximize the subclusters. brc = Birch(n_clusters=None, branching_factor=branching_factor, threshold=0.01) brc.fit(X) check_branching_factor(brc.root_, branching_factor) brc = Birch(n_clusters=3, branching_factor=branching_factor, threshold=0.01) brc.fit(X) check_branching_factor(brc.root_, branching_factor) # Raises error when branching_factor is set to one. brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01) assert_raises(ValueError, brc.fit, X) def check_threshold(birch_instance, threshold): """Use the leaf linked list for traversal""" current_leaf = birch_instance.dummy_leaf_.next_leaf_ while current_leaf: subclusters = current_leaf.subclusters_ for sc in subclusters: assert_greater_equal(threshold, sc.radius) current_leaf = current_leaf.next_leaf_ def test_threshold(): # Test that the leaf subclusters have a threshold lesser than radius X, y = make_blobs(n_samples=80, centers=4) brc = Birch(threshold=0.5, n_clusters=None) brc.fit(X) check_threshold(brc, 0.5) brc = Birch(threshold=5.0, n_clusters=None) brc.fit(X) check_threshold(brc, 5.)
bsd-3-clause
malmiron/incubator-airflow
airflow/hooks/base_hook.py
1
3192
# -*- coding: utf-8 -*- # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os import random from airflow.models import Connection from airflow.exceptions import AirflowException from airflow.utils.db import provide_session from airflow.utils.log.logging_mixin import LoggingMixin CONN_ENV_PREFIX = 'AIRFLOW_CONN_' class BaseHook(LoggingMixin): """ Abstract base class for hooks, hooks are meant as an interface to interact with external systems. MySqlHook, HiveHook, PigHook return object that can handle the connection and interaction to specific instances of these systems, and expose consistent methods to interact with them. 
""" def __init__(self, source): pass @classmethod @provide_session def _get_connections_from_db(cls, conn_id, session=None): db = ( session.query(Connection) .filter(Connection.conn_id == conn_id) .all() ) session.expunge_all() if not db: raise AirflowException( "The conn_id `{0}` isn't defined".format(conn_id)) return db @classmethod def _get_connection_from_env(cls, conn_id): environment_uri = os.environ.get(CONN_ENV_PREFIX + conn_id.upper()) conn = None if environment_uri: conn = Connection(conn_id=conn_id, uri=environment_uri) return conn @classmethod def get_connections(cls, conn_id): conn = cls._get_connection_from_env(conn_id) if conn: conns = [conn] else: conns = cls._get_connections_from_db(conn_id) return conns @classmethod def get_connection(cls, conn_id): conn = random.choice(cls.get_connections(conn_id)) if conn.host: log = LoggingMixin().log log.info("Using connection to: %s", conn.debug_info()) return conn @classmethod def get_hook(cls, conn_id): connection = cls.get_connection(conn_id) return connection.get_hook() def get_conn(self): raise NotImplementedError() def get_records(self, sql): raise NotImplementedError() def get_pandas_df(self, sql): raise NotImplementedError() def run(self, sql): raise NotImplementedError()
apache-2.0
basauri89/TFM-online-sales
archivos .py/blender.py
1
3309
""" Simple blender para los valores de regresion deseados durante meses """ import numpy as np from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor, GradientBoostingRegressor import load_data from sklearn.cross_validation import KFold from sklearn.linear_model import Ridge, RidgeCV, LinearRegression import pickle import gzip import math def rmsle(y, y0): assert len(y) == len(y0) return np.sqrt(np.mean(np.power(np.log1p(y)-np.log1p(y0), 2))) if __name__ == '__main__': #iniciamos la seed para la aleatoriedad y creamos un 5 fold cross validation np.random.seed(0) n_folds = 5 #cagamos el dataset X, X_submission, ys, ids, idx = load_data.load() # evitamos el logscale en la evaluacion: ys = np.log(ys/500.0 + 1.0) y_submission = np.zeros((X_submission.shape[0], 12)) #se prueba con n stimators 1000 para que se ejecute más rápido regs = [GradientBoostingRegressor(learning_rate=0.001, subsample=0.5, max_depth=6, n_estimators=10000)] dataset_blend_train = np.zeros((X.shape[0], 12*len(regs)), dtype=np.double) dataset_blend_submission = np.zeros((X_submission.shape[0], 12*len(regs), n_folds), dtype=np.double) for i in range(12): print "Month", i y = ys[:,i] kfcv = KFold(n=X.shape[0], n_folds=n_folds) for j, (train, test) in enumerate(kfcv): print "Fold", j for k, reg in enumerate(regs): print reg #Nos aseguramos de eliminar todos los valores infinitos o NaN y[train] = np.nan_to_num(y[train]) X[train] = np.nan_to_num(X[train]) X[test] = np.nan_to_num(X[test]) X_submission = np.nan_to_num(X_submission) #check de valores NaN o infinitos print "y tiene valores infinitos: ", np.isinf(y[train]).any() print "y tiene valores nan: ", np.isnan(y[train]).any() print "X tiene valores nan: ", np.isnan(X[train]).any() print "X tiene valores infinitos: ", np.isnan(X[train]).any() reg.fit(X[train], y[train]) #ejecutamos el predictor dataset_blend_train[test,12*k+i] = reg.predict(X[test]) dataset_blend_submission[:,12*k+i,j] = reg.predict(X_submission) 
dataset_blend_submission_final = dataset_blend_submission.mean(2) print "dataset_blend_submission_final:", dataset_blend_submission_final.shape print "Blending." for i in range(12): print "Month", i, '-', y = ys[:,i] reg = RidgeCV(alphas=np.logspace(-2,4,40)) reg.fit(dataset_blend_train, y) print "best_alpha =", reg.alpha_ y_submission[:,i] = reg.predict(dataset_blend_submission_final) # reconversion de los resultados a la dimension original: y_submission = (np.exp(y_submission) - 1.0) * 500.0 print "Guardando resultados en test.csv..." np.savetxt("test.csv", np.hstack([ids[:,None], y_submission]), fmt="%d", delimiter=',') print("Resultados guardados en test.csv") yreal = (np.exp(dataset_blend_submission_final) - 1.0) * 500.0 print rmsle(yreal, y_submission)
gpl-2.0
mattloper/opendr
opendr/test_sh.py
1
4734
#!/usr/bin/env python # encoding: utf-8 """ Author(s): Matthew Loper See LICENCE.txt for licensing and contact information. """ from chumpy import Ch import numpy as np from chumpy.utils import row, col from .lighting import SphericalHarmonics import unittest try: import matplotlib.pyplot as plt except: from .dummy import dummy as plt from .topology import loop_subdivider visualize = False def getcam(): from .camera import ProjectPoints3D w = 640 h = 320 f = np.array([500,500]) rt = np.zeros(3) t = np.zeros(3) k = np.zeros(5) c = np.array([w/2., h/2.]) near = .1 far = 20. frustum = {'near': near, 'far': far, 'width': int(w), 'height': int(h)} pp = ProjectPoints3D(f=f, rt=rt, t=t, k=k, c=c) return pp, frustum class TestSphericalHarmonics(unittest.TestCase): def test_spherical_harmonics(self): global visualize if visualize: plt.ion() # Get mesh v, f = get_sphere_mesh() from .geometry import VertNormals vn = VertNormals(v=v, f=f) #vn = Ch(mesh.estimate_vertex_normals()) # Get camera cam, frustum = getcam() # Get renderer from .renderer import ColoredRenderer cam.v = v cr = ColoredRenderer(f=f, camera=cam, frustum=frustum, v=v) sh_red = SphericalHarmonics(vn=vn, light_color=np.array([1,0,0])) sh_green = SphericalHarmonics(vn=vn, light_color=np.array([0,1,0])) cr.vc = sh_red + sh_green ims_baseline = [] for comp_idx, subplot_idx in enumerate([3,7,8,9,11,12,13,14,15]): sh_comps = np.zeros(9) sh_comps[comp_idx] = 1 sh_red.components = Ch(sh_comps) sh_green.components = Ch(-sh_comps) newim = cr.r.reshape((frustum['height'], frustum['width'], 3)) ims_baseline.append(newim) if visualize: plt.subplot(3,5,subplot_idx) plt.imshow(newim) plt.axis('off') offset = row(.4 * (np.random.rand(3)-.5)) #offset = row(np.array([1.,1.,1.]))*.05 vn_shifted = (vn.r + offset) vn_shifted = vn_shifted / col(np.sqrt(np.sum(vn_shifted**2, axis=1))) vn_shifted = vn_shifted.ravel() vn_shifted[vn_shifted>1.] = 1 vn_shifted[vn_shifted<-1.] 
= -1 vn_shifted = Ch(vn_shifted) cr.replace(sh_red.vn, vn_shifted) if True: for comp_idx in range(9): if visualize: plt.figure(comp_idx+2) sh_comps = np.zeros(9) sh_comps[comp_idx] = 1 sh_red.components = Ch(sh_comps) sh_green.components = Ch(-sh_comps) pred = cr.dr_wrt(vn_shifted).dot(col(vn_shifted.r.reshape(vn.r.shape) - vn.r)).reshape((frustum['height'], frustum['width'], 3)) if visualize: plt.subplot(1,2,1) plt.imshow(pred) plt.title('pred (comp %d)' % (comp_idx,)) plt.subplot(1,2,2) newim = cr.r.reshape((frustum['height'], frustum['width'], 3)) emp = newim - ims_baseline[comp_idx] if visualize: plt.imshow(emp) plt.title('empirical (comp %d)' % (comp_idx,)) pred_flat = pred.ravel() emp_flat = emp.ravel() nnz = np.unique(np.concatenate((np.nonzero(pred_flat)[0], np.nonzero(emp_flat)[0]))) if comp_idx != 0: med_diff = np.median(np.abs(pred_flat[nnz]-emp_flat[nnz])) med_obs = np.median(np.abs(emp_flat[nnz])) if comp_idx == 4 or comp_idx == 8: self.assertTrue(med_diff / med_obs < .6) else: self.assertTrue(med_diff / med_obs < .3) if visualize: plt.axis('off') def get_sphere_mesh(): from .util_tests import get_earthmesh mesh = get_earthmesh(np.zeros(3), np.zeros(3)) # load_mesh(filename) v, f = mesh.v*64., mesh.f for i in range(3): mtx, f = loop_subdivider(v, f) v = mtx.dot(v.ravel()).reshape((-1,3)) v /= 200. v[:,2] += 2 return v, f if __name__ == '__main__': visualize = True plt.ion() #unittest.main() suite = unittest.TestLoader().loadTestsFromTestCase(TestSphericalHarmonics) unittest.TextTestRunner(verbosity=2).run(suite) plt.show() import pdb; pdb.set_trace()
mit
NeuroDataDesign/seelviz
graphfiles/LukeGraphs/3dhtml/viz_3d_plotly.py
1
2646
# Fix: the body uses ``pd.DataFrame`` but pandas was never imported,
# so every call raised NameError at runtime.
import pandas as pd

from plotly.graph_objs import *


def get_brain_figure(g, atlas_data, plot_title=''):
    """
    Returns the plotly figure object for vizualizing a 3d brain network.

    g: igraph object of brain
    atlas_data: pandas DataFrame containing the x,y,z coordinates of each
    brain region (must also carry a 'name' column used for hover text)
    plot_title: title string for the figure

    Example
    -------

    import plotly
    plotly.offline.init_notebook_mode()

    fig = get_brain_figure(g, atlas_data)
    plotly.offline.iplot(fig)
    """
    # grab the node positions from the centroids file
    V = atlas_data.shape[0]
    node_positions_3d = pd.DataFrame(columns=['x', 'y', 'z'], index=range(V))
    for r in range(V):
        node_positions_3d.loc[r] = atlas_data.loc[r, ['x', 'y', 'z']].tolist()

    # grab edge endpoints; each segment is (source, target, None) so that
    # plotly breaks the line between consecutive edges
    edge_x = []
    edge_y = []
    edge_z = []
    for e in g.es:
        source_pos = node_positions_3d.loc[e.source]
        target_pos = node_positions_3d.loc[e.target]
        edge_x += [source_pos['x'], target_pos['x'], None]
        edge_y += [source_pos['y'], target_pos['y'], None]
        edge_z += [source_pos['z'], target_pos['z'], None]

    # node style: one red marker per region, hover shows the region name
    node_trace = Scatter3d(x=node_positions_3d['x'],
                           y=node_positions_3d['y'],
                           z=node_positions_3d['z'],
                           mode='markers',
                           marker=Marker(symbol='dot', size=6, color='red'),
                           text=atlas_data['name'],
                           hoverinfo='text')

    # edge style: thin black lines, no hover
    edge_trace = Scatter3d(x=edge_x,
                           y=edge_y,
                           z=edge_z,
                           mode='lines',
                           line=Line(color='black', width=.5),
                           hoverinfo='none')

    # axis style: hide all axis chrome so only the network is visible
    axis = dict(showbackground=False,
                showline=False,
                zeroline=False,
                showgrid=False,
                showticklabels=False)

    # overall layout
    layout = Layout(title=plot_title,
                    width=800,
                    height=900,
                    showlegend=False,
                    scene=Scene(xaxis=XAxis(axis),
                                yaxis=YAxis(axis),
                                zaxis=ZAxis(axis)),
                    margin=Margin(t=50),
                    hovermode='closest')

    data = Data([node_trace, edge_trace])
    fig = Figure(data=data, layout=layout)

    return fig
apache-2.0
phobson/mpl-probscale
docs/sphinxext/ipython_directive.py
3
38140
# -*- coding: utf-8 -*- """ Sphinx directive to support embedded IPython code. This directive allows pasting of entire interactive IPython sessions, prompts and all, and their code will actually get re-executed at doc build time, with all prompts renumbered sequentially. It also allows you to input code as a pure python input by giving the argument python to the directive. The output looks like an interactive ipython section. To enable this directive, simply list it in your Sphinx ``conf.py`` file (making sure the directory where you placed it is visible to sphinx, as is needed for all Sphinx directives). For example, to enable syntax highlighting and the IPython directive:: extensions = ['IPython.sphinxext.ipython_console_highlighting', 'IPython.sphinxext.ipython_directive'] The IPython directive outputs code-blocks with the language 'ipython'. So if you do not have the syntax highlighting extension enabled as well, then all rendered code-blocks will be uncolored. By default this directive assumes that your prompts are unchanged IPython ones, but this can be customized. The configurable options that can be placed in conf.py are: ipython_savefig_dir: The directory in which to save the figures. This is relative to the Sphinx source directory. The default is `html_static_path`. ipython_rgxin: The compiled regular expression to denote the start of IPython input lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You shouldn't need to change this. ipython_rgxout: The compiled regular expression to denote the start of IPython output lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You shouldn't need to change this. ipython_promptin: The string to represent the IPython input prompt in the generated ReST. The default is 'In [%d]:'. This expects that the line numbers are used in the prompt. ipython_promptout: The string to represent the IPython prompt in the generated ReST. The default is 'Out [%d]:'. 
This expects that the line numbers are used in the prompt. ipython_mplbackend: The string which specifies if the embedded Sphinx shell should import Matplotlib and set the backend. The value specifies a backend that is passed to `matplotlib.use()` before any lines in `ipython_execlines` are executed. If not specified in conf.py, then the default value of 'agg' is used. To use the IPython directive without matplotlib as a dependency, set the value to `None`. It may end up that matplotlib is still imported if the user specifies so in `ipython_execlines` or makes use of the @savefig pseudo decorator. ipython_execlines: A list of strings to be exec'd in the embedded Sphinx shell. Typical usage is to make certain packages always available. Set this to an empty list if you wish to have no imports always available. If specified in conf.py as `None`, then it has the effect of making no imports available. If omitted from conf.py altogether, then the default value of ['import numpy as np', 'import matplotlib.pyplot as plt'] is used. ipython_holdcount When the @suppress pseudo-decorator is used, the execution count can be incremented or not. The default behavior is to hold the execution count, corresponding to a value of `True`. Set this to `False` to increment the execution count after each suppressed command. As an example, to use the IPython directive when `matplotlib` is not available, one sets the backend to `None`:: ipython_mplbackend = None An example usage of the directive is: .. code-block:: rst .. ipython:: In [1]: x = 1 In [2]: y = x**2 In [3]: print(y) See http://matplotlib.org/sampledoc/ipython_directive.html for additional documentation. ToDo ---- - Turn the ad-hoc test() function into a real test suite. - Break up ipython-specific functionality from matplotlib stuff into better separated code. Authors ------- - John D Hunter: orignal author. - Fernando Perez: refactoring, documentation, cleanups, port to 0.11. 
- VáclavŠmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations. - Skipper Seabold, refactoring, cleanups, pure python addition """ from __future__ import print_function from __future__ import unicode_literals # ----------------------------------------------------------------------------- # Imports # ----------------------------------------------------------------------------- # Stdlib import os import re import sys import tempfile import ast from pandas.compat import zip, range, map, lmap, u, cStringIO as StringIO import warnings # To keep compatibility with various python versions try: from hashlib import md5 except ImportError: from md5 import md5 # Third-party import sphinx from docutils.parsers.rst import directives from docutils import nodes from sphinx.util.compat import Directive # Our own from traitlets.config import Config from IPython import InteractiveShell from IPython.core.profiledir import ProfileDir from IPython.utils import io from IPython.utils.py3compat import PY3 if PY3: from io import StringIO text_type = str else: from StringIO import StringIO text_type = unicode # ----------------------------------------------------------------------------- # Globals # ----------------------------------------------------------------------------- # for tokenizing blocks COMMENT, INPUT, OUTPUT = range(3) # ----------------------------------------------------------------------------- # Functions and class declarations # ----------------------------------------------------------------------------- def block_parser(part, rgxin, rgxout, fmtin, fmtout): """ part is a string of ipython text, comprised of at most one input, one ouput, comments, and blank lines. The block parser parses the text into a list of:: blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...] 
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and data is, depending on the type of token:: COMMENT : the comment string INPUT: the (DECORATOR, INPUT_LINE, REST) where DECORATOR: the input decorator (or None) INPUT_LINE: the input as string (possibly multi-line) REST : any stdout generated by the input line (not OUTPUT) OUTPUT: the output string, possibly multi-line """ block = [] lines = part.split("\n") N = len(lines) i = 0 decorator = None while 1: if i == N: # nothing left to parse -- the last line break line = lines[i] i += 1 line_stripped = line.strip() if line_stripped.startswith("#"): block.append((COMMENT, line)) continue if line_stripped.startswith("@"): # we're assuming at most one decorator -- may need to # rethink decorator = line_stripped continue # does this look like an input line? matchin = rgxin.match(line) if matchin: lineno, inputline = int(matchin.group(1)), matchin.group(2) # the ....: continuation string continuation = " %s:" % "".join(["."] * (len(str(lineno)) + 2)) Nc = len(continuation) # input lines can continue on for more than one line, if # we have a '\' line continuation char or a function call # echo line 'print'. 
The input line can only be # terminated by the end of the block or an output line, so # we parse out the rest of the input line if it is # multiline as well as any echo text rest = [] while i < N: # look ahead; if the next line is blank, or a comment, or # an output line, we're done nextline = lines[i] matchout = rgxout.match(nextline) # print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation)) if matchout or nextline.startswith("#"): break elif nextline.startswith(continuation): nextline = nextline[Nc:] if nextline and nextline[0] == " ": nextline = nextline[1:] inputline += "\n" + nextline else: rest.append(nextline) i += 1 block.append((INPUT, (decorator, inputline, "\n".join(rest)))) continue # if it looks like an output line grab all the text to the end # of the block matchout = rgxout.match(line) if matchout: lineno, output = int(matchout.group(1)), matchout.group(2) if i < N - 1: output = "\n".join([output] + lines[i:]) block.append((OUTPUT, output)) break return block class DecodingStringIO(StringIO, object): def __init__(self, buf="", encodings=("utf8",), *args, **kwds): super(DecodingStringIO, self).__init__(buf, *args, **kwds) self.set_encodings(encodings) def set_encodings(self, encodings): self.encodings = encodings def write(self, data): if isinstance(data, text_type): return super(DecodingStringIO, self).write(data) else: for enc in self.encodings: try: data = data.decode(enc) return super(DecodingStringIO, self).write(data) except: pass # default to brute utf8 if no encoding succeded return super(DecodingStringIO, self).write(data.decode("utf8", "replace")) class EmbeddedSphinxShell(object): """An embedded IPython instance to run inside Sphinx""" def __init__(self, exec_lines=None, state=None): self.cout = DecodingStringIO("") if exec_lines is None: exec_lines = [] self.state = state # Create config object for IPython config = Config() config.InteractiveShell.autocall = False 
config.InteractiveShell.autoindent = False config.InteractiveShell.colors = "NoColor" # create a profile so instance history isn't saved tmp_profile_dir = tempfile.mkdtemp(prefix="profile_") profname = "auto_profile_sphinx_build" pdir = os.path.join(tmp_profile_dir, profname) profile = ProfileDir.create_profile_dir(pdir) # Create and initialize global ipython, but don't start its mainloop. # This will persist across different EmbededSphinxShell instances. IP = InteractiveShell.instance(config=config, profile_dir=profile) # io.stdout redirect must be done after instantiating InteractiveShell io.stdout = self.cout io.stderr = self.cout # For debugging, so we can see normal output, use this: # from IPython.utils.io import Tee # io.stdout = Tee(self.cout, channel='stdout') # dbg # io.stderr = Tee(self.cout, channel='stderr') # dbg # Store a few parts of IPython we'll need. self.IP = IP self.user_ns = self.IP.user_ns self.user_global_ns = self.IP.user_global_ns self.input = "" self.output = "" self.is_verbatim = False self.is_doctest = False self.is_suppress = False # Optionally, provide more detailed information to shell. self.directive = None # on the first call to the savefig decorator, we'll import # pyplot as plt so we can make a call to the plt.gcf().savefig self._pyplot_imported = False # Prepopulate the namespace. 
for line in exec_lines: self.process_input_line(line, store_history=False) def clear_cout(self): self.cout.seek(0) self.cout.truncate(0) def process_input_line(self, line, store_history=True): """process the input, capturing stdout""" stdout = sys.stdout splitter = self.IP.input_splitter try: sys.stdout = self.cout splitter.push(line) more = splitter.push_accepts_more() if not more: try: source_raw = splitter.source_raw_reset()[1] except: # recent ipython #4504 source_raw = splitter.raw_reset() self.IP.run_cell(source_raw, store_history=store_history) finally: sys.stdout = stdout def process_image(self, decorator): """ # build out an image directive like # .. image:: somefile.png # :width 4in # # from an input like # savefig somefile.png width=4in """ savefig_dir = self.savefig_dir source_dir = self.source_dir saveargs = decorator.split(" ") filename = saveargs[1] # insert relative path to image file in source outfile = os.path.relpath(os.path.join(savefig_dir, filename), source_dir) imagerows = [".. image:: %s" % outfile] for kwarg in saveargs[2:]: arg, val = kwarg.split("=") arg = arg.strip() val = val.strip() imagerows.append(" :%s: %s" % (arg, val)) image_file = os.path.basename(outfile) # only return file name image_directive = "\n".join(imagerows) return image_file, image_directive # Callbacks for each type of token def process_input(self, data, input_prompt, lineno): """ Process data block for INPUT token. 
""" decorator, input, rest = data image_file = None image_directive = None is_verbatim = decorator == "@verbatim" or self.is_verbatim is_doctest = ( decorator is not None and decorator.startswith("@doctest") ) or self.is_doctest is_suppress = decorator == "@suppress" or self.is_suppress is_okexcept = decorator == "@okexcept" or self.is_okexcept is_okwarning = decorator == "@okwarning" or self.is_okwarning is_savefig = decorator is not None and decorator.startswith("@savefig") # set the encodings to be used by DecodingStringIO # to convert the execution output into unicode if # needed. this attrib is set by IpythonDirective.run() # based on the specified block options, defaulting to ['ut self.cout.set_encodings(self.output_encoding) input_lines = input.split("\n") if len(input_lines) > 1: if input_lines[-1] != "": input_lines.append("") # make sure there's a blank line # so splitter buffer gets reset continuation = " %s:" % "".join(["."] * (len(str(lineno)) + 2)) if is_savefig: image_file, image_directive = self.process_image(decorator) ret = [] is_semicolon = False # Hold the execution count, if requested to do so. 
if is_suppress and self.hold_count: store_history = False else: store_history = True # Note: catch_warnings is not thread safe with warnings.catch_warnings(record=True) as ws: for i, line in enumerate(input_lines): if line.endswith(";"): is_semicolon = True if i == 0: # process the first input line if is_verbatim: self.process_input_line("") self.IP.execution_count += 1 # increment it anyway else: # only submit the line in non-verbatim mode self.process_input_line(line, store_history=store_history) formatted_line = "%s %s" % (input_prompt, line) else: # process a continuation line if not is_verbatim: self.process_input_line(line, store_history=store_history) formatted_line = "%s %s" % (continuation, line) if not is_suppress: ret.append(formatted_line) if not is_suppress and len(rest.strip()) and is_verbatim: # the "rest" is the standard output of the # input, which needs to be added in # verbatim mode ret.append(rest) self.cout.seek(0) output = self.cout.read() if not is_suppress and not is_semicolon: ret.append(output) elif is_semicolon: # get spacing right ret.append("") # context information filename = self.state.document.current_source lineno = self.state.document.current_line # output any exceptions raised during execution to stdout # unless :okexcept: has been specified. if not is_okexcept and "Traceback" in output: s = "\nException in %s at block ending on line %s\n" % (filename, lineno) s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n" sys.stdout.write("\n\n>>>" + ("-" * 73)) sys.stdout.write(s) sys.stdout.write(output) sys.stdout.write("<<<" + ("-" * 73) + "\n\n") # output any warning raised during execution to stdout # unless :okwarning: has been specified. 
if not is_okwarning: for w in ws: s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno) s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n" sys.stdout.write("\n\n>>>" + ("-" * 73)) sys.stdout.write(s) sys.stdout.write("-" * 76 + "\n") s = warnings.formatwarning( w.message, w.category, w.filename, w.lineno, w.line ) sys.stdout.write(s) sys.stdout.write("<<<" + ("-" * 73) + "\n") self.cout.truncate(0) return ( ret, input_lines, output, is_doctest, decorator, image_file, image_directive, ) def process_output( self, data, output_prompt, input_lines, output, is_doctest, decorator, image_file, ): """ Process data block for OUTPUT token. """ TAB = " " * 4 if is_doctest and output is not None: found = output found = found.strip() submitted = data.strip() if self.directive is None: source = "Unavailable" content = "Unavailable" else: source = self.directive.state.document.current_source content = self.directive.content # Add tabs and join into a single string. content = "\n".join([TAB + line for line in content]) # Make sure the output contains the output prompt. ind = found.find(output_prompt) if ind < 0: e = ( "output does not contain output prompt\n\n" "Document source: {0}\n\n" "Raw content: \n{1}\n\n" "Input line(s):\n{TAB}{2}\n\n" "Output line(s):\n{TAB}{3}\n\n" ) e = e.format( source, content, "\n".join(input_lines), repr(found), TAB=TAB ) raise RuntimeError(e) found = found[len(output_prompt) :].strip() # Handle the actual doctest comparison. 
if decorator.strip() == "@doctest": # Standard doctest if found != submitted: e = ( "doctest failure\n\n" "Document source: {0}\n\n" "Raw content: \n{1}\n\n" "On input line(s):\n{TAB}{2}\n\n" "we found output:\n{TAB}{3}\n\n" "instead of the expected:\n{TAB}{4}\n\n" ) e = e.format( source, content, "\n".join(input_lines), repr(found), repr(submitted), TAB=TAB, ) raise RuntimeError(e) else: self.custom_doctest(decorator, input_lines, found, submitted) def process_comment(self, data): """Process data fPblock for COMMENT token.""" if not self.is_suppress: return [data] def save_image(self, image_file): """ Saves the image file to disk. """ self.ensure_pyplot() command = ( 'plt.gcf().savefig("%s", bbox_inches="tight", ' "dpi=100)" % image_file ) # print 'SAVEFIG', command # dbg self.process_input_line("bookmark ipy_thisdir", store_history=False) self.process_input_line("cd -b ipy_savedir", store_history=False) self.process_input_line(command, store_history=False) self.process_input_line("cd -b ipy_thisdir", store_history=False) self.process_input_line("bookmark -d ipy_thisdir", store_history=False) self.clear_cout() def process_block(self, block): """ process block from the block_parser and return a list of processed lines """ ret = [] output = None input_lines = None lineno = self.IP.execution_count input_prompt = self.promptin % lineno output_prompt = self.promptout % lineno image_file = None image_directive = None for token, data in block: if token == COMMENT: out_data = self.process_comment(data) elif token == INPUT: ( out_data, input_lines, output, is_doctest, decorator, image_file, image_directive, ) = self.process_input(data, input_prompt, lineno) elif token == OUTPUT: out_data = self.process_output( data, output_prompt, input_lines, output, is_doctest, decorator, image_file, ) if out_data: ret.extend(out_data) # save the image files if image_file is not None: self.save_image(image_file) return ret, image_directive def ensure_pyplot(self): """ Ensures that pyplot 
has been imported into the embedded IPython shell. Also, makes sure to set the backend appropriately if not set already. """ # We are here if the @figure pseudo decorator was used. Thus, it's # possible that we could be here even if python_mplbackend were set to # `None`. That's also strange and perhaps worthy of raising an # exception, but for now, we just set the backend to 'agg'. if not self._pyplot_imported: if "matplotlib.backends" not in sys.modules: # Then ipython_matplotlib was set to None but there was a # call to the @figure decorator (and ipython_execlines did # not set a backend). # raise Exception("No backend was set, but @figure was used!") import matplotlib matplotlib.use("agg") # Always import pyplot into embedded shell. self.process_input_line( "import matplotlib.pyplot as plt", store_history=False ) self._pyplot_imported = True def process_pure_python(self, content): """ content is a list of strings. it is unedited directive content This runs it line by line in the InteractiveShell, prepends prompts as needed capturing stderr and stdout, then returns the content as a list as if it were ipython code """ output = [] savefig = False # keep up with this to clear figure multiline = False # to handle line continuation multiline_start = None fmtin = self.promptin ct = 0 for lineno, line in enumerate(content): line_stripped = line.strip() if not len(line): output.append(line) continue # handle decorators if line_stripped.startswith("@"): output.extend([line]) if "savefig" in line: savefig = True # and need to clear figure continue # handle comments if line_stripped.startswith("#"): output.extend([line]) continue # deal with lines checking for multiline continuation = " %s:" % "".join(["."] * (len(str(ct)) + 2)) if not multiline: modified = "%s %s" % (fmtin % ct, line_stripped) output.append(modified) ct += 1 try: ast.parse(line_stripped) output.append("") except Exception: # on a multiline multiline = True multiline_start = lineno else: # still on a 
multiline modified = "%s %s" % (continuation, line) output.append(modified) # if the next line is indented, it should be part of multiline if len(content) > lineno + 1: nextline = content[lineno + 1] if len(nextline) - len(nextline.lstrip()) > 3: continue try: mod = ast.parse("\n".join(content[multiline_start : lineno + 1])) if isinstance(mod.body[0], ast.FunctionDef): # check to see if we have the whole function for element in mod.body[0].body: if isinstance(element, ast.Return): multiline = False else: output.append("") multiline = False except Exception: pass if savefig: # clear figure if plotted self.ensure_pyplot() self.process_input_line("plt.clf()", store_history=False) self.clear_cout() savefig = False return output def custom_doctest(self, decorator, input_lines, found, submitted): """ Perform a specialized doctest. """ from .custom_doctests import doctests args = decorator.split() doctest_type = args[1] if doctest_type in doctests: doctests[doctest_type](self, args, input_lines, found, submitted) else: e = "Invalid option to @doctest: {0}".format(doctest_type) raise Exception(e) class IPythonDirective(Directive): has_content = True required_arguments = 0 optional_arguments = 4 # python, suppress, verbatim, doctest final_argumuent_whitespace = True option_spec = { "python": directives.unchanged, "suppress": directives.flag, "verbatim": directives.flag, "doctest": directives.flag, "okexcept": directives.flag, "okwarning": directives.flag, "output_encoding": directives.unchanged_required, } shell = None seen_docs = set() def get_config_options(self): # contains sphinx configuration variables config = self.state.document.settings.env.config # get config variables to set figure output directory confdir = self.state.document.settings.env.app.confdir savefig_dir = config.ipython_savefig_dir source_dir = os.path.dirname(self.state.document.current_source) if savefig_dir is None: savefig_dir = config.html_static_path if isinstance(savefig_dir, list): savefig_dir = 
savefig_dir[0] # safe to assume only one path? savefig_dir = os.path.join(confdir, savefig_dir) # get regex and prompt stuff rgxin = config.ipython_rgxin rgxout = config.ipython_rgxout promptin = config.ipython_promptin promptout = config.ipython_promptout mplbackend = config.ipython_mplbackend exec_lines = config.ipython_execlines hold_count = config.ipython_holdcount return ( savefig_dir, source_dir, rgxin, rgxout, promptin, promptout, mplbackend, exec_lines, hold_count, ) def setup(self): # Get configuration values. ( savefig_dir, source_dir, rgxin, rgxout, promptin, promptout, mplbackend, exec_lines, hold_count, ) = self.get_config_options() if self.shell is None: # We will be here many times. However, when the # EmbeddedSphinxShell is created, its interactive shell member # is the same for each instance. if mplbackend: import matplotlib # Repeated calls to use() will not hurt us since `mplbackend` # is the same each time. matplotlib.use(mplbackend) # Must be called after (potentially) importing matplotlib and # setting its backend since exec_lines might import pylab. self.shell = EmbeddedSphinxShell(exec_lines, self.state) # Store IPython directive to enable better error messages self.shell.directive = self # reset the execution count if we haven't processed this doc # NOTE: this may be borked if there are multiple seen_doc tmp files # check time stamp? 
if not self.state.document.current_source in self.seen_docs: self.shell.IP.history_manager.reset() self.shell.IP.execution_count = 1 self.shell.IP.prompt_manager.width = 0 self.seen_docs.add(self.state.document.current_source) # and attach to shell so we don't have to pass them around self.shell.rgxin = rgxin self.shell.rgxout = rgxout self.shell.promptin = promptin self.shell.promptout = promptout self.shell.savefig_dir = savefig_dir self.shell.source_dir = source_dir self.shell.hold_count = hold_count # setup bookmark for saving figures directory self.shell.process_input_line( "bookmark ipy_savedir %s" % savefig_dir, store_history=False ) self.shell.clear_cout() return rgxin, rgxout, promptin, promptout def teardown(self): # delete last bookmark self.shell.process_input_line("bookmark -d ipy_savedir", store_history=False) self.shell.clear_cout() def run(self): debug = False # TODO, any reason block_parser can't be a method of embeddable shell # then we wouldn't have to carry these around rgxin, rgxout, promptin, promptout = self.setup() options = self.options self.shell.is_suppress = "suppress" in options self.shell.is_doctest = "doctest" in options self.shell.is_verbatim = "verbatim" in options self.shell.is_okexcept = "okexcept" in options self.shell.is_okwarning = "okwarning" in options self.shell.output_encoding = [options.get("output_encoding", "utf8")] # handle pure python code if "python" in self.arguments: content = self.content self.content = self.shell.process_pure_python(content) parts = "\n".join(self.content).split("\n\n") lines = [".. 
code-block:: ipython", ""] figures = [] for part in parts: block = block_parser(part, rgxin, rgxout, promptin, promptout) if len(block): rows, figure = self.shell.process_block(block) for row in rows: lines.extend([" %s" % line for line in row.split("\n")]) if figure is not None: figures.append(figure) for figure in figures: lines.append("") lines.extend(figure.split("\n")) lines.append("") if len(lines) > 2: if debug: print("\n".join(lines)) else: # This has to do with input, not output. But if we comment # these lines out, then no IPython code will appear in the # final output. self.state_machine.insert_input( lines, self.state_machine.input_lines.source(0) ) # cleanup self.teardown() return [] # Enable as a proper Sphinx directive def setup(app): setup.app = app app.add_directive("ipython", IPythonDirective) app.add_config_value("ipython_savefig_dir", None, "env") app.add_config_value("ipython_rgxin", re.compile("In \[(\d+)\]:\s?(.*)\s*"), "env") app.add_config_value("ipython_rgxout", re.compile("Out\[(\d+)\]:\s?(.*)\s*"), "env") app.add_config_value("ipython_promptin", "In [%d]:", "env") app.add_config_value("ipython_promptout", "Out[%d]:", "env") # We could just let matplotlib pick whatever is specified as the default # backend in the matplotlibrc file, but this would cause issues if the # backend didn't work in headless environments. For this reason, 'agg' # is a good default backend choice. app.add_config_value("ipython_mplbackend", "agg", "env") # If the user sets this config value to `None`, then EmbeddedSphinxShell's # __init__ method will treat it as []. execlines = ["import numpy as np", "import matplotlib.pyplot as plt"] app.add_config_value("ipython_execlines", execlines, "env") app.add_config_value("ipython_holdcount", True, "env") # Simple smoke test, needs to be converted to a proper automatic test. 
def test(): examples = [ r""" In [9]: pwd Out[9]: '/home/jdhunter/py4science/book' In [10]: cd bookdata/ /home/jdhunter/py4science/book/bookdata In [2]: from pylab import * In [2]: ion() In [3]: im = imread('stinkbug.png') @savefig mystinkbug.png width=4in In [4]: imshow(im) Out[4]: <matplotlib.image.AxesImage object at 0x39ea850> """, r""" In [1]: x = 'hello world' # string methods can be # used to alter the string @doctest In [2]: x.upper() Out[2]: 'HELLO WORLD' @verbatim In [3]: x.st<TAB> x.startswith x.strip """, r""" In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\ .....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv' In [131]: print url.split('&') ['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv'] In [60]: import urllib """, r"""\ In [133]: import numpy.random @suppress In [134]: numpy.random.seed(2358) @doctest In [135]: numpy.random.rand(10,2) Out[135]: array([[ 0.64524308, 0.59943846], [ 0.47102322, 0.8715456 ], [ 0.29370834, 0.74776844], [ 0.99539577, 0.1313423 ], [ 0.16250302, 0.21103583], [ 0.81626524, 0.1312433 ], [ 0.67338089, 0.72302393], [ 0.7566368 , 0.07033696], [ 0.22591016, 0.77731835], [ 0.0072729 , 0.34273127]]) """, r""" In [106]: print x jdh In [109]: for i in range(10): .....: print i .....: .....: 0 1 2 3 4 5 6 7 8 9 """, r""" In [144]: from pylab import * In [145]: ion() # use a semicolon to suppress the output @savefig test_hist.png width=4in In [151]: hist(np.random.randn(10000), 100); @savefig test_plot.png width=4in In [151]: plot(np.random.randn(10000), 'o'); """, r""" # use a semicolon to suppress the output In [151]: plt.clf() @savefig plot_simple.png width=4in In [151]: plot([1,2,3]) @savefig hist_simple.png width=4in In [151]: hist(np.random.randn(10000), 100); """, r""" # update the current fig In [151]: ylabel('number') In [152]: title('normal distribution') @savefig hist_with_text.png In [153]: grid(True) @doctest float In [154]: 0.1 
+ 0.2 Out[154]: 0.3 @doctest float In [155]: np.arange(16).reshape(4,4) Out[155]: array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) In [1]: x = np.arange(16, dtype=float).reshape(4,4) In [2]: x[0,0] = np.inf In [3]: x[0,1] = np.nan @doctest float In [4]: x Out[4]: array([[ inf, nan, 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [ 12., 13., 14., 15.]]) """, ] # skip local-file depending first example: examples = examples[1:] # ipython_directive.DEBUG = True # dbg # options = dict(suppress=True) # dbg options = dict() for example in examples: content = example.split("\n") IPythonDirective( "debug", arguments=None, options=options, content=content, lineno=0, content_offset=None, block_text=None, state=None, state_machine=None, ) # Run test suite as a script if __name__ == "__main__": if not os.path.isdir("_static"): os.mkdir("_static") test() print("All OK? Check figures in _static/")
bsd-3-clause
henrykironde/scikit-learn
sklearn/svm/tests/test_svm.py
116
31653
""" Testing for Support Vector Machine module (sklearn.svm) TODO: remove hard coded numerical results when possible """ import numpy as np import itertools from numpy.testing import assert_array_equal, assert_array_almost_equal from numpy.testing import assert_almost_equal from scipy import sparse from nose.tools import assert_raises, assert_true, assert_equal, assert_false from sklearn.base import ChangedBehaviorWarning from sklearn import svm, linear_model, datasets, metrics, base from sklearn.cross_validation import train_test_split from sklearn.datasets import make_classification, make_blobs from sklearn.metrics import f1_score from sklearn.metrics.pairwise import rbf_kernel from sklearn.utils import check_random_state from sklearn.utils import ConvergenceWarning from sklearn.utils.validation import NotFittedError from sklearn.utils.testing import assert_greater, assert_in, assert_less from sklearn.utils.testing import assert_raises_regexp, assert_warns from sklearn.utils.testing import assert_warns_message, assert_raise_message from sklearn.utils.testing import ignore_warnings # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] Y = [1, 1, 1, 2, 2, 2] T = [[-1, -1], [2, 2], [3, 2]] true_result = [1, 2, 2] # also load the iris dataset iris = datasets.load_iris() rng = check_random_state(42) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] def test_libsvm_parameters(): # Test parameters on classes that make use of libsvm. clf = svm.SVC(kernel='linear').fit(X, Y) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.support_vectors_, (X[1], X[3])) assert_array_equal(clf.intercept_, [0.]) assert_array_equal(clf.predict(X), Y) def test_libsvm_iris(): # Check consistency on dataset iris. 
# shuffle the dataset so that labels are not ordered for k in ('linear', 'rbf'): clf = svm.SVC(kernel=k).fit(iris.data, iris.target) assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9) assert_array_equal(clf.classes_, np.sort(clf.classes_)) # check also the low-level API model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64)) pred = svm.libsvm.predict(iris.data, *model) assert_greater(np.mean(pred == iris.target), .95) model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64), kernel='linear') pred = svm.libsvm.predict(iris.data, *model, kernel='linear') assert_greater(np.mean(pred == iris.target), .95) pred = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_greater(np.mean(pred == iris.target), .95) # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence # we should get deteriministic results (assuming that there is no other # thread calling this wrapper calling `srand` concurrently). pred2 = svm.libsvm.cross_validation(iris.data, iris.target.astype(np.float64), 5, kernel='linear', random_seed=0) assert_array_equal(pred, pred2) def test_single_sample_1d(): # Test whether SVCs work on a single sample given as a 1-d array clf = svm.SVC().fit(X, Y) clf.predict(X[0]) clf = svm.LinearSVC(random_state=0).fit(X, Y) clf.predict(X[0]) def test_precomputed(): # SVC with a precomputed kernel. # We test it with a toy dataset and with iris. 
clf = svm.SVC(kernel='precomputed') # Gram matrix for train data (square matrix) # (we use just a linear kernel) K = np.dot(X, np.array(X).T) clf.fit(K, Y) # Gram matrix for test data (rectangular matrix) KT = np.dot(T, np.array(X).T) pred = clf.predict(KT) assert_raises(ValueError, clf.predict, KT.T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.support_, [1, 3]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. KT = np.zeros_like(KT) for i in range(len(T)): for j in clf.support_: KT[i, j] = np.dot(T[i], X[j]) pred = clf.predict(KT) assert_array_equal(pred, true_result) # same as before, but using a callable function instead of the kernel # matrix. kernel is just a linear kernel kfunc = lambda x, y: np.dot(x, y.T) clf = svm.SVC(kernel=kfunc) clf.fit(X, Y) pred = clf.predict(T) assert_array_equal(clf.dual_coef_, [[-0.25, .25]]) assert_array_equal(clf.intercept_, [0]) assert_array_almost_equal(clf.support_, [1, 3]) assert_array_equal(pred, true_result) # test a precomputed kernel with the iris dataset # and check parameters against a linear SVC clf = svm.SVC(kernel='precomputed') clf2 = svm.SVC(kernel='linear') K = np.dot(iris.data, iris.data.T) clf.fit(K, iris.target) clf2.fit(iris.data, iris.target) pred = clf.predict(K) assert_array_almost_equal(clf.support_, clf2.support_) assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_) assert_array_almost_equal(clf.intercept_, clf2.intercept_) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) # Gram matrix for test data but compute KT[i,j] # for support vectors j only. 
K = np.zeros_like(K) for i in range(len(iris.data)): for j in clf.support_: K[i, j] = np.dot(iris.data[i], iris.data[j]) pred = clf.predict(K) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) clf = svm.SVC(kernel=kfunc) clf.fit(iris.data, iris.target) assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2) def test_svr(): # Test Support Vector Regression diabetes = datasets.load_diabetes() for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0), svm.NuSVR(kernel='linear', nu=.4, C=10.), svm.SVR(kernel='linear', C=10.), svm.LinearSVR(C=10.), svm.LinearSVR(C=10.), ): clf.fit(diabetes.data, diabetes.target) assert_greater(clf.score(diabetes.data, diabetes.target), 0.02) # non-regression test; previously, BaseLibSVM would check that # len(np.unique(y)) < 2, which must only be done for SVC svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data))) svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data))) def test_linearsvr(): # check that SVR(kernel='linear') and LinearSVC() give # comparable results diabetes = datasets.load_diabetes() lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target) score1 = lsvr.score(diabetes.data, diabetes.target) svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target) score2 = svr.score(diabetes.data, diabetes.target) assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1 assert np.abs(score1 - score2) < 0.1 def test_svr_errors(): X = [[0.0], [1.0]] y = [0.0, 0.5] # Bad kernel clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]])) clf.fit(X, y) assert_raises(ValueError, clf.predict, X) def test_oneclass(): # Test OneClassSVM clf = svm.OneClassSVM() clf.fit(X) pred = clf.predict(T) assert_array_almost_equal(pred, [-1, -1, -1]) assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3) assert_array_almost_equal(clf.dual_coef_, [[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]], decimal=3) assert_raises(ValueError, lambda: clf.coef_) def 
test_oneclass_decision_function(): # Test OneClassSVM decision function clf = svm.OneClassSVM() rnd = check_random_state(2) # Generate train data X = 0.3 * rnd.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * rnd.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X_train) # predict things y_pred_test = clf.predict(X_test) assert_greater(np.mean(y_pred_test == 1), .9) y_pred_outliers = clf.predict(X_outliers) assert_greater(np.mean(y_pred_outliers == -1), .9) dec_func_test = clf.decision_function(X_test) assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1) dec_func_outliers = clf.decision_function(X_outliers) assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1) def test_tweak_params(): # Make sure some tweaking of parameters works. # We change clf.dual_coef_ at run time and expect .predict() to change # accordingly. Notice that this is not trivial since it involves a lot # of C/Python copying in the libsvm bindings. # The success of this test ensures that the mapping between libsvm and # the python classifier is complete. clf = svm.SVC(kernel='linear', C=1.0) clf.fit(X, Y) assert_array_equal(clf.dual_coef_, [[-.25, .25]]) assert_array_equal(clf.predict([[-.1, -.1]]), [1]) clf._dual_coef_ = np.array([[.0, 1.]]) assert_array_equal(clf.predict([[-.1, -.1]]), [2]) def test_probability(): # Predict probabilities using SVC # This uses cross validation, so we use a slightly bigger testing set. 
for clf in (svm.SVC(probability=True, random_state=0, C=1.0), svm.NuSVC(probability=True, random_state=0)): clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal( np.sum(prob_predict, 1), np.ones(iris.data.shape[0])) assert_true(np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8) def test_decision_function(): # Test decision_function # Sanity check, test that decision_function implemented in python # returns the same as the one in libsvm # multi class: clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(iris.data, iris.target) dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_ assert_array_almost_equal(dec, clf.decision_function(iris.data)) # binary: clf.fit(X, Y) dec = np.dot(X, clf.coef_.T) + clf.intercept_ prediction = clf.predict(X) assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) assert_array_almost_equal( prediction, clf.classes_[(clf.decision_function(X) > 0).astype(np.int)]) expected = np.array([-1., -0.66, -1., 0.66, 1., 1.]) assert_array_almost_equal(clf.decision_function(X), expected, 2) # kernel binary: clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo') clf.fit(X, Y) rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma) dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_ assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) def test_decision_function_shape(): # check that decision_function_shape='ovr' gives # correct shape and is consistent with predict clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(iris.data, iris.target) dec = clf.decision_function(iris.data) assert_equal(dec.shape, (len(iris.data), 3)) assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1)) # with five classes: X, y = make_blobs(n_samples=80, centers=5, random_state=0) X_train, X_test, y_train, y_test = 
train_test_split(X, y, random_state=0) clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovr').fit(X_train, y_train) dec = clf.decision_function(X_test) assert_equal(dec.shape, (len(X_test), 5)) assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1)) # check shape of ovo_decition_function=True clf = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo').fit(X_train, y_train) dec = clf.decision_function(X_train) assert_equal(dec.shape, (len(X_train), 10)) # check deprecation warning clf.decision_function_shape = None msg = "change the shape of the decision function" dec = assert_warns_message(ChangedBehaviorWarning, msg, clf.decision_function, X_train) assert_equal(dec.shape, (len(X_train), 10)) def test_svr_decision_function(): # Test SVR's decision_function # Sanity check, test that decision_function implemented in python # returns the same as the one in libsvm X = iris.data y = iris.target # linear kernel reg = svm.SVR(kernel='linear', C=0.1).fit(X, y) dec = np.dot(X, reg.coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel()) # rbf kernel reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y) rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma) dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_ assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel()) def test_weight(): # Test class weights clf = svm.SVC(class_weight={1: 0.1}) # we give a small weights to class 1 clf.fit(X, Y) # so all predicted values belong to class 2 assert_array_almost_equal(clf.predict(X), [2] * 6) X_, y_ = make_classification(n_samples=200, n_features=10, weights=[0.833, 0.167], random_state=2) for clf in (linear_model.LogisticRegression(), svm.LinearSVC(random_state=0), svm.SVC()): clf.set_params(class_weight={0: .1, 1: 10}) clf.fit(X_[:100], y_[:100]) y_pred = clf.predict(X_[100:]) assert_true(f1_score(y_[100:], y_pred) > .3) def test_sample_weights(): # Test weights on individual samples # TODO: 
check on NuSVR, OneClass, etc. clf = svm.SVC() clf.fit(X, Y) assert_array_equal(clf.predict(X[2]), [1.]) sample_weight = [.1] * 3 + [10] * 3 clf.fit(X, Y, sample_weight=sample_weight) assert_array_equal(clf.predict(X[2]), [2.]) # test that rescaling all samples is the same as changing C clf = svm.SVC() clf.fit(X, Y) dual_coef_no_weight = clf.dual_coef_ clf.set_params(C=100) clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X))) assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_) def test_auto_weight(): # Test class weights for imbalanced data from sklearn.linear_model import LogisticRegression # We take as dataset the two-dimensional projection of iris so # that it is not separable and remove half of predictors from # class 1. # We add one to the targets as a non-regression test: class_weight="balanced" # used to work only when the labels where a range [0..K). from sklearn.utils import compute_class_weight X, y = iris.data[:, :2], iris.target + 1 unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2]) classes = np.unique(y[unbalanced]) class_weights = compute_class_weight('balanced', classes, y[unbalanced]) assert_true(np.argmax(class_weights) == 2) for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0), LogisticRegression()): # check that score is better when class='balanced' is set. y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X) clf.set_params(class_weight='balanced') y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X) assert_true(metrics.f1_score(y, y_pred, average='weighted') <= metrics.f1_score(y, y_pred_balanced, average='weighted')) def test_bad_input(): # Test that it gives proper exception on deficient input # impossible value of C assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y) # impossible value of nu clf = svm.NuSVC(nu=0.0) assert_raises(ValueError, clf.fit, X, Y) Y2 = Y[:-1] # wrong dimensions for labels assert_raises(ValueError, clf.fit, X, Y2) # Test with arrays that are non-contiguous. 
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)): Xf = np.asfortranarray(X) assert_false(Xf.flags['C_CONTIGUOUS']) yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T) yf = yf[:, -1] assert_false(yf.flags['F_CONTIGUOUS']) assert_false(yf.flags['C_CONTIGUOUS']) clf.fit(Xf, yf) assert_array_equal(clf.predict(T), true_result) # error for precomputed kernelsx clf = svm.SVC(kernel='precomputed') assert_raises(ValueError, clf.fit, X, Y) # sample_weight bad dimensions clf = svm.SVC() assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1)) # predict with sparse input when trained with dense clf = svm.SVC().fit(X, Y) assert_raises(ValueError, clf.predict, sparse.lil_matrix(X)) Xt = np.array(X).T clf.fit(np.dot(X, Xt), Y) assert_raises(ValueError, clf.predict, X) clf = svm.SVC() clf.fit(X, Y) assert_raises(ValueError, clf.predict, Xt) def test_sparse_precomputed(): clf = svm.SVC(kernel='precomputed') sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]]) try: clf.fit(sparse_gram, [0, 1]) assert not "reached" except TypeError as e: assert_in("Sparse precomputed", str(e)) def test_linearsvc_parameters(): # Test possible parameter combinations in LinearSVC # Generate list of possible parameter combinations losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo'] penalties, duals = ['l1', 'l2', 'bar'], [True, False] X, y = make_classification(n_samples=5, n_features=5) for loss, penalty, dual in itertools.product(losses, penalties, duals): clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual) if ((loss, penalty) == ('hinge', 'l1') or (loss, penalty, dual) == ('hinge', 'l2', False) or (penalty, dual) == ('l1', True) or loss == 'foo' or penalty == 'bar'): assert_raises_regexp(ValueError, "Unsupported set of arguments.*penalty='%s.*" "loss='%s.*dual=%s" % (penalty, loss, dual), clf.fit, X, y) else: clf.fit(X, y) # Incorrect loss value - test if explicit error message is raised assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*", 
svm.LinearSVC(loss="l3").fit, X, y) # FIXME remove in 1.0 def test_linearsvx_loss_penalty_deprecations(): X, y = [[0.0], [1.0]], [0, 1] msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. Backward compatibility" " for the %s will be removed in %s") # LinearSVC # loss l1/L1 --> hinge assert_warns_message(DeprecationWarning, msg % ("l1", "hinge", "loss='l1'", "1.0"), svm.LinearSVC(loss="l1").fit, X, y) # loss l2/L2 --> squared_hinge assert_warns_message(DeprecationWarning, msg % ("L2", "squared_hinge", "loss='L2'", "1.0"), svm.LinearSVC(loss="L2").fit, X, y) # LinearSVR # loss l1/L1 --> epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("L1", "epsilon_insensitive", "loss='L1'", "1.0"), svm.LinearSVR(loss="L1").fit, X, y) # loss l2/L2 --> squared_epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("l2", "squared_epsilon_insensitive", "loss='l2'", "1.0"), svm.LinearSVR(loss="l2").fit, X, y) # FIXME remove in 0.18 def test_linear_svx_uppercase_loss_penalty(): # Check if Upper case notation is supported by _fit_liblinear # which is called by fit X, y = [[0.0], [1.0]], [0, 1] msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. 
Backward compatibility" " for the uppercase notation will be removed in %s") # loss SQUARED_hinge --> squared_hinge assert_warns_message(DeprecationWarning, msg % ("SQUARED_hinge", "squared_hinge", "0.18"), svm.LinearSVC(loss="SQUARED_hinge").fit, X, y) # penalty L2 --> l2 assert_warns_message(DeprecationWarning, msg.replace("loss", "penalty") % ("L2", "l2", "0.18"), svm.LinearSVC(penalty="L2").fit, X, y) # loss EPSILON_INSENSITIVE --> epsilon_insensitive assert_warns_message(DeprecationWarning, msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive", "0.18"), svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y) def test_linearsvc(): # Test basic routines using LinearSVC clf = svm.LinearSVC(random_state=0).fit(X, Y) # by default should have intercept assert_true(clf.fit_intercept) assert_array_equal(clf.predict(T), true_result) assert_array_almost_equal(clf.intercept_, [0], decimal=3) # the same with l1 penalty clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty with dual formulation clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y) assert_array_equal(clf.predict(T), true_result) # l2 penalty, l1 loss clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0) clf.fit(X, Y) assert_array_equal(clf.predict(T), true_result) # test also decision function dec = clf.decision_function(T) res = (dec > 0).astype(np.int) + 1 assert_array_equal(res, true_result) def test_linearsvc_crammer_singer(): # Test LinearSVC with crammer_singer multi-class svm ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target) cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0) cs_clf.fit(iris.data, iris.target) # similar prediction for ovr and crammer-singer: assert_true((ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > .9) # classifiers shouldn't be the same assert_true((ovr_clf.coef_ != cs_clf.coef_).all()) # test 
decision function assert_array_equal(cs_clf.predict(iris.data), np.argmax(cs_clf.decision_function(iris.data), axis=1)) dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_ assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data)) def test_crammer_singer_binary(): # Test Crammer-Singer formulation in the binary case X, y = make_classification(n_classes=2, random_state=0) for fit_intercept in (True, False): acc = svm.LinearSVC(fit_intercept=fit_intercept, multi_class="crammer_singer", random_state=0).fit(X, y).score(X, y) assert_greater(acc, 0.9) def test_linearsvc_iris(): # Test that LinearSVC gives plausible predictions on the iris dataset # Also, test symbolic class names (classes_). target = iris.target_names[iris.target] clf = svm.LinearSVC(random_state=0).fit(iris.data, target) assert_equal(set(clf.classes_), set(iris.target_names)) assert_greater(np.mean(clf.predict(iris.data) == target), 0.8) dec = clf.decision_function(iris.data) pred = iris.target_names[np.argmax(dec, 1)] assert_array_equal(pred, clf.predict(iris.data)) def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC): # Test that dense liblinear honours intercept_scaling param X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge', dual=False, C=4, tol=1e-7, random_state=0) assert_true(clf.intercept_scaling == 1, clf.intercept_scaling) assert_true(clf.fit_intercept) # when intercept_scaling is low the intercept value is highly "penalized" # by regularization clf.intercept_scaling = 1 clf.fit(X, y) assert_almost_equal(clf.intercept_, 0, decimal=5) # when intercept_scaling is sufficiently high, the intercept value # is not affected by regularization clf.intercept_scaling = 100 clf.fit(X, y) intercept1 = clf.intercept_ assert_less(intercept1, -1) # when intercept_scaling is sufficiently high, the intercept value # doesn't depend on intercept_scaling value clf.intercept_scaling = 1000 
clf.fit(X, y) intercept2 = clf.intercept_ assert_array_almost_equal(intercept1, intercept2, decimal=2) def test_liblinear_set_coef(): # multi-class case clf = svm.LinearSVC().fit(iris.data, iris.target) values = clf.decision_function(iris.data) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(iris.data) assert_array_almost_equal(values, values2) # binary-class case X = [[2, 1], [3, 1], [1, 3], [2, 3]] y = [0, 0, 1, 1] clf = svm.LinearSVC().fit(X, y) values = clf.decision_function(X) clf.coef_ = clf.coef_.copy() clf.intercept_ = clf.intercept_.copy() values2 = clf.decision_function(X) assert_array_equal(values, values2) def test_immutable_coef_property(): # Check that primal coef modification are not silently ignored svms = [ svm.SVC(kernel='linear').fit(iris.data, iris.target), svm.NuSVC(kernel='linear').fit(iris.data, iris.target), svm.SVR(kernel='linear').fit(iris.data, iris.target), svm.NuSVR(kernel='linear').fit(iris.data, iris.target), svm.OneClassSVM(kernel='linear').fit(iris.data), ] for clf in svms: assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3)) assert_raises((RuntimeError, ValueError), clf.coef_.__setitem__, (0, 0), 0) def test_linearsvc_verbose(): # stdout: redirect import os stdout = os.dup(1) # save original stdout os.dup2(os.pipe()[1], 1) # replace it # actual call clf = svm.LinearSVC(verbose=1) clf.fit(X, Y) # stdout: restore os.dup2(stdout, 1) # restore original stdout def test_svc_clone_with_callable_kernel(): # create SVM with callable linear kernel, check that results are the same # as with built-in linear kernel svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, decision_function_shape='ovr') # clone for checking clonability with lambda functions.. 
svm_cloned = base.clone(svm_callable) svm_cloned.fit(iris.data, iris.target) svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0, decision_function_shape='ovr') svm_builtin.fit(iris.data, iris.target) assert_array_almost_equal(svm_cloned.dual_coef_, svm_builtin.dual_coef_) assert_array_almost_equal(svm_cloned.intercept_, svm_builtin.intercept_) assert_array_equal(svm_cloned.predict(iris.data), svm_builtin.predict(iris.data)) assert_array_almost_equal(svm_cloned.predict_proba(iris.data), svm_builtin.predict_proba(iris.data), decimal=4) assert_array_almost_equal(svm_cloned.decision_function(iris.data), svm_builtin.decision_function(iris.data)) def test_svc_bad_kernel(): svc = svm.SVC(kernel=lambda x, y: x) assert_raises(ValueError, svc.fit, X, Y) def test_timeout(): a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0, max_iter=1) assert_warns(ConvergenceWarning, a.fit, X, Y) def test_unfitted(): X = "foo!" # input validation not required when SVM not fitted clf = svm.SVC() assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b", clf.predict, X) clf = svm.NuSVR() assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b", clf.predict, X) # ignore convergence warnings from max_iter=1 @ignore_warnings def test_consistent_proba(): a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_1 = a.fit(X, Y).predict_proba(X) a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_2 = a.fit(X, Y).predict_proba(X) assert_array_almost_equal(proba_1, proba_2) def test_linear_svc_convergence_warnings(): # Test that warnings are raised if model does not converge lsvc = svm.LinearSVC(max_iter=2, verbose=1) assert_warns(ConvergenceWarning, lsvc.fit, X, Y) assert_equal(lsvc.n_iter_, 2) def test_svr_coef_sign(): # Test that SVR(kernel="linear") has coef_ with the right sign. # Non-regression test for #2933. 
X = np.random.RandomState(21).randn(10, 3) y = np.random.RandomState(12).randn(10) for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'), svm.LinearSVR()]: svr.fit(X, y) assert_array_almost_equal(svr.predict(X), np.dot(X, svr.coef_.ravel()) + svr.intercept_) def test_linear_svc_intercept_scaling(): # Test that the right error message is thrown when intercept_scaling <= 0 for i in [-1, 0]: lsvc = svm.LinearSVC(intercept_scaling=i) msg = ('Intercept scaling is %r but needs to be greater than 0.' ' To disable fitting an intercept,' ' set fit_intercept=False.' % lsvc.intercept_scaling) assert_raise_message(ValueError, msg, lsvc.fit, X, Y) def test_lsvc_intercept_scaling_zero(): # Test that intercept_scaling is ignored when fit_intercept is False lsvc = svm.LinearSVC(fit_intercept=False) lsvc.fit(X, Y) assert_equal(lsvc.intercept_, 0.) def test_hasattr_predict_proba(): # Method must be (un)available before or after fit, switched by # `probability` param G = svm.SVC(probability=True) assert_true(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_true(hasattr(G, 'predict_proba')) G = svm.SVC(probability=False) assert_false(hasattr(G, 'predict_proba')) G.fit(iris.data, iris.target) assert_false(hasattr(G, 'predict_proba')) # Switching to `probability=True` after fitting should make # predict_proba available, but calling it must not work: G.probability = True assert_true(hasattr(G, 'predict_proba')) msg = "predict_proba is not available when fitted with probability=False" assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
bsd-3-clause
maheshakya/scikit-learn
examples/applications/face_recognition.py
42
5390
""" =================================================== Faces recognition example using eigenfaces and SVMs =================================================== The dataset used in this example is a preprocessed excerpt of the "Labeled Faces in the Wild", aka LFW_: http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB) .. _LFW: http://vis-www.cs.umass.edu/lfw/ Expected results for the top 5 most represented people in the dataset:: precision recall f1-score support Gerhard_Schroeder 0.91 0.75 0.82 28 Donald_Rumsfeld 0.84 0.82 0.83 33 Tony_Blair 0.65 0.82 0.73 34 Colin_Powell 0.78 0.88 0.83 58 George_W_Bush 0.93 0.86 0.90 129 avg / total 0.86 0.84 0.85 282 """ from __future__ import print_function from time import time import logging import matplotlib.pyplot as plt from sklearn.cross_validation import train_test_split from sklearn.datasets import fetch_lfw_people from sklearn.grid_search import GridSearchCV from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.decomposition import RandomizedPCA from sklearn.svm import SVC print(__doc__) # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s') ############################################################################### # Download the data, if not already on disk and load it as numpy arrays lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4) # introspect the images arrays to find the shapes (for plotting) n_samples, h, w = lfw_people.images.shape # for machine learning we use the 2 data directly (as relative pixel # positions info is ignored by this model) X = lfw_people.data n_features = X.shape[1] # the label to predict is the id of the person y = lfw_people.target target_names = lfw_people.target_names n_classes = target_names.shape[0] print("Total dataset size:") print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) print("n_classes: %d" % n_classes) 
############################################################################### # Split into a training set and a test set using a stratified k fold # split into a training and testing set X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25) ############################################################################### # Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled # dataset): unsupervised feature extraction / dimensionality reduction n_components = 150 print("Extracting the top %d eigenfaces from %d faces" % (n_components, X_train.shape[0])) t0 = time() pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train) print("done in %0.3fs" % (time() - t0)) eigenfaces = pca.components_.reshape((n_components, h, w)) print("Projecting the input data on the eigenfaces orthonormal basis") t0 = time() X_train_pca = pca.transform(X_train) X_test_pca = pca.transform(X_test) print("done in %0.3fs" % (time() - t0)) ############################################################################### # Train a SVM classification model print("Fitting the classifier to the training set") t0 = time() param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5], 'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], } clf = GridSearchCV(SVC(kernel='rbf', class_weight='auto'), param_grid) clf = clf.fit(X_train_pca, y_train) print("done in %0.3fs" % (time() - t0)) print("Best estimator found by grid search:") print(clf.best_estimator_) ############################################################################### # Quantitative evaluation of the model quality on the test set print("Predicting people's names on the test set") t0 = time() y_pred = clf.predict(X_test_pca) print("done in %0.3fs" % (time() - t0)) print(classification_report(y_test, y_pred, target_names=target_names)) print(confusion_matrix(y_test, y_pred, labels=range(n_classes))) ############################################################################### # Qualitative evaluation of 
the predictions using matplotlib def plot_gallery(images, titles, h, w, n_row=3, n_col=4): """Helper function to plot a gallery of portraits""" plt.figure(figsize=(1.8 * n_col, 2.4 * n_row)) plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35) for i in range(n_row * n_col): plt.subplot(n_row, n_col, i + 1) plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray) plt.title(titles[i], size=12) plt.xticks(()) plt.yticks(()) # plot the result of the prediction on a portion of the test set def title(y_pred, y_test, target_names, i): pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1] true_name = target_names[y_test[i]].rsplit(' ', 1)[-1] return 'predicted: %s\ntrue: %s' % (pred_name, true_name) prediction_titles = [title(y_pred, y_test, target_names, i) for i in range(y_pred.shape[0])] plot_gallery(X_test, prediction_titles, h, w) # plot the gallery of the most significative eigenfaces eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])] plot_gallery(eigenfaces, eigenface_titles, h, w) plt.show()
bsd-3-clause
liangz0707/scikit-learn
sklearn/datasets/tests/test_rcv1.py
322
2414
"""Test the rcv1 loader. Skipped if rcv1 is not already downloaded to data_home. """ import errno import scipy.sparse as sp import numpy as np from sklearn.datasets import fetch_rcv1 from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import SkipTest def test_fetch_rcv1(): try: data1 = fetch_rcv1(shuffle=False, download_if_missing=False) except IOError as e: if e.errno == errno.ENOENT: raise SkipTest("Download RCV1 dataset to run this test.") X1, Y1 = data1.data, data1.target cat_list, s1 = data1.target_names.tolist(), data1.sample_id # test sparsity assert_true(sp.issparse(X1)) assert_true(sp.issparse(Y1)) assert_equal(60915113, X1.data.size) assert_equal(2606875, Y1.data.size) # test shapes assert_equal((804414, 47236), X1.shape) assert_equal((804414, 103), Y1.shape) assert_equal((804414,), s1.shape) assert_equal(103, len(cat_list)) # test ordering of categories first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151'] assert_array_equal(first_categories, cat_list[:6]) # test number of sample for some categories some_categories = ('GMIL', 'E143', 'CCAT') number_non_zero_in_cat = (5, 1206, 381327) for num, cat in zip(number_non_zero_in_cat, some_categories): j = cat_list.index(cat) assert_equal(num, Y1[:, j].data.size) # test shuffling and subset data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77, download_if_missing=False) X2, Y2 = data2.data, data2.target s2 = data2.sample_id # The first 23149 samples are the training samples assert_array_equal(np.sort(s1[:23149]), np.sort(s2)) # test some precise values some_sample_ids = (2286, 3274, 14042) for sample_id in some_sample_ids: idx1 = s1.tolist().index(sample_id) idx2 = s2.tolist().index(sample_id) feature_values_1 = X1[idx1, :].toarray() feature_values_2 = X2[idx2, :].toarray() assert_almost_equal(feature_values_1, 
feature_values_2) target_values_1 = Y1[idx1, :].toarray() target_values_2 = Y2[idx2, :].toarray() assert_almost_equal(target_values_1, target_values_2)
bsd-3-clause
JsNoNo/scikit-learn
benchmarks/bench_tree.py
297
3617
""" To run this, you'll need to have installed. * scikit-learn Does two benchmarks First, we fix a training set, increase the number of samples to classify and plot number of classified samples as a function of time. In the second benchmark, we increase the number of dimensions of the training set, classify a sample and plot the time taken as a function of the number of dimensions. """ import numpy as np import pylab as pl import gc from datetime import datetime # to store the results scikit_classifier_results = [] scikit_regressor_results = [] mu_second = 0.0 + 10 ** 6 # number of microseconds in a second def bench_scikit_tree_classifier(X, Y): """Benchmark with scikit-learn decision tree classifier""" from sklearn.tree import DecisionTreeClassifier gc.collect() # start time tstart = datetime.now() clf = DecisionTreeClassifier() clf.fit(X, Y).predict(X) delta = (datetime.now() - tstart) # stop time scikit_classifier_results.append( delta.seconds + delta.microseconds / mu_second) def bench_scikit_tree_regressor(X, Y): """Benchmark with scikit-learn decision tree regressor""" from sklearn.tree import DecisionTreeRegressor gc.collect() # start time tstart = datetime.now() clf = DecisionTreeRegressor() clf.fit(X, Y).predict(X) delta = (datetime.now() - tstart) # stop time scikit_regressor_results.append( delta.seconds + delta.microseconds / mu_second) if __name__ == '__main__': print('============================================') print('Warning: this is going to take a looong time') print('============================================') n = 10 step = 10000 n_samples = 10000 dim = 10 n_classes = 10 for i in range(n): print('============================================') print('Entering iteration %s of %s' % (i, n)) print('============================================') n_samples += step X = np.random.randn(n_samples, dim) Y = np.random.randint(0, n_classes, (n_samples,)) bench_scikit_tree_classifier(X, Y) Y = np.random.randn(n_samples) bench_scikit_tree_regressor(X, Y) 
xx = range(0, n * step, step) pl.figure('scikit-learn tree benchmark results') pl.subplot(211) pl.title('Learning with varying number of samples') pl.plot(xx, scikit_classifier_results, 'g-', label='classification') pl.plot(xx, scikit_regressor_results, 'r-', label='regression') pl.legend(loc='upper left') pl.xlabel('number of samples') pl.ylabel('Time (s)') scikit_classifier_results = [] scikit_regressor_results = [] n = 10 step = 500 start_dim = 500 n_classes = 10 dim = start_dim for i in range(0, n): print('============================================') print('Entering iteration %s of %s' % (i, n)) print('============================================') dim += step X = np.random.randn(100, dim) Y = np.random.randint(0, n_classes, (100,)) bench_scikit_tree_classifier(X, Y) Y = np.random.randn(100) bench_scikit_tree_regressor(X, Y) xx = np.arange(start_dim, start_dim + n * step, step) pl.subplot(212) pl.title('Learning in high dimensional spaces') pl.plot(xx, scikit_classifier_results, 'g-', label='classification') pl.plot(xx, scikit_regressor_results, 'r-', label='regression') pl.legend(loc='upper left') pl.xlabel('number of dimensions') pl.ylabel('Time (s)') pl.axis('tight') pl.show()
bsd-3-clause
vikingMei/mxnet
example/ssd/detect/detector.py
30
7112
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from __future__ import print_function import mxnet as mx import numpy as np from timeit import default_timer as timer from dataset.testdb import TestDB from dataset.iterator import DetIter class Detector(object): """ SSD detector which hold a detection network and wraps detection API Parameters: ---------- symbol : mx.Symbol detection network Symbol model_prefix : str name prefix of trained model epoch : int load epoch of trained model data_shape : int input data resize shape mean_pixels : tuple of float (mean_r, mean_g, mean_b) batch_size : int run detection with batch size ctx : mx.ctx device to use, if None, use mx.cpu() as default context """ def __init__(self, symbol, model_prefix, epoch, data_shape, mean_pixels, \ batch_size=1, ctx=None): self.ctx = ctx if self.ctx is None: self.ctx = mx.cpu() load_symbol, args, auxs = mx.model.load_checkpoint(model_prefix, epoch) if symbol is None: symbol = load_symbol self.mod = mx.mod.Module(symbol, label_names=None, context=ctx) if not isinstance(data_shape, tuple): data_shape = (data_shape, data_shape) self.data_shape = data_shape self.mod.bind(data_shapes=[('data', (batch_size, 3, data_shape[0], data_shape[1]))]) self.mod.set_params(args, auxs) 
self.mean_pixels = mean_pixels def detect(self, det_iter, show_timer=False): """ detect all images in iterator Parameters: ---------- det_iter : DetIter iterator for all testing images show_timer : Boolean whether to print out detection exec time Returns: ---------- list of detection results """ num_images = det_iter._size if not isinstance(det_iter, mx.io.PrefetchingIter): det_iter = mx.io.PrefetchingIter(det_iter) start = timer() detections = self.mod.predict(det_iter).asnumpy() time_elapsed = timer() - start if show_timer: print("Detection time for {} images: {:.4f} sec".format( num_images, time_elapsed)) result = [] for i in range(detections.shape[0]): det = detections[i, :, :] res = det[np.where(det[:, 0] >= 0)[0]] result.append(res) return result def im_detect(self, im_list, root_dir=None, extension=None, show_timer=False): """ wrapper for detecting multiple images Parameters: ---------- im_list : list of str image path or list of image paths root_dir : str directory of input images, optional if image path already has full directory information extension : str image extension, eg. ".jpg", optional Returns: ---------- list of detection results in format [det0, det1...], det is in format np.array([id, score, xmin, ymin, xmax, ymax]...) 
""" test_db = TestDB(im_list, root_dir=root_dir, extension=extension) test_iter = DetIter(test_db, 1, self.data_shape, self.mean_pixels, is_train=False) return self.detect(test_iter, show_timer) def visualize_detection(self, img, dets, classes=[], thresh=0.6): """ visualize detections in one image Parameters: ---------- img : numpy.array image, in bgr format dets : numpy.array ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...]) each row is one object classes : tuple or list of str class names thresh : float score threshold """ import matplotlib.pyplot as plt import random plt.imshow(img) height = img.shape[0] width = img.shape[1] colors = dict() for i in range(dets.shape[0]): cls_id = int(dets[i, 0]) if cls_id >= 0: score = dets[i, 1] if score > thresh: if cls_id not in colors: colors[cls_id] = (random.random(), random.random(), random.random()) xmin = int(dets[i, 2] * width) ymin = int(dets[i, 3] * height) xmax = int(dets[i, 4] * width) ymax = int(dets[i, 5] * height) rect = plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, fill=False, edgecolor=colors[cls_id], linewidth=3.5) plt.gca().add_patch(rect) class_name = str(cls_id) if classes and len(classes) > cls_id: class_name = classes[cls_id] plt.gca().text(xmin, ymin - 2, '{:s} {:.3f}'.format(class_name, score), bbox=dict(facecolor=colors[cls_id], alpha=0.5), fontsize=12, color='white') plt.show() def detect_and_visualize(self, im_list, root_dir=None, extension=None, classes=[], thresh=0.6, show_timer=False): """ wrapper for im_detect and visualize_detection Parameters: ---------- im_list : list of str or str image path or list of image paths root_dir : str or None directory of input images, optional if image path already has full directory information extension : str or None image extension, eg. 
".jpg", optional Returns: ---------- """ import cv2 dets = self.im_detect(im_list, root_dir, extension, show_timer=show_timer) if not isinstance(im_list, list): im_list = [im_list] assert len(dets) == len(im_list) for k, det in enumerate(dets): img = cv2.imread(im_list[k]) img[:, :, (0, 1, 2)] = img[:, :, (2, 1, 0)] self.visualize_detection(img, det, classes, thresh)
apache-2.0
alistairlow/tensorflow
tensorflow/python/estimator/inputs/queues/feeding_queue_runner_test.py
116
5164
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests `FeedingQueueRunner` using arrays and `DataFrames`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.client import session from tensorflow.python.estimator.inputs.queues import feeding_functions as ff from tensorflow.python.framework import ops from tensorflow.python.platform import test from tensorflow.python.training import coordinator from tensorflow.python.training import queue_runner_impl try: # pylint: disable=g-import-not-at-top import pandas as pd HAS_PANDAS = True except IOError: # Pandas writes a temporary file during import. If it fails, don't use pandas. 
HAS_PANDAS = False except ImportError: HAS_PANDAS = False def get_rows(array, row_indices): rows = [array[i] for i in row_indices] return np.vstack(rows) class FeedingQueueRunnerTestCase(test.TestCase): """Tests for `FeedingQueueRunner`.""" def testArrayFeeding(self): with ops.Graph().as_default(): array = np.arange(32).reshape([16, 2]) q = ff._enqueue_data(array, capacity=100) batch_size = 3 dq_op = q.dequeue_many(batch_size) with session.Session() as sess: coord = coordinator.Coordinator() threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord) for i in range(100): indices = [ j % array.shape[0] for j in range(batch_size * i, batch_size * (i + 1)) ] expected_dq = get_rows(array, indices) dq = sess.run(dq_op) np.testing.assert_array_equal(indices, dq[0]) np.testing.assert_array_equal(expected_dq, dq[1]) coord.request_stop() coord.join(threads) def testArrayFeedingMultiThread(self): with ops.Graph().as_default(): array = np.arange(256).reshape([128, 2]) q = ff._enqueue_data(array, capacity=128, num_threads=8, shuffle=True) batch_size = 3 dq_op = q.dequeue_many(batch_size) with session.Session() as sess: coord = coordinator.Coordinator() threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord) for _ in range(100): dq = sess.run(dq_op) indices = dq[0] expected_dq = get_rows(array, indices) np.testing.assert_array_equal(expected_dq, dq[1]) coord.request_stop() coord.join(threads) def testPandasFeeding(self): if not HAS_PANDAS: return with ops.Graph().as_default(): array1 = np.arange(32) array2 = np.arange(32, 64) df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96)) q = ff._enqueue_data(df, capacity=100) batch_size = 5 dq_op = q.dequeue_many(5) with session.Session() as sess: coord = coordinator.Coordinator() threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord) for i in range(100): indices = [ j % array1.shape[0] for j in range(batch_size * i, batch_size * (i + 1)) ] expected_df_indices = 
df.index[indices] expected_rows = df.iloc[indices] dq = sess.run(dq_op) np.testing.assert_array_equal(expected_df_indices, dq[0]) for col_num, col in enumerate(df.columns): np.testing.assert_array_equal(expected_rows[col].values, dq[col_num + 1]) coord.request_stop() coord.join(threads) def testPandasFeedingMultiThread(self): if not HAS_PANDAS: return with ops.Graph().as_default(): array1 = np.arange(128, 256) array2 = 2 * array1 df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128)) q = ff._enqueue_data(df, capacity=128, num_threads=8, shuffle=True) batch_size = 5 dq_op = q.dequeue_many(batch_size) with session.Session() as sess: coord = coordinator.Coordinator() threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord) for _ in range(100): dq = sess.run(dq_op) indices = dq[0] expected_rows = df.iloc[indices] for col_num, col in enumerate(df.columns): np.testing.assert_array_equal(expected_rows[col].values, dq[col_num + 1]) coord.request_stop() coord.join(threads) if __name__ == "__main__": test.main()
apache-2.0
idlead/scikit-learn
examples/bicluster/plot_spectral_biclustering.py
403
2011
""" ============================================= A demo of the Spectral Biclustering algorithm ============================================= This example demonstrates how to generate a checkerboard dataset and bicluster it using the Spectral Biclustering algorithm. The data is generated with the ``make_checkerboard`` function, then shuffled and passed to the Spectral Biclustering algorithm. The rows and columns of the shuffled matrix are rearranged to show the biclusters found by the algorithm. The outer product of the row and column label vectors shows a representation of the checkerboard structure. """ print(__doc__) # Author: Kemal Eren <kemal@kemaleren.com> # License: BSD 3 clause import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import make_checkerboard from sklearn.datasets import samples_generator as sg from sklearn.cluster.bicluster import SpectralBiclustering from sklearn.metrics import consensus_score n_clusters = (4, 3) data, rows, columns = make_checkerboard( shape=(300, 300), n_clusters=n_clusters, noise=10, shuffle=False, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Original dataset") data, row_idx, col_idx = sg._shuffle(data, random_state=0) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Shuffled dataset") model = SpectralBiclustering(n_clusters=n_clusters, method='log', random_state=0) model.fit(data) score = consensus_score(model.biclusters_, (rows[:, row_idx], columns[:, col_idx])) print("consensus score: {:.1f}".format(score)) fit_data = data[np.argsort(model.row_labels_)] fit_data = fit_data[:, np.argsort(model.column_labels_)] plt.matshow(fit_data, cmap=plt.cm.Blues) plt.title("After biclustering; rearranged to show biclusters") plt.matshow(np.outer(np.sort(model.row_labels_) + 1, np.sort(model.column_labels_) + 1), cmap=plt.cm.Blues) plt.title("Checkerboard structure of rearranged data") plt.show()
bsd-3-clause
asimshankar/tensorflow
tensorflow/examples/tutorials/word2vec/word2vec_basic.py
1
12795
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Basic word2vec example.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math import os import sys import argparse import random from tempfile import gettempdir import zipfile import numpy as np from six.moves import urllib from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf from tensorflow.contrib.tensorboard.plugins import projector # Give a folder path as an argument with '--log_dir' to save # TensorBoard summaries. Default is a log folder in current directory. current_path = os.path.dirname(os.path.realpath(sys.argv[0])) parser = argparse.ArgumentParser() parser.add_argument( '--log_dir', type=str, default=os.path.join(current_path, 'log'), help='The log directory for TensorBoard summaries.') FLAGS, unparsed = parser.parse_known_args() # Create the directory for TensorBoard variables if there is not. if not os.path.exists(FLAGS.log_dir): os.makedirs(FLAGS.log_dir) # Step 1: Download the data. 
url = 'http://mattmahoney.net/dc/' # pylint: disable=redefined-outer-name def maybe_download(filename, expected_bytes): """Download a file if not present, and make sure it's the right size.""" local_filename = os.path.join(gettempdir(), filename) if not os.path.exists(local_filename): local_filename, _ = urllib.request.urlretrieve(url + filename, local_filename) statinfo = os.stat(local_filename) if statinfo.st_size == expected_bytes: print('Found and verified', filename) else: print(statinfo.st_size) raise Exception('Failed to verify ' + local_filename + '. Can you get to it with a browser?') return local_filename filename = maybe_download('text8.zip', 31344016) # Read the data into a list of strings. def read_data(filename): """Extract the first file enclosed in a zip file as a list of words.""" with zipfile.ZipFile(filename) as f: data = tf.compat.as_str(f.read(f.namelist()[0])).split() return data vocabulary = read_data(filename) print('Data size', len(vocabulary)) # Step 2: Build the dictionary and replace rare words with UNK token. vocabulary_size = 50000 def build_dataset(words, n_words): """Process raw inputs into a dataset.""" count = [('UNK', -1)] count.extend(collections.Counter(words).most_common(n_words - 1)) dictionary = dict() for word, _ in count: dictionary[word] = len(dictionary) data = list() unk_count = 0 for word in words: index = dictionary.get(word, 0) if index == 0: # dictionary['UNK'] unk_count += 1 data.append(index) count[0][1] = unk_count reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys())) return data, count, dictionary, reversed_dictionary # Filling 4 global variables: # data - list of codes (integers from 0 to vocabulary_size-1). 
# This is the original text but words are replaced by their codes # count - map of words(strings) to count of occurrences # dictionary - map of words(strings) to their codes(integers) # reverse_dictionary - maps codes(integers) to words(strings) data, count, dictionary, reverse_dictionary = build_dataset( vocabulary, vocabulary_size) del vocabulary # Hint to reduce memory. print('Most common words (+UNK)', count[:5]) print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]]) data_index = 0 # Step 3: Function to generate a training batch for the skip-gram model. def generate_batch(batch_size, num_skips, skip_window): global data_index assert batch_size % num_skips == 0 assert num_skips <= 2 * skip_window batch = np.ndarray(shape=(batch_size), dtype=np.int32) labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32) span = 2 * skip_window + 1 # [ skip_window target skip_window ] buffer = collections.deque(maxlen=span) # pylint: disable=redefined-builtin if data_index + span > len(data): data_index = 0 buffer.extend(data[data_index:data_index + span]) data_index += span for i in range(batch_size // num_skips): context_words = [w for w in range(span) if w != skip_window] words_to_use = random.sample(context_words, num_skips) for j, context_word in enumerate(words_to_use): batch[i * num_skips + j] = buffer[skip_window] labels[i * num_skips + j, 0] = buffer[context_word] if data_index == len(data): buffer.extend(data[0:span]) data_index = span else: buffer.append(data[data_index]) data_index += 1 # Backtrack a little bit to avoid skipping words in the end of a batch data_index = (data_index + len(data) - span) % len(data) return batch, labels batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1) for i in range(8): print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0], reverse_dictionary[labels[i, 0]]) # Step 4: Build and train a skip-gram model. batch_size = 128 embedding_size = 128 # Dimension of the embedding vector. 
skip_window = 1 # How many words to consider left and right. num_skips = 2 # How many times to reuse an input to generate a label. num_sampled = 64 # Number of negative examples to sample. # We pick a random validation set to sample nearest neighbors. Here we limit the # validation samples to the words that have a low numeric ID, which by # construction are also the most frequent. These 3 variables are used only for # displaying model accuracy, they don't affect calculation. valid_size = 16 # Random set of words to evaluate similarity on. valid_window = 100 # Only pick dev samples in the head of the distribution. valid_examples = np.random.choice(valid_window, valid_size, replace=False) graph = tf.Graph() with graph.as_default(): # Input data. with tf.name_scope('inputs'): train_inputs = tf.placeholder(tf.int32, shape=[batch_size]) train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1]) valid_dataset = tf.constant(valid_examples, dtype=tf.int32) # Ops and variables pinned to the CPU because of missing GPU implementation with tf.device('/cpu:0'): # Look up embeddings for inputs. with tf.name_scope('embeddings'): embeddings = tf.Variable( tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0)) embed = tf.nn.embedding_lookup(embeddings, train_inputs) # Construct the variables for the NCE loss with tf.name_scope('weights'): nce_weights = tf.Variable( tf.truncated_normal( [vocabulary_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size))) with tf.name_scope('biases'): nce_biases = tf.Variable(tf.zeros([vocabulary_size])) # Compute the average NCE loss for the batch. # tf.nce_loss automatically draws a new sample of the negative labels each # time we evaluate the loss. 
# Explanation of the meaning of NCE loss: # http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/ with tf.name_scope('loss'): loss = tf.reduce_mean( tf.nn.nce_loss( weights=nce_weights, biases=nce_biases, labels=train_labels, inputs=embed, num_sampled=num_sampled, num_classes=vocabulary_size)) # Add the loss value as a scalar to summary. tf.summary.scalar('loss', loss) # Construct the SGD optimizer using a learning rate of 1.0. with tf.name_scope('optimizer'): optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss) # Compute the cosine similarity between minibatch examples and all embeddings. norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True)) normalized_embeddings = embeddings / norm valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset) similarity = tf.matmul( valid_embeddings, normalized_embeddings, transpose_b=True) # Merge all summaries. merged = tf.summary.merge_all() # Add variable initializer. init = tf.global_variables_initializer() # Create a saver. saver = tf.train.Saver() # Step 5: Begin training. num_steps = 100001 with tf.Session(graph=graph) as session: # Open a writer to write summaries. writer = tf.summary.FileWriter(FLAGS.log_dir, session.graph) # We must initialize all variables before we use them. init.run() print('Initialized') average_loss = 0 for step in xrange(num_steps): batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window) feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels} # Define metadata variable. run_metadata = tf.RunMetadata() # We perform one update step by evaluating the optimizer op (including it # in the list of returned values for session.run() # Also, evaluate the merged op to get all summaries from the returned "summary" variable. # Feed metadata variable to session for visualizing the graph in TensorBoard. 
_, summary, loss_val = session.run( [optimizer, merged, loss], feed_dict=feed_dict, run_metadata=run_metadata) average_loss += loss_val # Add returned summaries to writer in each step. writer.add_summary(summary, step) # Add metadata to visualize the graph for the last run. if step == (num_steps - 1): writer.add_run_metadata(run_metadata, 'step%d' % step) if step % 2000 == 0: if step > 0: average_loss /= 2000 # The average loss is an estimate of the loss over the last 2000 batches. print('Average loss at step ', step, ': ', average_loss) average_loss = 0 # Note that this is expensive (~20% slowdown if computed every 500 steps) if step % 10000 == 0: sim = similarity.eval() for i in xrange(valid_size): valid_word = reverse_dictionary[valid_examples[i]] top_k = 8 # number of nearest neighbors nearest = (-sim[i, :]).argsort()[1:top_k + 1] log_str = 'Nearest to %s:' % valid_word for k in xrange(top_k): close_word = reverse_dictionary[nearest[k]] log_str = '%s %s,' % (log_str, close_word) print(log_str) final_embeddings = normalized_embeddings.eval() # Write corresponding labels for the embeddings. with open(FLAGS.log_dir + '/metadata.tsv', 'w') as f: for i in xrange(vocabulary_size): f.write(reverse_dictionary[i] + '\n') # Save the model for checkpoints. saver.save(session, os.path.join(FLAGS.log_dir, 'model.ckpt')) # Create a configuration for visualizing embeddings with the labels in TensorBoard. config = projector.ProjectorConfig() embedding_conf = config.embeddings.add() embedding_conf.tensor_name = embeddings.name embedding_conf.metadata_path = os.path.join(FLAGS.log_dir, 'metadata.tsv') projector.visualize_embeddings(writer, config) writer.close() # Step 6: Visualize the embeddings. # pylint: disable=missing-docstring # Function to draw visualization of distance between embeddings. 
def plot_with_labels(low_dim_embs, labels, filename): assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings' plt.figure(figsize=(18, 18)) # in inches for i, label in enumerate(labels): x, y = low_dim_embs[i, :] plt.scatter(x, y) plt.annotate( label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom') plt.savefig(filename) try: # pylint: disable=g-import-not-at-top from sklearn.manifold import TSNE import matplotlib.pyplot as plt tsne = TSNE( perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact') plot_only = 500 low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :]) labels = [reverse_dictionary[i] for i in xrange(plot_only)] plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png')) except ImportError as ex: print('Please install sklearn, matplotlib, and scipy to show embeddings.') print(ex)
apache-2.0
droundy/deft
papers/histogram/figs/animate-dos.py
1
5310
#!/usr/bin/python2 import matplotlib, sys if 'show' not in sys.argv: matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy, time, os, glob, colors, argparse matplotlib.rc('text', usetex=True) import readnew parser = argparse.ArgumentParser(description='Animate the entropy') parser.add_argument('subdir', metavar='s000', type=str, help='the "seed" directory, typically s000') parser.add_argument('periodic_whatever', metavar='periodic-w1.30-ff...', type=str, help='the name of the job') parser.add_argument('methods', metavar='METHOD', type=str, nargs='+', help='the methods to animate') parser.add_argument('--all-frames', action='store_true', help="plot every frame!") args = parser.parse_args() print(args) subdirname = args.subdir filename = args.periodic_whatever suffixes = args.methods print((sys.argv)) if subdirname[:len('data/')] == 'data/': subdirname = subdirname[len('data/'):] print(('cutting redundant "data/" from first argument:', subdirname)) moviedir = 'figs/movies/%s/%s-dos' % (subdirname, filename) os.system('rm -rf ' + moviedir) assert not os.system('mkdir -p ' + moviedir) fig, ax = plt.subplots() mine = 1e100 maxe = -1e100 numframes = 0 dataformat = 'data/%s/%s-%%s-movie/%%06d' % (subdirname, filename) lastframe = -1 alldone = False for frame in range(1, 100000): if alldone: break for suffix in suffixes: basename = dataformat % (suffix, frame) try: e, lndos = readnew.e_lndos(basename) except (KeyboardInterrupt, SystemExit): raise except: alldone = True break numframes = frame+1 if len(e[lndos != lndos[-1]]) > 1: mine = min(mine, e[lndos != lndos[-1]].min() - 5) if len(e[lndos != lndos[0]]): maxe = max(maxe, e[lndos != lndos[0]].max()+5) if numframes % 25 == 0 and frame != lastframe: print(('counting %dth frame' % numframes)) lastframe = frame bestframe = sorted(glob.glob('data/%s/%s-%s-movie/*-lndos.dat' % (subdirname, filename, suffixes[0])))[-1] best_e, best_lndos = readnew.e_lndos(bestframe) print(('best data is', bestframe)) maxlndos = 
best_lndos.max() minlndos = best_lndos.min() print(('counted %d frames' % numframes)) print(('mine', mine)) print(('maxe', maxe)) print(('minlndos', minlndos)) print(('maxlndos', maxlndos)) skipby = 1 maxframes = 200 if numframes > maxframes and not args.all_frames: skipby = numframes // maxframes numframes = numframes // skipby print(('only showing 1/%d of the frames' % skipby)) print(('numframes', numframes)) for frame in range(1, numframes+1): if frame % 25 == 0: print(('working on frame %d/%d' % (frame, numframes))) plt.cla() ax.plot(best_e, best_lndos, ':', color='0.5') for suffix_index in range(len(suffixes)): suffix = suffixes[suffix_index] basename = dataformat % (suffix, frame*skipby) try: e, lndos, ps, lndostm = readnew.e_lndos_ps_lndostm(basename) colors.plot(e, lndos, method=suffix) #if lndostm is not None and suffix[:2] != 'sa': # colors.plot(e, lndostm, method=suffix+'-tm') datname = basename+'-lndos.dat' min_T = readnew.minT(datname) too_lo, too_hi = readnew.too_low_high_energy(datname) ax.axvline(-readnew.max_entropy_state(datname), color='r', linestyle=':') min_important_energy = int(readnew.min_important_energy(datname)) ax.axvline(-min_important_energy, color='b', linestyle=':') if too_lo is not None and suffix[:3] == 'sad': ax.axvline(-too_lo, color='b', linestyle='--') if too_lo is not None and suffix[:3] == 'sad': ax.axvline(-too_hi, color='r', linestyle='--') # Uncomment the following to plot a line at the # min_important_energy with slope determined by min_T # ax.plot(e, (e+min_important_energy)/min_T + lndos[min_important_energy], colors[suffix_index]+'--') # ax.axvline(-readnew.converged_state(datname), color=colors.color(suffix), linestyle=':') # Uncomment the following to plot the lnw along with the lndos # e, lnw = readnew.e_lnw(basename) # ax.plot(e, -lnw, colors[suffix_index]+':') except (KeyboardInterrupt, SystemExit): raise except Exception as e: print(e) pass ax.set_xlabel(r'$E$') ax.set_ylim(1.1*minlndos, maxlndos+5) # 
ax.set_xlim(-5, -0.3) ax.set_xlim(mine, maxe) ax.set_ylabel(r'$\ln DOS$') # ax.legend(loc='best').get_frame().set_alpha(0.25) if too_lo is not None: plt.title(r'lv movie from %s ($T_{\min} = %g$, $E_{lo} = %g$)' % (filename, min_T, too_lo)) else: plt.title(r'lv movie from %s ($T_{\min} = %g$)' % (filename, min_T)) colors.legend(loc='lower right') fname = '%s/frame%06d.png' % (moviedir, frame) plt.savefig(fname) duration = 10.0 # seconds avconv = "avconv -y -r %g -i %s/frame%%06d.png -b 1000k %s/movie.mp4" % (numframes/duration, moviedir, moviedir) os.system(avconv) # make the movie print(avconv)
gpl-2.0
rlong011/trading-with-python
lib/extra.py
77
2540
''' Created on Apr 28, 2013 Copyright: Jev Kuznetsov License: BSD ''' from __future__ import print_function import sys import urllib import os import xlrd # module for excel file reading import pandas as pd class ProgressBar: def __init__(self, iterations): self.iterations = iterations self.prog_bar = '[]' self.fill_char = '*' self.width = 50 self.__update_amount(0) def animate(self, iteration): print('\r', self, end='') sys.stdout.flush() self.update_iteration(iteration + 1) def update_iteration(self, elapsed_iter): self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0) self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations) def __update_amount(self, new_amount): percent_done = int(round((new_amount / 100.0) * 100.0)) all_full = self.width - 2 num_hashes = int(round((percent_done / 100.0) * all_full)) self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']' pct_place = (len(self.prog_bar) // 2) - len(str(percent_done)) pct_string = '%d%%' % percent_done self.prog_bar = self.prog_bar[0:pct_place] + \ (pct_string + self.prog_bar[pct_place + len(pct_string):]) def __str__(self): return str(self.prog_bar) def getSpyHoldings(dataDir): ''' get SPY holdings from the net, uses temp data storage to save xls file ''' dest = os.path.join(dataDir,"spy_holdings.xls") if os.path.exists(dest): print('File found, skipping download') else: print('saving to', dest) urllib.urlretrieve ("https://www.spdrs.com/site-content/xls/SPY_All_Holdings.xls?fund=SPY&docname=All+Holdings&onyx_code1=1286&onyx_code2=1700", dest) # download xls file and save it to data directory # parse wb = xlrd.open_workbook(dest) # open xls file, create a workbook sh = wb.sheet_by_index(0) # select first sheet data = {'name':[], 'symbol':[], 'weight':[],'sector':[]} for rowNr in range(5,505): # cycle through the rows v = sh.row_values(rowNr) # get all row values data['name'].append(v[0]) data['symbol'].append(v[1]) # symbol is in the second column, 
append it to the list data['weight'].append(float(v[2])) data['sector'].append(v[3]) return pd.DataFrame(data)
bsd-3-clause
miskopo/Logik
benchmark/run_benchmark.py
1
2167
import pandas as pd from controller.controller_unit import Controller from click import progressbar class Benchmark: def __init__(self, arg_parser): self.results = pd.DataFrame(columns=['Solver name', 'Wins', 'Attempts']) self.args = arg_parser def __call__(self, *args, **kwargs): self.generate_data() self.process_data() self.save_data_output() def generate_data(self): # TODO: Separate tests into four lengths and plot it as subplots or whatever, I don't care for solver in Controller.solvers: with progressbar( range(self.args.number_of_tests), show_percent=True, show_eta=True, label="Running benchmark for {}:\t".format(Controller.solvers[solver].__name__)) as bar: for i in bar: game = Controller( solver, self.args, number_of_colors=self.args.number_of_colors, attempts=self.args.attempts, pattern_size=self.args.pattern_size) result = game() self.results = self.results.append( pd.DataFrame([[Controller.solvers[solver].__name__, result[0], result[1]]], columns=self.results.columns), ignore_index=True) def process_data(self): self.results = self.results.loc[self.results['Wins']] del self.results['Wins'] self.results['Attempts'] = pd.to_numeric(self.results['Attempts']) self.results = self.results.groupby('Solver name').mean() def save_data_output(self): # print(self.results) plot = self.results.plot(kind='bar', title="Average of attempts needed to solve game", grid=True, rot=0, legend=False) fig = plot.get_figure() fig.savefig('view/graph.png')
gpl-3.0
rupakc/Kaggle-Compendium
Las Vegas Trip Advisor Review/trip_baseline.py
1
2617
import pandas as pd from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.ensemble import BaggingClassifier from sklearn.ensemble import ExtraTreesClassifier from sklearn import metrics import numpy as np from sklearn.preprocessing import LabelEncoder def get_ensemble_models(): rf = RandomForestClassifier(n_estimators=51,max_depth=5,min_samples_split=3,random_state=42) grad = GradientBoostingClassifier(random_state=42) ada = AdaBoostClassifier(random_state=42) bag = BaggingClassifier(n_estimators=51,random_state=42) extra = ExtraTreesClassifier(n_estimators=51,random_state=42,max_depth=5) classifier_list = [rf,grad,ada,bag,extra] classifier_name_list = ['Random Forests','Gradient Boosting','AdaBoost','Bagging','Extra Trees'] return classifier_list,classifier_name_list def evaluate_models(trained_model,trained_model_name,X_test,y_test): predicted_values = trained_model.predict(X_test) print ('---------- For Model Name : ', trained_model_name, ' ---------\n') print (metrics.classification_report(y_test,predicted_values)) print (metrics.accuracy_score(y_test,predicted_values)) print (metrics.matthews_corrcoef(y_test,predicted_values)) print ('-------------------------------------\n') def label_encode_frame(dataframe): columns = dataframe.columns encoder = LabelEncoder() for column in columns: if type(dataframe[column][0]) is np.nan: for i in range(len(dataframe)): if i > 1000: break if type(dataframe[column][i]) is str: dataframe[column] = encoder.fit_transform(dataframe[column].values) break elif type(dataframe[column][0]) is str: dataframe[column] = encoder.fit_transform(dataframe[column].values) return dataframe train_frame = pd.read_csv('train.csv', sep=';') del train_frame['Nr. 
rooms'] encoded_frame = label_encode_frame(train_frame) target_class_labels = list(encoded_frame['Score'].values) del train_frame['Score'] feature_values = encoded_frame.values X_train,X_test,y_train,y_test = train_test_split(feature_values,target_class_labels,test_size=0.1,random_state=42) classifier_list,classifier_name_list = get_ensemble_models() for classifier,classifier_name in zip(classifier_list,classifier_name_list): classifier.fit(X_train,y_train) evaluate_models(classifier,classifier_name,X_test,y_test)
mit
cmoutard/mne-python
mne/stats/tests/test_cluster_level.py
3
20442
import os import numpy as np from numpy.testing import (assert_equal, assert_array_equal, assert_array_almost_equal) from nose.tools import assert_true, assert_raises from scipy import sparse, linalg, stats from mne.fixes import partial import warnings from mne.parallel import _force_serial from mne.stats.cluster_level import (permutation_cluster_test, permutation_cluster_1samp_test, spatio_temporal_cluster_test, spatio_temporal_cluster_1samp_test, ttest_1samp_no_p, summarize_clusters_stc) from mne.utils import run_tests_if_main, slow_test, _TempDir, catch_logging warnings.simplefilter('always') # enable b/c these tests throw warnings n_space = 50 def _get_conditions(): noise_level = 20 n_time_1 = 20 n_time_2 = 13 normfactor = np.hanning(20).sum() rng = np.random.RandomState(42) condition1_1d = rng.randn(n_time_1, n_space) * noise_level for c in condition1_1d: c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor condition2_1d = rng.randn(n_time_2, n_space) * noise_level for c in condition2_1d: c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor pseudoekp = 10 * np.hanning(25)[None, :] condition1_1d[:, 25:] += pseudoekp condition2_1d[:, 25:] -= pseudoekp condition1_2d = condition1_1d[:, :, np.newaxis] condition2_2d = condition2_1d[:, :, np.newaxis] return condition1_1d, condition2_1d, condition1_2d, condition2_2d def test_cache_dir(): """Test use of cache dir """ tempdir = _TempDir() orig_dir = os.getenv('MNE_CACHE_DIR', None) orig_size = os.getenv('MNE_MEMMAP_MIN_SIZE', None) rng = np.random.RandomState(0) X = rng.randn(9, 2, 10) try: os.environ['MNE_MEMMAP_MIN_SIZE'] = '1K' os.environ['MNE_CACHE_DIR'] = tempdir # Fix error for #1507: in-place when memmapping with catch_logging() as log_file: permutation_cluster_1samp_test( X, buffer_size=None, n_jobs=2, n_permutations=1, seed=0, stat_fun=ttest_1samp_no_p, verbose=False) # ensure that non-independence yields warning stat_fun = partial(ttest_1samp_no_p, sigma=1e-3) assert_true('independently' 
not in log_file.getvalue()) permutation_cluster_1samp_test( X, buffer_size=10, n_jobs=2, n_permutations=1, seed=0, stat_fun=stat_fun, verbose=False) assert_true('independently' in log_file.getvalue()) finally: if orig_dir is not None: os.environ['MNE_CACHE_DIR'] = orig_dir else: del os.environ['MNE_CACHE_DIR'] if orig_size is not None: os.environ['MNE_MEMMAP_MIN_SIZE'] = orig_size else: del os.environ['MNE_MEMMAP_MIN_SIZE'] def test_permutation_step_down_p(): """Test cluster level permutations with step_down_p """ try: try: from sklearn.feature_extraction.image import grid_to_graph except ImportError: from scikits.learn.feature_extraction.image import grid_to_graph # noqa except ImportError: return rng = np.random.RandomState(0) # subjects, time points, spatial points X = rng.randn(9, 2, 10) # add some significant points X[:, 0:2, 0:2] += 2 # span two time points and two spatial points X[:, 1, 5:9] += 0.5 # span four time points with 4x smaller amplitude thresh = 2 # make sure it works when we use ALL points in step-down t, clusters, p, H0 = \ permutation_cluster_1samp_test(X, threshold=thresh, step_down_p=1.0) # make sure using step-down will actually yield improvements sometimes t, clusters, p_old, H0 = \ permutation_cluster_1samp_test(X, threshold=thresh, step_down_p=0.0) assert_equal(np.sum(p_old < 0.05), 1) # just spatial cluster t, clusters, p_new, H0 = \ permutation_cluster_1samp_test(X, threshold=thresh, step_down_p=0.05) assert_equal(np.sum(p_new < 0.05), 2) # time one rescued assert_true(np.all(p_old >= p_new)) def test_cluster_permutation_test(): """Test cluster level permutations tests """ condition1_1d, condition2_1d, condition1_2d, condition2_2d = \ _get_conditions() for condition1, condition2 in zip((condition1_1d, condition1_2d), (condition2_1d, condition2_2d)): T_obs, clusters, cluster_p_values, hist = permutation_cluster_test( [condition1, condition2], n_permutations=100, tail=1, seed=1, buffer_size=None) assert_equal(np.sum(cluster_p_values < 
0.05), 1) T_obs, clusters, cluster_p_values, hist = permutation_cluster_test( [condition1, condition2], n_permutations=100, tail=0, seed=1, buffer_size=None) assert_equal(np.sum(cluster_p_values < 0.05), 1) # test with 2 jobs and buffer_size enabled buffer_size = condition1.shape[1] // 10 T_obs, clusters, cluster_p_values_buff, hist =\ permutation_cluster_test([condition1, condition2], n_permutations=100, tail=0, seed=1, n_jobs=2, buffer_size=buffer_size) assert_array_equal(cluster_p_values, cluster_p_values_buff) @slow_test def test_cluster_permutation_t_test(): """Test cluster level permutations T-test """ condition1_1d, condition2_1d, condition1_2d, condition2_2d = \ _get_conditions() # use a very large sigma to make sure Ts are not independent stat_funs = [ttest_1samp_no_p, partial(ttest_1samp_no_p, sigma=1e-1)] for stat_fun in stat_funs: for condition1 in (condition1_1d, condition1_2d): # these are so significant we can get away with fewer perms T_obs, clusters, cluster_p_values, hist =\ permutation_cluster_1samp_test(condition1, n_permutations=100, tail=0, seed=1, buffer_size=None) assert_equal(np.sum(cluster_p_values < 0.05), 1) T_obs_pos, c_1, cluster_p_values_pos, _ =\ permutation_cluster_1samp_test(condition1, n_permutations=100, tail=1, threshold=1.67, seed=1, stat_fun=stat_fun, buffer_size=None) T_obs_neg, _, cluster_p_values_neg, _ =\ permutation_cluster_1samp_test(-condition1, n_permutations=100, tail=-1, threshold=-1.67, seed=1, stat_fun=stat_fun, buffer_size=None) assert_array_equal(T_obs_pos, -T_obs_neg) assert_array_equal(cluster_p_values_pos < 0.05, cluster_p_values_neg < 0.05) # test with 2 jobs and buffer_size enabled buffer_size = condition1.shape[1] // 10 T_obs_neg_buff, _, cluster_p_values_neg_buff, _ = \ permutation_cluster_1samp_test(-condition1, n_permutations=100, tail=-1, threshold=-1.67, seed=1, n_jobs=2, stat_fun=stat_fun, buffer_size=buffer_size) assert_array_equal(T_obs_neg, T_obs_neg_buff) assert_array_equal(cluster_p_values_neg, 
cluster_p_values_neg_buff) def test_cluster_permutation_with_connectivity(): """Test cluster level permutations with connectivity matrix """ try: try: from sklearn.feature_extraction.image import grid_to_graph except ImportError: from scikits.learn.feature_extraction.image import grid_to_graph except ImportError: return condition1_1d, condition2_1d, condition1_2d, condition2_2d = \ _get_conditions() n_pts = condition1_1d.shape[1] # we don't care about p-values in any of these, so do fewer permutations args = dict(seed=None, max_step=1, exclude=None, step_down_p=0, t_power=1, threshold=1.67, check_disjoint=False, n_permutations=50) did_warn = False for X1d, X2d, func, spatio_temporal_func in \ [(condition1_1d, condition1_2d, permutation_cluster_1samp_test, spatio_temporal_cluster_1samp_test), ([condition1_1d, condition2_1d], [condition1_2d, condition2_2d], permutation_cluster_test, spatio_temporal_cluster_test)]: out = func(X1d, **args) connectivity = grid_to_graph(1, n_pts) out_connectivity = func(X1d, connectivity=connectivity, **args) assert_array_equal(out[0], out_connectivity[0]) for a, b in zip(out_connectivity[1], out[1]): assert_array_equal(out[0][a], out[0][b]) assert_true(np.all(a[b])) # test spatio-temporal w/o time connectivity (repeat spatial pattern) connectivity_2 = sparse.coo_matrix( linalg.block_diag(connectivity.asfptype().todense(), connectivity.asfptype().todense())) if isinstance(X1d, list): X1d_2 = [np.concatenate((x, x), axis=1) for x in X1d] else: X1d_2 = np.concatenate((X1d, X1d), axis=1) out_connectivity_2 = func(X1d_2, connectivity=connectivity_2, **args) # make sure we were operating on the same values split = len(out[0]) assert_array_equal(out[0], out_connectivity_2[0][:split]) assert_array_equal(out[0], out_connectivity_2[0][split:]) # make sure we really got 2x the number of original clusters n_clust_orig = len(out[1]) assert_true(len(out_connectivity_2[1]) == 2 * n_clust_orig) # Make sure that we got the old ones back data_1 = 
set([np.sum(out[0][b[:n_pts]]) for b in out[1]]) data_2 = set([np.sum(out_connectivity_2[0][a]) for a in out_connectivity_2[1][:]]) assert_true(len(data_1.intersection(data_2)) == len(data_1)) # now use the other algorithm if isinstance(X1d, list): X1d_3 = [np.reshape(x, (-1, 2, n_space)) for x in X1d_2] else: X1d_3 = np.reshape(X1d_2, (-1, 2, n_space)) out_connectivity_3 = spatio_temporal_func(X1d_3, n_permutations=50, connectivity=connectivity, max_step=0, threshold=1.67, check_disjoint=True) # make sure we were operating on the same values split = len(out[0]) assert_array_equal(out[0], out_connectivity_3[0][0]) assert_array_equal(out[0], out_connectivity_3[0][1]) # make sure we really got 2x the number of original clusters assert_true(len(out_connectivity_3[1]) == 2 * n_clust_orig) # Make sure that we got the old ones back data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]]) data_2 = set([np.sum(out_connectivity_3[0][a[0], a[1]]) for a in out_connectivity_3[1]]) assert_true(len(data_1.intersection(data_2)) == len(data_1)) # test new versus old method out_connectivity_4 = spatio_temporal_func(X1d_3, n_permutations=50, connectivity=connectivity, max_step=2, threshold=1.67) out_connectivity_5 = spatio_temporal_func(X1d_3, n_permutations=50, connectivity=connectivity, max_step=1, threshold=1.67) # clusters could be in a different order sums_4 = [np.sum(out_connectivity_4[0][a]) for a in out_connectivity_4[1]] sums_5 = [np.sum(out_connectivity_4[0][a]) for a in out_connectivity_5[1]] sums_4 = np.sort(sums_4) sums_5 = np.sort(sums_5) assert_array_almost_equal(sums_4, sums_5) if not _force_serial: assert_raises(ValueError, spatio_temporal_func, X1d_3, n_permutations=1, connectivity=connectivity, max_step=1, threshold=1.67, n_jobs=-1000) # not enough TFCE params assert_raises(KeyError, spatio_temporal_func, X1d_3, connectivity=connectivity, threshold=dict(me='hello')) # too extreme a start threshold with warnings.catch_warnings(record=True) as w: 
spatio_temporal_func(X1d_3, connectivity=connectivity, threshold=dict(start=10, step=1)) if not did_warn: assert_true(len(w) == 1) did_warn = True # too extreme a start threshold assert_raises(ValueError, spatio_temporal_func, X1d_3, connectivity=connectivity, tail=-1, threshold=dict(start=1, step=-1)) assert_raises(ValueError, spatio_temporal_func, X1d_3, connectivity=connectivity, tail=-1, threshold=dict(start=-1, step=1)) # wrong type for threshold assert_raises(TypeError, spatio_temporal_func, X1d_3, connectivity=connectivity, threshold=[]) # wrong value for tail assert_raises(ValueError, spatio_temporal_func, X1d_3, connectivity=connectivity, tail=2) # make sure it actually found a significant point out_connectivity_6 = spatio_temporal_func(X1d_3, n_permutations=50, connectivity=connectivity, max_step=1, threshold=dict(start=1, step=1)) assert_true(np.min(out_connectivity_6[2]) < 0.05) @slow_test def test_permutation_connectivity_equiv(): """Test cluster level permutations with and without connectivity """ try: try: from sklearn.feature_extraction.image import grid_to_graph except ImportError: from scikits.learn.feature_extraction.image import grid_to_graph except ImportError: return rng = np.random.RandomState(0) # subjects, time points, spatial points n_time = 2 n_space = 4 X = rng.randn(6, n_time, n_space) # add some significant points X[:, :, 0:2] += 10 # span two time points and two spatial points X[:, 1, 3] += 20 # span one time point max_steps = [1, 1, 1, 2] # This will run full algorithm in two ways, then the ST-algorithm in 2 ways # All of these should give the same results conns = [None, grid_to_graph(n_time, n_space), grid_to_graph(1, n_space), grid_to_graph(1, n_space)] stat_map = None thresholds = [2, dict(start=1.5, step=1.0)] sig_counts = [2, 5] sdps = [0, 0.05, 0.05] ots = ['mask', 'mask', 'indices'] stat_fun = partial(ttest_1samp_no_p, sigma=1e-3) for thresh, count in zip(thresholds, sig_counts): cs = None ps = None for max_step, conn in 
zip(max_steps, conns): for sdp, ot in zip(sdps, ots): t, clusters, p, H0 = \ permutation_cluster_1samp_test( X, threshold=thresh, connectivity=conn, n_jobs=2, max_step=max_step, stat_fun=stat_fun, step_down_p=sdp, out_type=ot) # make sure our output datatype is correct if ot == 'mask': assert_true(isinstance(clusters[0], np.ndarray)) assert_true(clusters[0].dtype == bool) assert_array_equal(clusters[0].shape, X.shape[1:]) else: # ot == 'indices' assert_true(isinstance(clusters[0], tuple)) # make sure all comparisons were done; for TFCE, no perm # should come up empty if count == 8: assert_true(not np.any(H0 == 0)) inds = np.where(p < 0.05)[0] assert_true(len(inds) == count) this_cs = [clusters[ii] for ii in inds] this_ps = p[inds] this_stat_map = np.zeros((n_time, n_space), dtype=bool) for ci, c in enumerate(this_cs): if isinstance(c, tuple): this_c = np.zeros((n_time, n_space), bool) for x, y in zip(c[0], c[1]): this_stat_map[x, y] = True this_c[x, y] = True this_cs[ci] = this_c c = this_c this_stat_map[c] = True if cs is None: ps = this_ps cs = this_cs if stat_map is None: stat_map = this_stat_map assert_array_equal(ps, this_ps) assert_true(len(cs) == len(this_cs)) for c1, c2 in zip(cs, this_cs): assert_array_equal(c1, c2) assert_array_equal(stat_map, this_stat_map) @slow_test def spatio_temporal_cluster_test_connectivity(): """Test spatio-temporal cluster permutations """ try: try: from sklearn.feature_extraction.image import grid_to_graph except ImportError: from scikits.learn.feature_extraction.image import grid_to_graph except ImportError: return condition1_1d, condition2_1d, condition1_2d, condition2_2d = \ _get_conditions() rng = np.random.RandomState(0) noise1_2d = rng.randn(condition1_2d.shape[0], condition1_2d.shape[1], 10) data1_2d = np.transpose(np.dstack((condition1_2d, noise1_2d)), [0, 2, 1]) noise2_d2 = rng.randn(condition2_2d.shape[0], condition2_2d.shape[1], 10) data2_2d = np.transpose(np.dstack((condition2_2d, noise2_d2)), [0, 2, 1]) conn = 
grid_to_graph(data1_2d.shape[-1], 1) threshold = dict(start=4.0, step=2) T_obs, clusters, p_values_conn, hist = \ spatio_temporal_cluster_test([data1_2d, data2_2d], connectivity=conn, n_permutations=50, tail=1, seed=1, threshold=threshold, buffer_size=None) buffer_size = data1_2d.size // 10 T_obs, clusters, p_values_no_conn, hist = \ spatio_temporal_cluster_test([data1_2d, data2_2d], n_permutations=50, tail=1, seed=1, threshold=threshold, n_jobs=2, buffer_size=buffer_size) assert_equal(np.sum(p_values_conn < 0.05), np.sum(p_values_no_conn < 0.05)) # make sure results are the same without buffer_size T_obs, clusters, p_values2, hist2 = \ spatio_temporal_cluster_test([data1_2d, data2_2d], n_permutations=50, tail=1, seed=1, threshold=threshold, n_jobs=2, buffer_size=None) assert_array_equal(p_values_no_conn, p_values2) def ttest_1samp(X): """Returns T-values """ return stats.ttest_1samp(X, 0)[0] def test_summarize_clusters(): """Test cluster summary stcs """ clu = (np.random.random([1, 20484]), [(np.array([0]), np.array([0, 2, 4]))], np.array([0.02, 0.1]), np.array([12, -14, 30])) stc_sum = summarize_clusters_stc(clu) assert_true(stc_sum.data.shape[1] == 2) clu[2][0] = 0.3 assert_raises(RuntimeError, summarize_clusters_stc, clu) run_tests_if_main()
bsd-3-clause
lcpt/xc
verif/tests/materials/prestressing/test_layout2D_frictLoss_prestress_tendon.py
1
3559
# -*- coding: utf-8 -*- '''Home made test to check the accuracy of the 2D spline interpolation to be used in the layout of prestressing tendons and to check the calculation of prestress losss due to firction. ''' __author__= "Ana Ortega (AO_O) " __copyright__= "Copyright 2016, AO_O" __license__= "GPL" __version__= "3.0" __email__= "ana.ortega@xcengineering.xyz" import numpy as np import matplotlib.pyplot as plt from scipy import interpolate from scipy.spatial import distance import math from materials.prestressing import prestressed_concrete as presconc #Geometry lBeam=20 #beam span [m] #Parabola eEnds=0 #eccentricity of cables at both ends of the beam eMidspan=-0.3 #eccentricity of cables at midspan # Material properties #Prestressing steel sigmap=1600e6 #breaking strength [Pa] sigmapk=1400e6 #characteristic strength [Pa] #Prestressing process mu=0.18 #coefficient of friction between the cables and their sheating k=0.0015 #wobble coefficient per meter length of cable sigmap0max=1400e6 #Initial stress of cable [Pa] # Interpolation n_points_rough=5 #number of points provided to the interpolation algorithm n_points_fine=101 #number of points interpolated # Approximation of the loss of prestressing due to friction # Assimilating the parabolic profile of the cable to a circular profile # the angular deviation is constant in the beam length and can be # expressed as: alphaUnit=8*abs(eMidspan)/lBeam**2 #alpha/x [rad/m] #Exact parabola from model.geometry import geom_utils a,b,c=geom_utils.fit_parabola(x=np.array([0,lBeam/2.0,lBeam]), y=np.array([eEnds,eMidspan,eEnds])) x_parab_rough,y_parab_rough,z_parab_rough=geom_utils.eq_points_parabola(0,lBeam,n_points_rough,a,b,c,0) x_parab_fine,y_parab_fine,z_parab_fine=geom_utils.eq_points_parabola(0,lBeam,n_points_fine,a,b,c,0) aprox_cum_angle=alphaUnit*x_parab_fine aprox_length_sequence=[0]+[distance.euclidean((x_parab_fine[i],y_parab_fine[i],z_parab_fine[i]),(x_parab_fine[i+1],y_parab_fine[i+1],z_parab_fine[i+1])) for i in 
range(len(x_parab_fine)-1)] aprox_cumulative_length=np.cumsum(aprox_length_sequence) aprox_cum_loss=np.array([sigmap0max*(1-math.exp(-mu*aprox_cum_angle[i]-k*aprox_cumulative_length[i])) for i in range(len(aprox_cum_angle))]) #Tendon definition, layout and friction losses tendon=presconc.PrestressTendon([]) tendon.roughCoordMtr=np.array([x_parab_rough,y_parab_rough,z_parab_rough]) #Interpolated 3D spline tendon.pntsInterpTendon(n_points_fine,1) #Cumulative lengths of the sequence of segments cumulative_length=tendon.getCumLength() ratio1= np.mean((cumulative_length-aprox_cumulative_length)**2)/np.mean(cumulative_length) # Cumulative deviation cumulative_angl=tendon.getCumAngle() ratio2= np.mean((cumulative_angl-aprox_cum_angle)**2)/np.mean(cumulative_angl) # Losses of prestressing due to friction lssFrict=tendon.getLossFriction(coefFric=mu,k=k,sigmaP0_extr1=sigmap0max,sigmaP0_extr2=0.0) ratio3= np.mean((lssFrict-aprox_cum_loss)**2)/np.mean(lssFrict) ''' #Plot fig1,ax2d=tendon.plot2D(XaxisValues='X',symbolRougPoints='b*',symbolFinePoints='r*',symbolTendon='g-') fig1.show() fig2,ax2d=tendon.plot2D(XaxisValues='X',symbolRougPoints=None,symbolFinePoints=None,symbolTendon=None,resultsToPlot=[[lssFrict,'m-','Immediate loss due to friction']]) fig2.savefig('fig2.png') ''' import os from miscUtils import LogMessages as lmsg fname= os.path.basename(__file__) if (abs(ratio1)<1e-6) & (abs(ratio2)<1e-8) & (abs(ratio3)<0.113): print "test ",fname,": ok." else: lmsg.error(fname+' ERROR.')
gpl-3.0
ChanderG/scikit-learn
examples/cluster/plot_dbscan.py
346
2479
# -*- coding: utf-8 -*- """ =================================== Demo of DBSCAN clustering algorithm =================================== Finds core samples of high density and expands clusters from them. """ print(__doc__) import numpy as np from sklearn.cluster import DBSCAN from sklearn import metrics from sklearn.datasets.samples_generator import make_blobs from sklearn.preprocessing import StandardScaler ############################################################################## # Generate sample data centers = [[1, 1], [-1, -1], [1, -1]] X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4, random_state=0) X = StandardScaler().fit_transform(X) ############################################################################## # Compute DBSCAN db = DBSCAN(eps=0.3, min_samples=10).fit(X) core_samples_mask = np.zeros_like(db.labels_, dtype=bool) core_samples_mask[db.core_sample_indices_] = True labels = db.labels_ # Number of clusters in labels, ignoring noise if present. n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) print('Estimated number of clusters: %d' % n_clusters_) print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels)) print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels)) print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels)) print("Adjusted Rand Index: %0.3f" % metrics.adjusted_rand_score(labels_true, labels)) print("Adjusted Mutual Information: %0.3f" % metrics.adjusted_mutual_info_score(labels_true, labels)) print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(X, labels)) ############################################################################## # Plot result import matplotlib.pyplot as plt # Black removed and is used for noise instead. unique_labels = set(labels) colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels))) for k, col in zip(unique_labels, colors): if k == -1: # Black used for noise. 
col = 'k' class_member_mask = (labels == k) xy = X[class_member_mask & core_samples_mask] plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=14) xy = X[class_member_mask & ~core_samples_mask] plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=6) plt.title('Estimated number of clusters: %d' % n_clusters_) plt.show()
bsd-3-clause
droundy/deft
papers/thesis-vischer/figs/ideal-gas.py
1
1563
from __future__ import division import matplotlib matplotlib.use('Agg') import numpy as np import matplotlib.pyplot as plt matplotlib.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']}) matplotlib.rc('text', usetex=True) # Constants R=3.034E-10 #m, sphere radius for water M=2.989E-23 #kg, water mass qe = 1.19E-19 hbar = 1.055E-34 # hbar kb = 1.381E-23 # boltzmann N = 60 # spheres V = N*(4/3)*np.pi*R**3/(.000001) #m^3, volume (size required for \eta = .25) with N=60 # Array generation colors = ['1', '.75', 'r', '.5', '.25'] # water is red for detail ms = [9.11E-31, 1.67E-27, M, 80, 1.98E30] # masses to plot mlabels = ['$m_e$', '$m_P$', '$m_{water}$', '$m_{person}$', '$M_{\odot}$'] # pretty latex labels Ts = np.arange(1.0, 300, .1) # K, temperatures, avoiding 0 because no one will notice E = np.zeros_like(Ts) cv = np.zeros_like(Ts) # Calculate free energies for each mass and every temperature for i in range(0, len(ms)): for j in range(0, len(Ts)): Lambda = hbar/np.sqrt(kb*ms[i]*Ts[j]/(2*np.pi)) Z = V/Lambda**3 E[j] = (-N*kb*Ts[j]*np.log(Z) + N*kb*Ts[j]*np.log(N) -N*kb*Ts[j])/qe cv[j] = E[j]/Ts[j] + 1.5*N*kb print("cv is, ", cv) plt.figure('cv') plt.semilogx(Ts, cv/N/kb, color = colors[i], marker = 'o', label = mlabels[i]) plt.figure('cv') plt.title('Ideal gas free energy for $N=60$, $\eta = .25$') plt.ylabel('$E$ (eV)') plt.xlabel('$T$ (K)') #plt.ylim(-800, 100) plt.legend(loc='best') plt.tight_layout(pad=0.2) plt.savefig('ideal-F-vs-T.pdf') plt.show()
gpl-2.0
devanshdalal/scikit-learn
sklearn/linear_model/sag.py
18
11273
"""Solvers for Ridge and LogisticRegression using SAG algorithm""" # Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org> # # License: BSD 3 clause import numpy as np import warnings from ..exceptions import ConvergenceWarning from ..utils import check_array from ..utils.extmath import row_norms from .base import make_dataset from .sag_fast import sag def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept): """Compute automatic step size for SAG solver The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is the max sum of squares for over all samples. Parameters ---------- max_squared_sum : float Maximum squared sum of X over samples. alpha_scaled : float Constant that multiplies the regularization term, scaled by 1. / n_samples, the number of samples. loss : string, in {"log", "squared"} The loss function used in SAG solver. fit_intercept : bool Specifies if a constant (a.k.a. bias or intercept) will be added to the decision function. Returns ------- step_size : float Step size used in SAG solver. References ---------- Schmidt, M., Roux, N. L., & Bach, F. (2013). 
Minimizing finite sums with the stochastic average gradient https://hal.inria.fr/hal-00860051/document """ if loss in ('log', 'multinomial'): # inverse Lipschitz constant for log loss return 4.0 / (max_squared_sum + int(fit_intercept) + 4.0 * alpha_scaled) elif loss == 'squared': # inverse Lipschitz constant for squared loss return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled) else: raise ValueError("Unknown loss function for SAG solver, got %s " "instead of 'log' or 'squared'" % loss) def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., max_iter=1000, tol=0.001, verbose=0, random_state=None, check_input=True, max_squared_sum=None, warm_start_mem=None): """SAG solver for Ridge and LogisticRegression SAG stands for Stochastic Average Gradient: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a constant learning rate. IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the same scale. You can normalize the data by using sklearn.preprocessing.StandardScaler on your data before passing it to the fit method. This implementation works with data represented as dense numpy arrays or sparse scipy arrays of floating point values for the features. It will fit the data according to squared loss or log loss. The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using the squared euclidean norm L2. .. versionadded:: 0.17 Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values. With loss='multinomial', y must be label encoded (see preprocessing.LabelEncoder). sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). loss : 'log' | 'squared' | 'multinomial' Loss function that will be optimized: -'log' is the binary logistic loss, as used in LogisticRegression. 
-'squared' is the squared loss, as used in Ridge. -'multinomial' is the multinomial logistic loss, as used in LogisticRegression. .. versionadded:: 0.18 *loss='multinomial'* alpha : float, optional Constant that multiplies the regularization term. Defaults to 1. max_iter : int, optional The max number of passes over the training data if the stopping criteria is not reached. Defaults to 1000. tol : double, optional The stopping criteria for the weights. The iterations will stop when max(change in weights) / max(weights) < tol. Defaults to .001 verbose : integer, optional The verbosity level. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. check_input : bool, default True If False, the input arrays X and y will not be checked. max_squared_sum : float, default None Maximum squared sum of X over samples. If None, it will be computed, going through all the samples. The value should be precomputed to speed up cross validation. warm_start_mem : dict, optional The initialization parameters used for warm starting. Warm starting is currently used in LogisticRegression but not in Ridge. It contains: - 'coef': the weight vector, with the intercept in last line if the intercept is fitted. - 'gradient_memory': the scalar gradient for all seen samples. - 'sum_gradient': the sum of gradient over all seen samples, for each feature. - 'intercept_sum_gradient': the sum of gradient over all seen samples, for the intercept. - 'seen': array of boolean describing the seen samples. - 'num_seen': the number of seen samples. Returns ------- coef_ : array, shape (n_features) Weight vector. n_iter_ : int The number of full pass on all samples. warm_start_mem : dict Contains a 'coef' key with the fitted result, and possibly the fitted intercept at the end of the array. Contains also other keys used for warm starting. 
Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> X = np.random.randn(n_samples, n_features) >>> y = np.random.randn(n_samples) >>> clf = linear_model.Ridge(solver='sag') >>> clf.fit(X, y) ... #doctest: +NORMALIZE_WHITESPACE Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None, normalize=False, random_state=None, solver='sag', tol=0.001) >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> y = np.array([1, 1, 2, 2]) >>> clf = linear_model.LogisticRegression(solver='sag') >>> clf.fit(X, y) ... #doctest: +NORMALIZE_WHITESPACE LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solver='sag', tol=0.0001, verbose=0, warm_start=False) References ---------- Schmidt, M., Roux, N. L., & Bach, F. (2013). Minimizing finite sums with the stochastic average gradient https://hal.inria.fr/hal-00860051/document See also -------- Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and LogisticRegression, SGDClassifier, LinearSVC, Perceptron """ if warm_start_mem is None: warm_start_mem = {} # Ridge default max_iter is None if max_iter is None: max_iter = 1000 if check_input: X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C') y = check_array(y, dtype=np.float64, ensure_2d=False, order='C') n_samples, n_features = X.shape[0], X.shape[1] # As in SGD, the alpha is scaled by n_samples. alpha_scaled = float(alpha) / n_samples # if loss == 'multinomial', y should be label encoded. 
n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1 # initialization if sample_weight is None: sample_weight = np.ones(n_samples, dtype=np.float64, order='C') if 'coef' in warm_start_mem.keys(): coef_init = warm_start_mem['coef'] else: # assume fit_intercept is False coef_init = np.zeros((n_features, n_classes), dtype=np.float64, order='C') # coef_init contains possibly the intercept_init at the end. # Note that Ridge centers the data before fitting, so fit_intercept=False. fit_intercept = coef_init.shape[0] == (n_features + 1) if fit_intercept: intercept_init = coef_init[-1, :] coef_init = coef_init[:-1, :] else: intercept_init = np.zeros(n_classes, dtype=np.float64) if 'intercept_sum_gradient' in warm_start_mem.keys(): intercept_sum_gradient = warm_start_mem['intercept_sum_gradient'] else: intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64) if 'gradient_memory' in warm_start_mem.keys(): gradient_memory_init = warm_start_mem['gradient_memory'] else: gradient_memory_init = np.zeros((n_samples, n_classes), dtype=np.float64, order='C') if 'sum_gradient' in warm_start_mem.keys(): sum_gradient_init = warm_start_mem['sum_gradient'] else: sum_gradient_init = np.zeros((n_features, n_classes), dtype=np.float64, order='C') if 'seen' in warm_start_mem.keys(): seen_init = warm_start_mem['seen'] else: seen_init = np.zeros(n_samples, dtype=np.int32, order='C') if 'num_seen' in warm_start_mem.keys(): num_seen_init = warm_start_mem['num_seen'] else: num_seen_init = 0 dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state) if max_squared_sum is None: max_squared_sum = row_norms(X, squared=True).max() step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept) if step_size * alpha_scaled == 1: raise ZeroDivisionError("Current sag implementation does not handle " "the case step_size * alpha_scaled == 1") num_seen, n_iter_ = sag(dataset, coef_init, intercept_init, n_samples, n_features, n_classes, tol, max_iter, loss, 
step_size, alpha_scaled, sum_gradient_init, gradient_memory_init, seen_init, num_seen_init, fit_intercept, intercept_sum_gradient, intercept_decay, verbose) if n_iter_ == max_iter: warnings.warn("The max_iter was reached which means " "the coef_ did not converge", ConvergenceWarning) if fit_intercept: coef_init = np.vstack((coef_init, intercept_init)) warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init, 'intercept_sum_gradient': intercept_sum_gradient, 'gradient_memory': gradient_memory_init, 'seen': seen_init, 'num_seen': num_seen} if loss == 'multinomial': coef_ = coef_init.T else: coef_ = coef_init[:, 0] return coef_, n_iter_, warm_start_mem
bsd-3-clause
peterwilletts24/Python-Scripts
plot_scripts/EMBRACE/plot_geopotential_dkbhu.py
1
12746
""" Load mean geopotential heights and plot in colour """ import os, sys import matplotlib.pyplot as plt import matplotlib.cm as mpl_cm from mpl_toolkits.basemap import Basemap import iris import iris.analysis.cartography import numpy as np import imp import h5py import cartopy.crs as ccrs import scipy.interpolate from textwrap import wrap model_name_convert_title = imp.load_source('util', '/nfs/a90/eepdw/python_scripts/model_name_convert_title.py') def main(): def unrotate_pole(rotated_lons, rotated_lats, pole_lon, pole_lat): """ Convert rotated-pole lons and lats to unrotated ones. Example:: lons, lats = unrotate_pole(grid_lons, grid_lats, pole_lon, pole_lat) .. note:: Uses proj.4 to perform the conversion. """ src_proj = ccrs.RotatedGeodetic(pole_longitude=pole_lon, pole_latitude=pole_lat) target_proj = ccrs.Geodetic() res = target_proj.transform_points(x=rotated_lons, y=rotated_lats, src_crs=src_proj) unrotated_lon = res[..., 0] unrotated_lat = res[..., 1] return unrotated_lon, unrotated_lat # Set rotated pole longitude and latitude, not ideal but easier than trying to find how to get iris to tell me what it is. 
plot_type='mean' plot_diags=['temp', 'sp_hum'] plot_levels = [925, 850, 700, 500] #plot_levels = [925] #experiment_ids = ['djzny', 'djznq', 'djzns', 'dkbhu', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ] #experiment_ids = ['dklyu', 'dkmbq', 'dklwu', 'dklzq' ] experiment_ids = ['djzny'] p_levels = [1000, 950, 925, 850, 700, 500, 400, 300, 250, 200, 150, 100, 70, 50, 30, 20, 10] for experiment_id in experiment_ids: expmin1 = experiment_id[:-1] for pl in plot_diags: plot_diag=pl fname_h = '/nfs/a90/eepdw/Mean_State_Plot_Data/Mean_Heights_Temps_etc/408_pressure_levels_interp_pressure_%s_%s' % (experiment_id, plot_type) fname_d = '/nfs/a90/eepdw/Mean_State_Plot_Data/Mean_Heights_Temps_etc/%s_pressure_levels_interp_%s_%s' % (plot_diag, experiment_id, plot_type) print fname_h print fname_d # Height data file with h5py.File(fname_h, 'r') as i: mh = i['%s' % plot_type] mean_heights = mh[. . .] print mean_heights.shape with h5py.File(fname_d, 'r') as i: mh = i['%s' % plot_type] mean_var = mh[. . .] 
print mean_var.shape f_oro = '/nfs/a90/eepdw/Mean_State_Plot_Data/pp_files/%s/%s/33.pp' % (expmin1, experiment_id) oro = iris.load_cube(f_oro) fu = '/nfs/a90/eepdw/Mean_State_Plot_Data/pp_files/%s/%s/30201_mean.pp' % (expmin1, experiment_id) u_wind,v_wind = iris.load(fu) print u_wind.shape lat_w = u_wind.coord('grid_latitude').points lon_w = u_wind.coord('grid_longitude').points p_levs = u_wind.coord('pressure').points lat = oro.coord('grid_latitude').points lon = oro.coord('grid_longitude').points lon_low= np.min(lon) # Wind may have different number of grid points so need to do this twice lat_w = u_wind.coord('grid_latitude').points lon_w = u_wind.coord('grid_longitude').points p_levs = u_wind.coord('pressure').points lat = oro.coord('grid_latitude').points lon = oro.coord('grid_longitude').points cs_w = u_wind.coord_system('CoordSystem') cs = oro.coord_system('CoordSystem') if isinstance(cs_w, iris.coord_systems.RotatedGeogCS): print ' Wind - %s - Unrotate pole %s' % (experiment_id, cs_w) lons_w, lats_w = np.meshgrid(lon_w, lat_w) lons_w,lats_w = iris.analysis.cartography.unrotate_pole(lons_w,lats_w, cs_w.grid_north_pole_longitude, cs_w.grid_north_pole_latitude) lon_w=lons_w[0] lat_w=lats_w[:,0] csur_w=cs_w.ellipsoid for i, coord in enumerate (u_wind.coords()): if coord.standard_name=='grid_latitude': lat_dim_coord_uwind = i if coord.standard_name=='grid_longitude': lon_dim_coord_uwind = i u_wind.remove_coord('grid_latitude') u_wind.remove_coord('grid_longitude') u_wind.add_dim_coord(iris.coords.DimCoord(points=lat_w, standard_name='grid_latitude', units='degrees', coord_system=csur_w),lat_dim_coord_uwind ) u_wind.add_dim_coord(iris.coords.DimCoord(points=lon_w, standard_name='grid_longitude', units='degrees', coord_system=csur_w), lon_dim_coord_uwind) v_wind.remove_coord('grid_latitude') v_wind.remove_coord('grid_longitude') v_wind.add_dim_coord(iris.coords.DimCoord(points=lat_w, standard_name='grid_latitude', units='degrees', coord_system=csur_w), 
lat_dim_coord_uwind) v_wind.add_dim_coord(iris.coords.DimCoord(points=lon_w, standard_name='grid_longitude', units='degrees', coord_system=csur_w),lon_dim_coord_uwind ) if isinstance(cs, iris.coord_systems.RotatedGeogCS): print ' 33.pp - %s - Unrotate pole %s' % (experiment_id, cs) lons, lats = np.meshgrid(lon, lat) lon_low= np.min(lons) lon_high = np.max(lons) lat_low = np.min(lats) lat_high = np.max(lats) lon_corners, lat_corners = np.meshgrid((lon_low, lon_high), (lat_low, lat_high)) lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude) lon_corner_u,lat_corner_u = iris.analysis.cartography.unrotate_pole(lon_corners, lat_corners, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude) #lon_highu,lat_highu = iris.analysis.cartography.unrotate_pole(lon_high, lat_high, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude) lon=lons[0] lat=lats[:,0] print lon_corners print lat_corners print lon_corner_u print lat_corner_u print lon_corner_u[0,0] print lon_corner_u[0,1] print lat_corner_u[0,0] print lat_corner_u[1,0] lon_low = lon_corner_u[0,0] lon_high = lon_corner_u[0,1] lat_low = lat_corner_u[0,0] lat_high = lat_corner_u[1,0] csur=cs.ellipsoid for i, coord in enumerate (oro.coords()): if coord.standard_name=='grid_latitude': lat_dim_coord_oro = i if coord.standard_name=='grid_longitude': lon_dim_coord_oro = i oro.remove_coord('grid_latitude') oro.remove_coord('grid_longitude') oro.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord_oro) oro.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord_oro) print oro else: lons, lats = np.meshgrid(lon, lat) lons_w, lats_w = np.meshgrid(lon_w, lat_w) lon_low= np.min(lons) lon_high = np.max(lons) lat_low = np.min(lats) lat_high = np.max(lats) # 2 degree lats lon lists for wind regridding lat_wind_1deg 
= np.arange(lat_low,lat_high, 2) lon_wind_1deg = np.arange(lon_low,lon_high, 2) for p in plot_levels: m_title = 'Height of %s-hPa level (m)' % (p) # Set pressure height contour min/max if p == 925: clev_min = 680. clev_max = 810. elif p == 850: clev_min = 1435. clev_max = 1530. elif p == 700: clev_min = 3090. clev_max = 3155. elif p == 500: clev_min = 5800. clev_max = 5890. else: print 'Contour min/max not set for this pressure level' # Set potential temperature min/max if p == 925: clevpt_min = 295. clevpt_max = 310. elif p == 850: clevpt_min = 300. clevpt_max = 320. elif p == 700: clevpt_min = 310. clevpt_max = 325. elif p == 500: clevpt_min = 321. clevpt_max = 335. else: print 'Potential temperature min/max not set for this pressure level' # Set specific humidity min/max if p == 925: clevsh_min = 0.012 clevsh_max = 0.022 elif p == 850: clevsh_min = 0.0035 clevsh_max = 0.018 elif p == 700: clevsh_min = 0.002 clevsh_max = 0.012 elif p == 500: clevsh_min = 0.002 clevsh_max = 0.006 else: print 'Specific humidity min/max not set for this pressure level' #clevs_col = np.arange(clev_min, clev_max) clevs_lin = np.linspace(clev_min, clev_max, num=20) s = np.searchsorted(p_levels[::-1], p) sc = np.searchsorted(p_levs, p) # Set plot contour lines for pressure levels plt_h = mean_heights[:,:,-(s+1)] plt_h[plt_h==0] = np.nan # Set plot colours for variable plt_v = mean_var[:,:,-(s+1)] plt_v[plt_v==0] = np.nan #c_max = int(np.max(plt_h[~np.isnan(plt_h)])) #c_min = int(np.min(plt_h[~np.isnan(plt_h) & ])) # Set u,v for winds, linear interpolate to approx. 1 degree grid # Does not work on iris1.0 as on Leeds computers. Does work on later versions #u_interp = u_wind[sc,:,:] #v_interp = v_wind[sc,:,:]. 
#sample_points = [('grid_latitude', np.arange(lat_low,lat_high,2)), ('grid_longitude', np.arange(lon_low,lon_high,2))] #u = iris.analysis.interpolate.linear(u_interp, sample_points).data #v = iris.analysis.interpolate.linear(v_interp, sample_points).data u_interp = u_wind[sc,:,:].data v_interp = v_wind[sc,:,:].data lons_w2deg, lats_w2deg = np.meshgrid(lon_wind_1deg, lat_wind_1deg) print lats_w.shape print u_interp.shape fl_la_lo = (lats_w.flatten(),lons_w.flatten()) u = scipy.interpolate.griddata(fl_la_lo, u_interp.flatten(), (lats_w2deg, lons_w2deg), method='cubic') v = scipy.interpolate.griddata(fl_la_lo, v_interp.flatten(), (lats_w2deg, lons_w2deg), method='cubic') m =\ Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high,projection='mill', rsphere=6371229) x, y = m(lons, lats) x_w, y_w = m(lons_w2deg, lats_w2deg) #print x_w.shape fig=plt.figure(figsize=(8,8)) ax = fig.add_axes([0.05,0.05,0.9,0.85],axisbg='#262626') m.drawcountries(color='#262626') m.drawcoastlines(linewidth=0.5,color='#262626' ) #m.fillcontinents(color='#CCFF99') m.drawparallels(np.arange(-80,81,10),labels=[1,1,0,0]) m.drawmeridians(np.arange(0,360,10),labels=[0,0,0,1]) cs_lin = m.contour(x,y, plt_h, clevs_lin,colors='#262626',linewidths=0.5) if plot_diag=='temp': plt_v = np.ma.masked_outside(mean_var[:,:,-(s+1)], clevpt_max+20, clevpt_min-20) cs_col = m.contourf(x,y, plt_v, np.linspace(clevpt_min, clevpt_max), cmap=plt.cm.RdBu_r, extend='both') cbar = m.colorbar(cs_col,location='bottom',pad="5%", format = '%d') cbar.set_label('K') plt.suptitle('Height, Potential Temperature and Wind Vectors at %s hPa'% (p), fontsize=10) elif plot_diag=='sp_hum': plt_v = np.ma.masked_outside(mean_var[:,:,-(s+1)], clevsh_max+20, clevsh_min-20) cs_col = m.contourf(x,y, plt_v, np.linspace(clevsh_min, clevsh_max), cmap=plt.cm.RdBu_r, extend='both') cbar = m.colorbar(cs_col,location='bottom',pad="5%", format = '%.3f') cbar.set_label('kg/kg') plt.suptitle('Height, Specific Humidity and 
Wind Vectors at %s hPa'% (p), fontsize=10) wind = m.quiver(x_w,y_w, u, v, scale=400, color='#262626') qk = plt.quiverkey(wind, 0.1, 0.1, 5, '5 m/s', labelpos='W') plt.clabel(cs_lin, fontsize=10, fmt='%d', color='black') #plt.title('%s\n%s' % (m_title, model_name_convert_title.main(experiment_id)), fontsize=10) plt.title('\n'.join(wrap('%s' % (model_name_convert_title.main(experiment_id)), 80)), fontsize=10) plt.show() if not os.path.exists('/nfs/a90/eepdw/Mean_State_Plot_Data/Figures/%s/%s' % (experiment_id, plot_diag)): os.makedirs('/nfs/a90/eepdw/Mean_State_Plot_Data/Figures/%s/%s' % (experiment_id, plot_diag)) #plt.savefig('/nfs/a90/eepdw/Mean_State_Plot_Data/Figures/%s/%s/geop_height_%shPa_%s_%s.png' % (experiment_id, plot_diag, p, experiment_id, plot_diag), format='png', bbox_inches='tight') if __name__ == '__main__': main()
mit
IreneShivaei/specline
linefit.py
1
11995
import numpy as np from astropy.io import fits from astropy.wcs import WCS from astropy.table import Table, Column from glob import glob import time import multiprocessing as mp import argparse from astropy.cosmology import FlatLambdaCDM #you can also use pre-defined parameters, e.g.: from astropy.cosmology import WMAP7 import astropy.units as u import pdb from scipy import optimize from astropy.stats import sigma_clip #define the cosmology (if you import WMAP7, you don't need this line) cosmo = FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.3) import matplotlib.pyplot as plt def read_spec(file): sp=fits.open(file) header = sp[1].header wcs = WCS(header) #read flux and flux err use=np.where(sp[8].data > 0) spec=sp[4].data specerr=sp[6].data/np.sqrt(sp[8].data) #convert pixel values to wavelength #make index array index = np.arange(spec.size) # or np.arange(header['NAXIS1']) specwl = wcs.wcs_pix2world(index[:,np.newaxis], 0) specwl=np.reshape(specwl, specwl.size) #reshape to have a 1d array return header,spec[use],specerr[use],specwl[use] def find_ha_arrs(specwl,spec,specerr): #find the wavelength, flux, and flux err range for Ha sind1 = np.where(specwl == 6555.)[0] find1 = np.where(specwl == 6575.)[0] start = np.where(specwl == 6400.)[0] sind0 = 0. 
if spec[start] != 0: sind0=start else: for i in range(200): if spec[start+i] !=0: break sind0=start+i find0 = np.where(specwl == 6530.)[0] sind2 = np.where(specwl == 6600.)[0] final = np.where(specwl == 6680.)[0] find2 = 0 if spec[final] != 0: find2=final else: for i in reversed(range(200)): if spec[final+i] !=0: break find2=final+i sind0=sind0[0]; sind1=sind1[0]; sind2=sind2[0] find0=find0[0]; find1=find1[0]; find2=find2[0] w0=specwl[sind0:find0]; w1=specwl[sind1:find1]; w2=specwl[sind2:find2] wind_ha=np.append(np.append(w0,w1),w2) s0=spec[sind0:find0]; s1=spec[sind1:find1]; s2=spec[sind2:find2] spec_ha0=np.append(np.append(s0,s1),s2) e0=specerr[sind0:find0]; e1=specerr[sind1:find1]; e2=specerr[sind2:find2] e=np.append(np.append(e0,e1),e2) return wind_ha,spec_ha0,e def find_hb_arrs(specwl,spec,specerr): #find the wavelength, flux, and flux err range for Hb sind1 = np.where(specwl == 4750.)[0] find1 = np.where(specwl == 4950.)[0] sind1=sind1[0]; find1=find1[0] wind_hb=specwl[sind1:find1] spec_hb0=spec[sind1:find1] e=specerr[sind1:find1] return wind_hb,spec_hb0,e def find_oiii_arrs(specwl,spec,specerr): #find the wavelength, flux, and flux err range for OIII5008 sind1 = np.where(specwl == 4979.)[0] find1 = np.where(specwl == 5060.)[0] sind1=sind1[0]; find1=find1[0] wind_oiii=specwl[sind1:find1] spec_oiii0=spec[sind1:find1] e=specerr[sind1:find1] return wind_oiii,spec_oiii0,e def find_oii_arrs(specwl,spec,specerr): #find the wavelength, flux, and flux err range for OII3727 sind1 = np.where(specwl == 3675.)[0] find1 = np.where(specwl == 3775.)[0] sind1=sind1[0]; find1=find1[0] wind_oii=specwl[sind1:find1] spec_oii0=spec[sind1:find1] e=specerr[sind1:find1] return wind_oii,spec_oii0,e def fit_gauss(p0,wave,spec,e): ''' Fit a gaussian function with a continuum to data. 
Input ===== p0: initial guess for the guassian parameters in this form: [amplitude,center,width,continuum] wave: wavelength array over which the fit should be done spec: flux array corresponding to the wavelength array e: flux error array Output ====== p1: the best fit parameters in this form: [amplitude,center,width,continuum] ''' #define a gaussian function and an error function for the xi^2 minimization gauss = lambda p, t : p[3] + p[0]*np.exp(-(t-p[1])**2/(2*p[2]**2)) errfunc = lambda p, t, y, err: (y-gauss(p,t))/err #minimize the sum of squares of errfunc p1, success = optimize.leastsq(errfunc, p0[:], args=(wave,spec,e)) if success > 4.: print('# Not a good fit, success = ',success) return p1 def fit_double_gauss(p0,wave,spec,e): ''' Fit a double-gaussian function with a continuum to data. Input ===== p0: initial guess for the guassian parameters in this form: [amplitude1,center1,width1,amplitude2,center2,width2,continuum] wave: wavelength array over which the fit should be done spec: flux array corresponding to the wavelength array e: flux error array Output ====== p1: the best fit parameters in this form: [amplitude,center,width,continuum] ''' #define a gaussian function and an error function for the xi^2 minimization gauss = lambda p, t : p[6] + p[0]*np.exp(-(t-p[1])**2/(2*p[2]**2)) + p[3]*np.exp(-(t-p[4])**2/(2*p[5]**2)) errfunc = lambda p, t, y, err: (y-gauss(p,t))/err #minimize the sum of squares of errfunc p1, success = optimize.leastsq(errfunc, p0[:], args=(wave,spec,e)) if success > 4.: print('# Not a good fit, success = ',success) return p1 def ha(header, spec, specerr, specwl): wind_ha,spec_ha0,e=find_ha_arrs(specwl,spec,specerr) e=np.interp(wind_ha,wind_ha[e != 0],e[e != 0]) if ((np.log10(spec_ha0.max()) > 2.) | (np.log10(spec_ha0.max()) < -1.)): normfac=1./spec_ha0.max() else: normfac=1. 
p0 = [.4, 6564., 1., .1] # Initial guess for the parameters [amplitude,center,width,continuum] pert=1000 lha_arr=np.zeros(pert) gauss = lambda p, t : p[3] + p[0]*np.exp(-(t-p[1])**2/(2*p[2]**2)) errfunc = lambda p, t, y, err: (y-gauss(p,t))/err for p in range(pert): spec_ha = e*np.random.randn(spec_ha0.size)+spec_ha0 e_pert=np.zeros(spec_ha0.size)+1e-10 p1=fit_gauss(p0,wind_ha,spec_ha*normfac,e_pert)#e*normfac) #area under the line: area=np.trapz(gauss(p1,np.arange(6555,6575,.1))-p1[3],np.arange(6555,6575,.1)) lha_arr[p] = area/normfac #p1, success = optimize.leastsq(errfunc, p0[:], args=(wind_ha,spec_ha0*normfac,e*normfac)) #plt.clf #plt.figure() #plt.errorbar(wind_ha,spec_ha0,e,fmt='.',capsize=2,markersize=3,ecolor='grey') #plt.plot(wind_ha,gauss(p1,wind_ha), '-',color='orange') lha_arr_clipped = sigma_clip(lha_arr[lha_arr > 0.], sigma=5,masked=False,axis=None) lha=np.mean(lha_arr_clipped) #print('L(Ha) = {0:3.3}'.format(lha)) #print('Detection at {0:4.4}'.format(lha_arr.mean()/np.std(lha_arr)),' sigma') #Balmer correction #print('L(Ha_abs)/L(Ha) = {0:4.4}'.format(header['haabs']/lha)) lha = lha + header['haabs'] #print('L(Ha)+L(Ha_abs) = {0:3.4}'.format(lha),' +/- {0:0.3}'.format(lha_arr.std())) sfrha = 0.079 * lha / 1.8 #in Chabrier sfrhaerr = 0.079 * lha_arr_clipped.std() / 1.8 #in Chabrier print('#') print('# L(Ha), L(Ha_abs), L(Ha)_err, SFR(Ha)_observed, SFR(Ha)_err_observed') print('{0:e}'.format(lha),' {0:e}'.format(header['haabs']),' {0:e}'.format(lha_arr_clipped.std()),'{0:f}'.format(sfrha),'{0:f}'.format(sfrhaerr)) return lha,header['haabs'],lha_arr.std() def hb(header, spec, specerr, specwl): wind_hb,spec_hb0,e=find_hb_arrs(specwl,spec,specerr) e=np.interp(wind_hb,wind_hb[e != 0],e[e != 0]) if ((np.log10(spec_hb0.max()) > 2.) | (np.log10(spec_hb0.max()) < -1.)): normfac=1./spec_hb0.max() else: normfac=1. 
p0 = [10, 4863., 1., .1] # Initial guess for the parameters [amplitude,center,width,continuum] import sys## np.set_printoptions(threshold=sys.maxsize)## pert=1000 lhb_arr=np.zeros(pert) gauss = lambda p, t : p[3] + p[0]*np.exp(-(t-p[1])**2/(2*p[2]**2)) errfunc = lambda p, t, y, err: (y-gauss(p,t))/err for p in range(pert): spec_hb = e*np.random.randn(spec_hb0.size)+spec_hb0 e_pert=np.zeros(spec_hb0.size)+1e-10 p1=fit_gauss(p0,wind_hb,spec_hb*normfac,e_pert)#e*normfac) #area under the line: area=np.trapz(gauss(p1,np.arange(4760,4940,.1))-p1[3],np.arange(4760,4940,.1)) lhb_arr[p] = area/normfac lhb_arr_clipped = sigma_clip(lhb_arr[lhb_arr > 0.], sigma=5,masked=False,axis=None) lhb=np.mean(lhb_arr_clipped) #print('L(Hb) = {0:3.3}'.format(lhb)) #print('Detection at {0:4.4}'.format(lhb_arr.mean()/np.std(lhb_arr)),' sigma') #Balmer correction #print('L(Hb_abs)/L(Hb) = {0:4.4}'.format(header['hbabs']/lhb)) lhb = lhb + header['hbabs'] #print('L(Hb)+L(Hb_abs) = {0:3.4}'.format(lhb),' +/- {0:0.3}'.format(lhb_arr.std())) print('#') print('# L(Hb), L(Hb_abs), L(Hb)_err') print('{0:e}'.format(lhb),' {0:e}'.format(header['hbabs']),' {0:e}'.format(lhb_arr.std())) return lhb,header['hbabs'],lhb_arr_clipped.std() def oiii(header, spec, specerr, specwl): wind_oiii,spec_oiii0,e=find_oiii_arrs(specwl,spec,specerr) if ((np.log10(spec_oiii0.max()) > 2.) | (np.log10(spec_oiii0.max()) < -1.)): normfac=1./spec_oiii0.max() else: normfac=1. 
p0 = [1., 5008., 1., .1] # Initial guess for the parameters [amplitude,center,width,continuum] pert=1000 loiii_arr=np.zeros(pert) gauss = lambda p, t : p[3] + p[0]*np.exp(-(t-p[1])**2/(2*p[2]**2)) errfunc = lambda p, t, y, err: (y-gauss(p,t))/err for p in range(pert): spec_oiii = e*np.random.randn(spec_oiii0.size)+spec_oiii0 e_pert=np.zeros(spec_oiii0.size)+1e-10 p1=fit_gauss(p0,wind_oiii,spec_oiii*normfac,e_pert)#e*normfac) #area under the line: area=np.trapz(gauss(p1,np.arange(4979,5050,.1))-p1[3],np.arange(4979,5050,.1)) loiii_arr[p] = area/normfac loiii_arr_clipped = sigma_clip(loiii_arr[loiii_arr > 0.], sigma=5,masked=False,axis=None) loiii=np.mean(loiii_arr_clipped) print('#') print('# L(OIII), L(OIII)_err') print('{0:e}'.format(loiii),' {0:e}'.format(loiii_arr_clipped.std())) return loiii,loiii_arr.std() def oii(header, spec, specerr, specwl): wind_oii,spec_oii0,e=find_oii_arrs(specwl,spec,specerr) if ((np.log10(spec_oii0.max()) > 2.) | (np.log10(spec_oii0.max()) < -1.)): normfac=1./spec_oii0.max() else: normfac=1. 
p0 = [1., 3727., 1., 1., 3729., 1., .1] # Initial guess for the parameters [amplitude1,center1,width1,amplitude2,center2,width2,continuum] pert=1000 loii_arr=np.zeros(pert) gauss = lambda p, t : p[6] + p[0]*np.exp(-(t-p[1])**2/(2*p[2]**2)) + p[3]*np.exp(-(t-p[4])**2/(2*p[5]**2)) errfunc = lambda p, t, y, err: (y-gauss(p,t))/err for p in range(pert): spec_oii = e*np.random.randn(spec_oii0.size)+spec_oii0 e_pert=np.zeros(spec_oii0.size)+1e-10 p1=fit_gauss(p0,wind_oii,spec_oii*normfac,e_pert)#e*normfac) #area under the line: area=np.trapz(gauss(p1,np.arange(3690,3745,.1))-p1[6],np.arange(3690,3745,.1)) loii_arr[p] = area/normfac loii_arr_clipped = sigma_clip(loii_arr[loii_arr > 0.], sigma=5,masked=False,axis=None) loii=np.mean(loii_arr_clipped) print('#') print('# L(OII), L(OII)_err') print('{0:e}'.format(loii),' {0:e}'.format(loii_arr_clipped.std())) return loii,loii_arr.std() def main(file,*arg): import sys file=sys.argv[1] header, spec, specerr, specwl = read_spec(file) if sys.argv[2] == 'Ha': ha(header, spec, specerr, specwl) if sys.argv[2] == 'Hb': hb(header, spec, specerr, specwl) if sys.argv[2] == 'oiii': oiii(header, spec, specerr, specwl) if sys.argv[2] == 'oii': oii(header, spec, specerr, specwl) if len(sys.argv) == 4: if sys.argv[3] == 'Ha': ha(header, spec, specerr, specwl) if sys.argv[3] == 'Hb': hb(header, spec, specerr, specwl) if sys.argv[3] == 'oiii': oiii(header, spec, specerr, specwl) if sys.argv[3] == 'oii': oii(header, spec, specerr, specwl) import sys file=sys.argv[1] main(file)
mit
Silmathoron/nest-simulator
pynest/nest/lib/hl_api_spatial.py
1
45069
# -*- coding: utf-8 -*- # # hl_api_spatial.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. """ Functions relating to spatial properties of nodes """ import numpy as np from ..ll_api import * from .. import pynestkernel as kernel from .hl_api_helper import * from .hl_api_connections import GetConnections from .hl_api_parallel_computing import NumProcesses, Rank from .hl_api_types import NodeCollection try: import matplotlib as mpl import matplotlib.path as mpath import matplotlib.patches as mpatches HAVE_MPL = True except ImportError: HAVE_MPL = False __all__ = [ 'CreateMask', 'Displacement', 'Distance', 'DumpLayerConnections', 'DumpLayerNodes', 'FindCenterElement', 'FindNearestElement', 'GetPosition', 'GetTargetNodes', 'GetTargetPositions', 'PlotLayer', 'PlotProbabilityParameter', 'PlotTargets', 'SelectNodesByMask', ] def CreateMask(masktype, specs, anchor=None): """ Create a spatial mask for connections. Masks are used when creating connections. A mask describes the area of the pool population that is searched for to connect for any given node in the driver population. Several mask types are available. Examples are the grid region, the rectangular, circular or doughnut region. The command :py:func:`.CreateMask` creates a `Mask` object which may be combined with other `Mask` objects using Boolean operators. 
The mask is specified in a dictionary. ``Mask`` objects can be passed to :py:func:`.Connect` in a connection dictionary with the key `'mask'`. Parameters ---------- masktype : str, ['rectangular' | 'circular' | 'doughnut' | 'elliptical'] for 2D masks, ['box' | 'spherical' | 'ellipsoidal] for 3D masks, ['grid'] only for grid-based layers in 2D. The mask name corresponds to the geometrical shape of the mask. There are different types for 2- and 3-dimensional layers. specs : dict Dictionary specifying the parameters of the provided `masktype`, see **Mask types**. anchor : [tuple/list of floats | dict with the keys `'column'` and \ `'row'` (for grid masks only)], optional, default: None By providing anchor coordinates, the location of the mask relative to the driver node can be changed. The list of coordinates has a length of 2 or 3 dependent on the number of dimensions. Returns ------- Mask: Object representing the mask See also -------- Connect Notes ----- - All angles must be given in degrees. 
**Mask types** Available mask types (`masktype`) and their corresponding parameter dictionaries: * 2D free and grid-based layers :: 'rectangular' : {'lower_left' : [float, float], 'upper_right' : [float, float], 'azimuth_angle': float # default:0.0} #or 'circular' : {'radius' : float} #or 'doughnut' : {'inner_radius' : float, 'outer_radius' : float} #or 'elliptical' : {'major_axis' : float, 'minor_axis' : float, 'azimuth_angle' : float, # default: 0.0, 'anchor' : [float, float], # default: [0.0, 0.0]} * 3D free and grid-based layers :: 'box' : {'lower_left' : [float, float, float], 'upper_right' : [float, float, float], 'azimuth_angle: float # default: 0.0, 'polar_angle : float # defualt: 0.0} #or 'spherical' : {'radius' : float} #or 'ellipsoidal' : {'major_axis' : float, 'minor_axis' : float, 'polar_axis' : float 'azimuth_angle' : float, # default: 0.0, 'polar_angle' : float, # default: 0.0, 'anchor' : [float, float, float], # default: [0.0, 0.0, 0.0]}} * 2D grid-based layers only :: 'grid' : {'rows' : float, 'columns' : float} By default the top-left corner of a grid mask, i.e., the grid mask element with grid index [0, 0], is aligned with the driver node. It can be changed by means of the 'anchor' parameter: :: 'anchor' : {'row' : float, 'column' : float} **Example** :: import nest # create a grid-based layer l = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5])) # create a circular mask m = nest.CreateMask('circular', {'radius': 0.2}) # connectivity specifications conndict = {'rule': 'pairwise_bernoulli', 'p': 1.0, 'mask': m} # connect layer l with itself according to the specifications nest.Connect(l, l, conndict) """ if anchor is None: return sli_func('CreateMask', {masktype: specs}) else: return sli_func('CreateMask', {masktype: specs, 'anchor': anchor}) def GetPosition(nodes): """ Return the spatial locations of nodes. 
Parameters ---------- nodes : NodeCollection `NodeCollection` of nodes we want the positions to Returns ------- tuple or tuple of tuple(s): Tuple of position with 2- or 3-elements or list of positions See also -------- Displacement: Get vector of lateral displacement between nodes. Distance: Get lateral distance between nodes. DumpLayerConnections: Write connectivity information to file. DumpLayerNodes: Write node positions to file. Notes ----- - The functions :py:func:`.GetPosition`, :py:func:`.Displacement` and :py:func:`.Distance` only works for nodes local to the current MPI process, if used in a MPI-parallel simulation. Example ------- :: import nest # Reset kernel nest.ResetKernel # create a NodeCollection with spatial extent s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[5, 5])) # retrieve positions of all (local) nodes belonging to the population pos = nest.GetPosition(s_nodes) # retrieve positions of the first node in the NodeCollection pos = nest.GetPosition(s_nodes[0]) # retrieve positions of a subset of nodes in the population pos = nest.GetPosition(s_nodes[2:18]) """ if not isinstance(nodes, NodeCollection): raise TypeError("nodes must be a NodeCollection with spatial extent") return sli_func('GetPosition', nodes) def Displacement(from_arg, to_arg): """ Get vector of lateral displacement from node(s)/Position(s) `from_arg` to node(s) `to_arg`. Displacement is the shortest displacement, taking into account periodic boundary conditions where applicable. If explicit positions are given in the `from_arg` list, they are interpreted in the `to_arg` population. - If one of `from_arg` or `to_arg` has length 1, and the other is longer, the displacement from/to the single item to all other items is given. - If `from_arg` and `to_arg` both have more than two elements, they have to be of the same length and the displacement between each pair is returned. 
    Parameters
    ----------
    from_arg : NodeCollection or tuple/list with tuple(s)/list(s) of floats
        `NodeCollection` of node IDs or tuple/list of position(s)
    to_arg : NodeCollection
        `NodeCollection` of node IDs

    Returns
    -------
    tuple:
        Displacement vectors between pairs of nodes in `from_arg` and `to_arg`

    See also
    --------
    Distance: Get lateral distances between nodes.
    DumpLayerConnections: Write connectivity information to file.
    GetPosition: Return the spatial locations of nodes.

    Notes
    -----
    - The functions :py:func:`.GetPosition`, :py:func:`.Displacement` and
      :py:func:`.Distance` only work for nodes local to the current MPI
      process, if used in an MPI-parallel simulation.

    **Example**
        ::

            import nest

            # create a spatial population
            s_nodes = nest.Create('iaf_psc_alpha',
                                  positions=nest.spatial.grid(shape=[5, 5]))

            # displacement between node 2 and 3
            print(nest.Displacement(s_nodes[1], s_nodes[2]))

            # displacement between the position (0.0, 0.0) and node 2
            print(nest.Displacement([(0.0, 0.0)], s_nodes[1]))
    """
    if not isinstance(to_arg, NodeCollection):
        raise TypeError("to_arg must be a NodeCollection")

    # A single position given as a bare ndarray is wrapped so the length
    # checks below treat it as one element.
    if isinstance(from_arg, np.ndarray):
        from_arg = (from_arg, )

    if (len(from_arg) > 1 and len(to_arg) > 1 and
            not len(from_arg) == len(to_arg)):
        raise ValueError("to_arg and from_arg must have same size unless one have size 1.")

    # Actual computation is delegated to the SLI kernel function.
    return sli_func('Displacement', from_arg, to_arg)


def Distance(from_arg, to_arg):
    """
    Get lateral distances from node(s)/position(s) `from_arg` to node(s) `to_arg`.

    The distance between two nodes is the length of its displacement.

    If explicit positions are given in the `from_arg` list, they are
    interpreted in the `to_arg` population. Distance is the shortest distance,
    taking into account periodic boundary conditions where applicable.

    - If one of `from_arg` or `to_arg` has length 1, and the other is longer,
      the displacement from/to the single item to all other items is given.
    - If `from_arg` and `to_arg` both have more than one element, they have
      to be of the same length and the distance for each pair is returned.

    Parameters
    ----------
    from_arg : NodeCollection or tuple/list with tuple(s)/list(s) of floats
        `NodeCollection` of node IDs or tuple/list of position(s)
    to_arg : NodeCollection
        `NodeCollection` of node IDs

    Returns
    -------
    tuple:
        Distances between `from` and `to`

    See also
    --------
    Displacement: Get vector of lateral displacements between nodes.
    DumpLayerConnections: Write connectivity information to file.
    GetPosition: Return the spatial locations of nodes.

    Notes
    -----
    - The functions :py:func:`.GetPosition`, :py:func:`.Displacement` and
      :py:func:`.Distance` only work for nodes local to the current MPI
      process, if used in an MPI-parallel simulation.

    Example
    -------
        ::

            import nest

            # create a spatial population
            s_nodes = nest.Create('iaf_psc_alpha',
                                  positions=nest.spatial.grid(shape=[5, 5]))

            # distance between node 2 and 3
            print(nest.Distance(s_nodes[1], s_nodes[2]))

            # distance between the position (0.0, 0.0) and node 2
            print(nest.Distance([(0.0, 0.0)], s_nodes[1]))
    """
    if not isinstance(to_arg, NodeCollection):
        raise TypeError("to_arg must be a NodeCollection")

    # Wrap a bare ndarray position so the length checks treat it as one item.
    if isinstance(from_arg, np.ndarray):
        from_arg = (from_arg, )

    if (len(from_arg) > 1 and len(to_arg) > 1 and
            not len(from_arg) == len(to_arg)):
        raise ValueError("to_arg and from_arg must have same size unless one have size 1.")

    # Actual computation is delegated to the SLI kernel function.
    return sli_func('Distance', from_arg, to_arg)


def FindNearestElement(layer, locations, find_all=False):
    """
    Return the node(s) closest to the `locations` in the given `layer`.

    This function works for fixed grid layer only.

    * If `locations` is a single 2-element array giving a grid location,
      return a `NodeCollection` of `layer` elements at the given location.
    * If `locations` is a list of coordinates, the function returns a list of
      `NodeCollection` of the nodes at all locations.

    Parameters
    ----------
    layer : NodeCollection
        `NodeCollection` of spatially distributed node IDs
    locations : tuple(s)/list(s) of tuple(s)/list(s)
        2-element list with coordinates of a single position, or list of
        2-element list of positions
    find_all : bool, default: False
        If there are several nodes with same minimal distance, return only the
        first found, if `False`. If `True`, instead of returning a single
        `NodeCollection`, return a list of `NodeCollection` containing all
        nodes with minimal distance.

    Returns
    -------
    NodeCollection:
        `NodeCollection` of node IDs if locations is a 2-element list with
        coordinates of a single position
    list:
        list of `NodeCollection` if find_all is True or locations contains
        more than one position

    See also
    --------
    FindCenterElement: Return NodeCollection of node closest to center of layers.
    GetPosition: Return the spatial locations of nodes.

    Example
    -------
        ::

            import nest

            # create a spatial population
            s_nodes = nest.Create('iaf_psc_alpha',
                                  positions=nest.spatial.grid(shape=[5, 5]))

            # get node ID of element closest to some location
            nest.FindNearestElement(s_nodes, [3.0, 4.0], True)
    """
    if not isinstance(layer, NodeCollection):
        raise TypeError("layer must be a NodeCollection")

    if not len(layer) > 0:
        raise ValueError("layer cannot be empty")

    if not is_iterable(locations):
        raise TypeError("locations must be coordinate array or list of coordinate arrays")

    # Ensure locations is sequence, keeps code below simpler
    if not is_iterable(locations[0]):
        locations = (locations, )

    result = []

    for loc in locations:
        # Distances from this location to every node in the layer.
        d = Distance(np.array(loc), layer)

        if not find_all:
            dx = np.argmin(d)  # finds location of one minimum
            result.append(layer[dx])
        else:
            # Collect every node whose distance ties (within a relative
            # tolerance of 1e-14) with the running minimum.
            minnode = list(layer[:1])
            minval = d[0]
            for idx in range(1, len(layer)):
                if d[idx] < minval:
                    minnode = [layer[idx]]
                    minval = d[idx]
                elif np.abs(d[idx] - minval) <= 1e-14 * minval:
                    minnode.append(layer[idx])
            result.append(minnode)

    # A single queried location unwraps to its single result.
    if len(result) == 1:
        result = result[0]

    return result


def _rank_specific_filename(basename):
    """Returns file name decorated with rank."""
    if NumProcesses() == 1:
        return basename
    else:
        # NOTE: this local 'np' shadows the module-level numpy import
        # within this function.
        np = NumProcesses()
        np_digs = len(str(np - 1))  # for pretty formatting
        rk = Rank()
        dot = basename.find('.')
        # Insert "-<rank>" before the first '.' (or append if no suffix).
        if dot < 0:
            return '%s-%0*d' % (basename, np_digs, rk)
        else:
            return '%s-%0*d%s' % (basename[:dot], np_digs, rk, basename[dot:])


def DumpLayerNodes(layer, outname):
    """
    Write `node ID` and position data of `layer` to file.

    Write `node ID` and position data to `outname` file. For each node in
    `layer`, a line with the following information is written:
        ::

            node ID x-position y-position [z-position]

    If `layer` contains several `node IDs`, data for all nodes in `layer`
    will be written to a single file.

    Parameters
    ----------
    layer : NodeCollection
        `NodeCollection` of spatially distributed node IDs
    outname : str
        Name of file to write to (existing files are overwritten)

    See also
    --------
    DumpLayerConnections: Write connectivity information to file.
    GetPosition: Return the spatial locations of nodes.

    Notes
    -----
    * If calling this function from a distributed simulation, this function
      will write to one file per MPI rank.
    * File names are formed by adding the MPI Rank into the file name before
      the file name suffix.
    * Each file stores data for nodes local to that file.

    Example
    -------
        ::

            import nest

            # create a spatial population
            s_nodes = nest.Create('iaf_psc_alpha',
                                  positions=nest.spatial.grid(shape=[5, 5]))

            # write layer node positions to file
            nest.DumpLayerNodes(s_nodes, 'positions.txt')
    """
    if not isinstance(layer, NodeCollection):
        raise TypeError("layer must be a NodeCollection")

    # The SLI snippet opens the (rank-decorated) file for writing, dumps the
    # node data and closes it again.
    sli_func("""
             (w) file exch DumpLayerNodes close
             """,
             layer, _rank_specific_filename(outname))


def DumpLayerConnections(source_layer, target_layer, synapse_model, outname):
    """
    Write connectivity information to file.

    This function writes connection information to file for all outgoing
    connections from the given layers with the given synapse model.
    Parameters
    ----------
    layer : NodeCollection
        `NodeCollection` with spatially distributed node IDs

    Returns
    -------
    NodeCollection:
        `NodeCollection` of the node closest to the center of the `layer`, as
        specified by `layer` parameters given in ``layer.spatial``. If several
        nodes are equally close to the center, an arbitrary one of them is
        returned.

    See also
    --------
    FindNearestElement: Return the node(s) closest to the location(s) in the
        given `layer`.
    GetPosition: Return the spatial locations of nodes.

    Example
    -------
        ::

            import nest

            # create a spatial population
            s_nodes = nest.Create('iaf_psc_alpha',
                                  positions=nest.spatial.grid(shape=[5, 5]))

            # get NodeCollection of the element closest to the center of the layer
            nest.FindCenterElement(s_nodes)
    """
    if not isinstance(layer, NodeCollection):
        raise TypeError("layer must be a NodeCollection")

    # FindNearestElement returns a NodeCollection; [0] takes the first node,
    # then it is looked up in the original layer to return a 1-element slice.
    nearest_to_center = FindNearestElement(layer, layer.spatial['center'])[0]
    index = layer.index(nearest_to_center.get('global_id'))
    return layer[index:index+1]


def GetTargetNodes(sources, tgt_layer, syn_model=None):
    """
    Obtain targets of `sources` in given `target` population.

    For each neuron in `sources`, this function finds all target elements
    in `tgt_layer`. If `syn_model` is not given (default), all targets are
    returned, otherwise only targets of specific type.

    Parameters
    ----------
    sources : NodeCollection
        NodeCollection with node IDs of `sources`
    tgt_layer : NodeCollection
        NodeCollection with node IDs of `tgt_layer`
    syn_model : [None | str], optional, default: None
        Return only target positions for a given synapse model.

    Returns
    -------
    tuple of NodeCollection:
        Tuple of `NodeCollections` of target neurons fulfilling the given
        criteria, one `NodeCollection` per source node ID in `sources`.

    See also
    --------
    GetTargetPositions: Obtain positions of targets in a given target layer
        connected to given source.
    GetConnections: Return connection identifiers between sources and targets

    Notes
    -----
    * For distributed simulations, this function only returns targets on the
      local MPI process.

    Example
    -------
        ::

            import nest

            # create a spatial population
            s_nodes = nest.Create('iaf_psc_alpha',
                                  positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))

            # connectivity specifications with a mask
            conndict = {'rule': 'pairwise_bernoulli', 'p': 1.,
                        'mask': {'rectangular': {'lower_left': [-2.0, -1.0],
                                                 'upper_right': [2.0, 1.0]}}}

            # connect population s_nodes with itself according to the given
            # specifications
            nest.Connect(s_nodes, s_nodes, conndict)

            # get the node IDs of the targets of a source neuron
            nest.GetTargetNodes(s_nodes[4], s_nodes)
    """
    if not isinstance(sources, NodeCollection):
        raise TypeError("sources must be a NodeCollection.")

    if not isinstance(tgt_layer, NodeCollection):
        raise TypeError("tgt_layer must be a NodeCollection")

    conns = GetConnections(sources, tgt_layer, synapse_model=syn_model)

    # Re-organize conns into one list per source, containing only target node IDs.
    src_tgt_map = dict((snode_id, []) for snode_id in sources.tolist())
    for src, tgt in zip(conns.sources(), conns.targets()):
        src_tgt_map[src].append(tgt)

    # np.unique also sorts the target IDs, as required by NodeCollection.
    for src in src_tgt_map.keys():
        src_tgt_map[src] = NodeCollection(list(np.unique(src_tgt_map[src])))

    # convert dict to nested list in same order as sources
    return tuple(src_tgt_map[snode_id] for snode_id in sources.tolist())


def GetTargetPositions(sources, tgt_layer, syn_model=None):
    """
    Obtain positions of targets to a given `NodeCollection` of `sources`.

    For each neuron in `sources`, this function finds all target elements
    in `tgt_layer`. If `syn_model` is not given (default), all targets are
    returned, otherwise only targets of specific type.

    Parameters
    ----------
    sources : NodeCollection
        `NodeCollection` with node ID(s) of source neurons
    tgt_layer : NodeCollection
        `NodeCollection` of tgt_layer
    syn_model : [None | str], optional, default: None
        Return only target positions for a given synapse model.

    Returns
    -------
    list of list(s) of tuple(s) of floats:
        Positions of target neurons fulfilling the given criteria as a nested
        list, containing one list of positions per node in sources.

    See also
    --------
    GetTargetNodes: Obtain targets of a `NodeCollection` of sources in a
        given target population.

    Notes
    -----
    * For distributed simulations, this function only returns targets on the
      local MPI process.

    Example
    -------
        ::

            import nest

            # create a spatial population
            s_nodes = nest.Create('iaf_psc_alpha',
                                  positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.]))

            # connectivity specifications with a mask
            conndict = {'rule': 'pairwise_bernoulli', 'p': 1.,
                        'mask': {'rectangular': {'lower_left': [-2.0, -1.0],
                                                 'upper_right': [2.0, 1.0]}}}

            # connect population s_nodes with itself according to the given
            # specifications
            nest.Connect(s_nodes, s_nodes, conndict)

            # get the positions of the targets of a source neuron
            nest.GetTargetPositions(s_nodes[5], s_nodes)
    """
    if not isinstance(sources, NodeCollection):
        raise TypeError("sources must be a NodeCollection.")

    # Find positions to all nodes in target layer
    pos_all_tgts = GetPosition(tgt_layer)
    first_tgt_node_id = tgt_layer[0].get('global_id')

    connections = GetConnections(sources, tgt_layer,
                                 synapse_model=syn_model)
    srcs = connections.get('source')
    tgts = connections.get('target')
    # A single connection yields bare ints; normalize to lists.
    if isinstance(srcs, int):
        srcs = [srcs]
    if isinstance(tgts, int):
        tgts = [tgts]

    # Make dictionary where the keys are the source node_ids, which is mapped
    # to a list with the positions of the targets connected to the source.
    src_tgt_pos_map = dict((snode_id, []) for snode_id in sources.tolist())
    for i in range(len(connections)):
        # Target positions are indexed by offset from the first node ID of
        # the target layer; assumes contiguous IDs within tgt_layer.
        tgt_indx = tgts[i] - first_tgt_node_id
        src_tgt_pos_map[srcs[i]].append(pos_all_tgts[tgt_indx])

    # Turn dict into list in same order as sources
    return [src_tgt_pos_map[snode_id] for snode_id in sources.tolist()]


def SelectNodesByMask(layer, anchor, mask_obj):
    """
    Obtain the node IDs inside a masked area of a spatially distributed population.

    The function finds and returns all the node IDs inside a given mask of a
    `layer`. The node IDs are returned as a `NodeCollection`. The function
    works on both 2-dimensional and 3-dimensional masks and layers. All mask
    types are allowed, including combined masks.

    Parameters
    ----------
    layer : NodeCollection
        `NodeCollection` with node IDs of the `layer` to select nodes from.
    anchor : tuple/list of double
        List containing center position of the layer. This is the point from
        where we start to search.
    mask_obj: object
        `Mask` object specifying chosen area.

    Returns
    -------
    NodeCollection:
        `NodeCollection` of nodes/elements inside the mask.
""" if not isinstance(layer, NodeCollection): raise TypeError("layer must be a NodeCollection.") mask_datum = mask_obj._datum node_id_list = sli_func('SelectNodesByMask', layer, anchor, mask_datum) return NodeCollection(node_id_list) def _draw_extent(ax, xctr, yctr, xext, yext): """Draw extent and set aspect ration, limits""" # import pyplot here and not at toplevel to avoid preventing users # from changing matplotlib backend after importing nest import matplotlib.pyplot as plt # thin gray line indicating extent llx, lly = xctr - xext / 2.0, yctr - yext / 2.0 urx, ury = llx + xext, lly + yext ax.add_patch( plt.Rectangle((llx, lly), xext, yext, fc='none', ec='0.5', lw=1, zorder=1)) # set limits slightly outside extent ax.set(aspect='equal', xlim=(llx - 0.05 * xext, urx + 0.05 * xext), ylim=(lly - 0.05 * yext, ury + 0.05 * yext), xticks=tuple(), yticks=tuple()) def _shifted_positions(pos, ext): """Get shifted positions corresponding to boundary conditions.""" return [[pos[0] + ext[0], pos[1]], [pos[0] - ext[0], pos[1]], [pos[0], pos[1] + ext[1]], [pos[0], pos[1] - ext[1]], [pos[0] + ext[0], pos[1] - ext[1]], [pos[0] - ext[0], pos[1] + ext[1]], [pos[0] + ext[0], pos[1] + ext[1]], [pos[0] - ext[0], pos[1] - ext[1]]] def PlotLayer(layer, fig=None, nodecolor='b', nodesize=20): """ Plot all nodes in a `layer`. Parameters ---------- layer : NodeCollection `NodeCollection` of spatially distributed nodes fig : [None | matplotlib.figure.Figure object], optional, default: None Matplotlib figure to plot to. If not given, a new figure is created. nodecolor : [None | any matplotlib color], optional, default: 'b' Color for nodes nodesize : float, optional, default: 20 Marker size for nodes Returns ------- `matplotlib.figure.Figure` object See also -------- PlotProbabilityParameter: Create a plot of the connection probability and/or mask. PlotTargets: Plot all targets of a given source. 
matplotlib.figure.Figure : matplotlib Figure class Notes ----- * Do **not** use this function in distributed simulations. Example ------- :: import nest import matplotlib.pyplot as plt # create a spatial population s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.])) # plot layer with all its nodes nest.PlotLayer(s_nodes) plt.show() """ # import pyplot here and not at toplevel to avoid preventing users # from changing matplotlib backend after importing nest import matplotlib.pyplot as plt if not HAVE_MPL: raise ImportError('Matplotlib could not be imported') if not isinstance(layer, NodeCollection): raise TypeError("layer must be a NodeCollection.") # get layer extent ext = layer.spatial['extent'] if len(ext) == 2: # 2D layer # get layer extent and center, x and y xext, yext = ext xctr, yctr = layer.spatial['center'] # extract position information, transpose to list of x and y pos xpos, ypos = zip(*GetPosition(layer)) if fig is None: fig = plt.figure() ax = fig.add_subplot(111) else: ax = fig.gca() ax.scatter(xpos, ypos, s=nodesize, facecolor=nodecolor, edgecolor='none') _draw_extent(ax, xctr, yctr, xext, yext) elif len(ext) == 3: # 3D layer from mpl_toolkits.mplot3d import Axes3D # extract position information, transpose to list of x,y,z pos pos = zip(*GetPosition(layer)) if fig is None: fig = plt.figure() ax = fig.add_subplot(111, projection='3d') else: ax = fig.gca() ax.scatter(*pos, s=nodesize, c=nodecolor) plt.draw_if_interactive() else: raise ValueError("unexpected dimension of layer") return fig def PlotTargets(src_nrn, tgt_layer, syn_type=None, fig=None, mask=None, probability_parameter=None, src_color='red', src_size=50, tgt_color='blue', tgt_size=20, mask_color='yellow', probability_cmap='Greens'): """ Plot all targets of source neuron `src_nrn` in a target layer `tgt_layer`. 
Parameters ---------- src_nrn : NodeCollection `NodeCollection` of source neuron (as single-element NodeCollection) tgt_layer : NodeCollection `NodeCollection` of tgt_layer syn_type : [None | str], optional, default: None Show only targets connected with a given synapse type fig : [None | matplotlib.figure.Figure object], optional, default: None Matplotlib figure to plot to. If not given, a new figure is created. mask : [None | dict], optional, default: None Draw mask with targets; see :py:func:`.PlotProbabilityParameter` for details. probability_parameter : [None | Parameter], optional, default: None Draw connection probability with targets; see :py:func:`.PlotProbabilityParameter` for details. src_color : [None | any matplotlib color], optional, default: 'red' Color used to mark source node position src_size : float, optional, default: 50 Size of source marker (see scatter for details) tgt_color : [None | any matplotlib color], optional, default: 'blue' Color used to mark target node positions tgt_size : float, optional, default: 20 Size of target markers (see scatter for details) mask_color : [None | any matplotlib color], optional, default: 'red' Color used for line marking mask probability_cmap : [None | any matplotlib cmap color], optional, default: 'Greens' Color used for lines marking probability parameter. Returns ------- matplotlib.figure.Figure object See also -------- GetTargetNodes: Obtain targets of a sources in a given target layer. GetTargetPositions: Obtain positions of targets of sources in a given target layer. probability_parameter: Add indication of connection probability and mask to axes. PlotLayer: Plot all nodes in a spatially distributed population. matplotlib.pyplot.scatter : matplotlib scatter plot. Notes ----- * Do **not** use this function in distributed simulations. 
**Example** :: import nest import matplotlib.pyplot as plt # create a spatial population s_nodes = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid(shape=[11, 11], extent=[11., 11.])) # connectivity specifications with a mask conndict = {'rule': 'pairwise_bernoulli', 'p': 1., 'mask': {'rectangular': {'lower_left' : [-2.0, -1.0], 'upper_right': [2.0, 1.0]}}} # connect population s_nodes with itself according to the given # specifications nest.Connect(s_nodes, s_nodes, conndict) # plot the targets of a source neuron nest.PlotTargets(s_nodes[4], s_nodes) plt.show() """ # import pyplot here and not at toplevel to avoid preventing users # from changing matplotlib backend after importing nest import matplotlib.pyplot as plt if not HAVE_MPL: raise ImportError('Matplotlib could not be imported') if not isinstance(src_nrn, NodeCollection) or len(src_nrn) != 1: raise TypeError("src_nrn must be a single element NodeCollection.") if not isinstance(tgt_layer, NodeCollection): raise TypeError("tgt_layer must be a NodeCollection.") # get position of source srcpos = GetPosition(src_nrn) # get layer extent ext = tgt_layer.spatial['extent'] if len(ext) == 2: # 2D layer # get layer extent and center, x and y xext, yext = ext xctr, yctr = tgt_layer.spatial['center'] if fig is None: fig = plt.figure() ax = fig.add_subplot(111) else: ax = fig.gca() # get positions, reorganize to x and y vectors tgtpos = GetTargetPositions(src_nrn, tgt_layer, syn_type) if tgtpos: xpos, ypos = zip(*tgtpos[0]) ax.scatter(xpos, ypos, s=tgt_size, facecolor=tgt_color, edgecolor='none') ax.scatter(srcpos[:1], srcpos[1:], s=src_size, facecolor=src_color, edgecolor='none', alpha=0.4, zorder=-10) if mask is not None or probability_parameter is not None: edges = [xctr - xext, xctr + xext, yctr - yext, yctr + yext] PlotProbabilityParameter(src_nrn, probability_parameter, mask=mask, edges=edges, ax=ax, prob_cmap=probability_cmap, mask_color=mask_color) _draw_extent(ax, xctr, yctr, xext, yext) else: # 3D 
layer from mpl_toolkits.mplot3d import Axes3D if fig is None: fig = plt.figure() ax = fig.add_subplot(111, projection='3d') else: ax = fig.gca() # get positions, reorganize to x,y,z vectors tgtpos = GetTargetPositions(src_nrn, tgt_layer, syn_type) if tgtpos: xpos, ypos, zpos = zip(*tgtpos[0]) ax.scatter3D(xpos, ypos, zpos, s=tgt_size, facecolor=tgt_color, edgecolor='none') ax.scatter3D(srcpos[:1], srcpos[1:2], srcpos[2:], s=src_size, facecolor=src_color, edgecolor='none', alpha=0.4, zorder=-10) plt.draw_if_interactive() return fig def _create_mask_patches(mask, periodic, extent, source_pos, face_color='yellow'): """Create Matplotlib Patch objects representing the mask""" # import pyplot here and not at toplevel to avoid preventing users # from changing matplotlib backend after importing nest import matplotlib.pyplot as plt edge_color = 'black' alpha = 0.2 line_width = 2 mask_patches = [] if 'anchor' in mask: offs = np.array(mask['anchor']) else: offs = np.array([0., 0.]) if 'circular' in mask: r = mask['circular']['radius'] patch = plt.Circle(source_pos + offs, radius=r, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) mask_patches.append(patch) if periodic: for pos in _shifted_positions(source_pos + offs, extent): patch = plt.Circle(pos, radius=r, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) mask_patches.append(patch) elif 'doughnut' in mask: # Mmm... 
doughnut def make_doughnut_patch(pos, r_out, r_in, ec, fc, alpha): def make_circle(r): t = np.arange(0, np.pi * 2.0, 0.01) t = t.reshape((len(t), 1)) x = r * np.cos(t) y = r * np.sin(t) return np.hstack((x, y)) outside_verts = make_circle(r_out)[::-1] inside_verts = make_circle(r_in) codes = np.ones(len(inside_verts), dtype=mpath.Path.code_type) * mpath.Path.LINETO codes[0] = mpath.Path.MOVETO vertices = np.concatenate([outside_verts, inside_verts]) vertices += pos all_codes = np.concatenate((codes, codes)) path = mpath.Path(vertices, all_codes) return mpatches.PathPatch(path, fc=fc, ec=ec, alpha=alpha, lw=line_width) r_in = mask['doughnut']['inner_radius'] r_out = mask['doughnut']['outer_radius'] pos = source_pos + offs patch = make_doughnut_patch(pos, r_in, r_out, edge_color, face_color, alpha) mask_patches.append(patch) if periodic: for pos in _shifted_positions(source_pos + offs, extent): patch = make_doughnut_patch(pos, r_in, r_out, edge_color, face_color, alpha) mask_patches.append(patch) elif 'rectangular' in mask: ll = np.array(mask['rectangular']['lower_left']) ur = np.array(mask['rectangular']['upper_right']) pos = source_pos + ll + offs if 'azimuth_angle' in mask['rectangular']: angle = mask['rectangular']['azimuth_angle'] angle_rad = angle * np.pi / 180 cs = np.cos([angle_rad])[0] sn = np.sin([angle_rad])[0] pos = [pos[0] * cs - pos[1] * sn, pos[0] * sn + pos[1] * cs] else: angle = 0.0 patch = plt.Rectangle(pos, ur[0] - ll[0], ur[1] - ll[1], angle=angle, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) mask_patches.append(patch) if periodic: for pos in _shifted_positions(source_pos + ll + offs, extent): patch = plt.Rectangle(pos, ur[0] - ll[0], ur[1] - ll[1], angle=angle, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) mask_patches.append(patch) elif 'elliptical' in mask: width = mask['elliptical']['major_axis'] height = mask['elliptical']['minor_axis'] if 'azimuth_angle' in mask['elliptical']: angle = 
mask['elliptical']['azimuth_angle'] else: angle = 0.0 if 'anchor' in mask['elliptical']: anchor = mask['elliptical']['anchor'] else: anchor = np.array([0., 0.]) patch = mpl.patches.Ellipse(source_pos + offs + anchor, width, height, angle=angle, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) mask_patches.append(patch) if periodic: for pos in _shifted_positions(source_pos + offs + anchor, extent): patch = mpl.patches.Ellipse(pos, width, height, angle=angle, fc=face_color, ec=edge_color, alpha=alpha, lw=line_width) mask_patches.append(patch) else: raise ValueError('Mask type cannot be plotted with this version of PyNEST.') return mask_patches def PlotProbabilityParameter(source, parameter=None, mask=None, edges=[-0.5, 0.5, -0.5, 0.5], shape=[100, 100], ax=None, prob_cmap='Greens', mask_color='yellow'): """ Create a plot of the connection probability and/or mask. A probability plot is created based on a `Parameter` and a `source`. The `Parameter` should have a distance dependency. The `source` must be given as a `NodeCollection` with a single node ID. Optionally a `mask` can also be plotted. Parameters ---------- source : NodeCollection Single node ID `NodeCollection` to use as source. parameter : Parameter `Parameter` the probability is based on. mask : Dictionary Optional specification of a connection mask. Connections will only be made to nodes inside the mask. See :py:func:`.CreateMask` for options on how to specify the mask. edges : list/tuple List of four edges of the region to plot. The values are given as [x_min, x_max, y_min, y_max]. shape : list/tuple Number of `Parameter` values to calculate in each direction. ax : matplotlib.axes.AxesSubplot, A matplotlib axes instance to plot in. If none is given, a new one is created. 
""" # import pyplot here and not at toplevel to avoid preventing users # from changing matplotlib backend after importing nest import matplotlib.pyplot as plt if not HAVE_MPL: raise ImportError('Matplotlib could not be imported') if parameter is None and mask is None: raise ValueError('At least one of parameter or mask must be specified') if ax is None: fig, ax = plt.subplots() ax.set_xlim(*edges[:2]) ax.set_ylim(*edges[2:]) if parameter is not None: z = np.zeros(shape[::-1]) for i, x in enumerate(np.linspace(edges[0], edges[1], shape[0])): positions = [[x, y] for y in np.linspace(edges[2], edges[3], shape[1])] values = parameter.apply(source, positions) z[:, i] = np.array(values) img = ax.imshow(np.minimum(np.maximum(z, 0.0), 1.0), extent=edges, origin='lower', cmap=prob_cmap, vmin=0., vmax=1.) plt.colorbar(img, ax=ax, fraction=0.046, pad=0.04) if mask is not None: periodic = source.spatial['edge_wrap'] extent = source.spatial['extent'] source_pos = GetPosition(source) patches = _create_mask_patches(mask, periodic, extent, source_pos, face_color=mask_color) for patch in patches: patch.set_zorder(0.5) ax.add_patch(patch)
gpl-2.0
DmitryYurov/BornAgain
Examples/python/fitting/ex02_AdvancedExamples/find_background.py
2
2990
""" Fitting example: looking for background and scale factors. Real data contains some "unknown" background and scale factor. In the fit we are trying to find cylinder radius and height, scale and background factors. """ import numpy as np from matplotlib import pyplot as plt import bornagain as ba from bornagain import deg, angstrom, nm def get_sample(params): """ Build the sample representing cylinders on top of substrate without interference. """ radius = params["radius"] height = params["height"] m_air = ba.HomogeneousMaterial("Air", 0.0, 0.0) m_substrate = ba.HomogeneousMaterial("Substrate", 6e-6, 2e-8) m_particle = ba.HomogeneousMaterial("Particle", 6e-4, 2e-8) cylinder_ff = ba.FormFactorCylinder(radius, height) cylinder = ba.Particle(m_particle, cylinder_ff) layout = ba.ParticleLayout() layout.addParticle(cylinder) air_layer = ba.Layer(m_air) air_layer.addLayout(layout) substrate_layer = ba.Layer(m_substrate, 0) multi_layer = ba.MultiLayer() multi_layer.addLayer(air_layer) multi_layer.addLayer(substrate_layer) return multi_layer def get_simulation(params): """ Create and return GISAXS simulation with beam and detector defined """ background = params["background"] scale = params["scale"] simulation = ba.GISASSimulation() simulation.setDetectorParameters(100, -1.0*deg, 1.0*deg, 100, 0.0*deg, 2.0*deg) simulation.setBeamParameters(1.0*angstrom, 0.2*deg, 0.0*deg) simulation.setBeamIntensity(1e12*scale) simulation.setBackground(ba.ConstantBackground(background)) simulation.setSample(get_sample(params)) return simulation def create_real_data(): """ Generating "real" data by adding noise, background and scale to the simulated data. Cylinder radius is set to 5nm, cylinder height to 10nm. During the fit we will try to find cylinder height and radius and scale, background factors. 
""" params = {'radius': 5.0*nm, 'height': 10.0*nm, 'scale': 2.0, 'background': 1000} simulation = get_simulation(params) simulation.runSimulation() # retrieving simulated data in the form of numpy array return simulation.result().array() def run_fitting(): """ main function to run fitting """ real_data = create_real_data() fit_objective = ba.FitObjective() fit_objective.addSimulationAndData(get_simulation, real_data, 1.0) fit_objective.initPrint(10) fit_objective.initPlot(10) params = ba.Parameters() params.add("radius", 5.*nm, vary=False) params.add("height", 9.*nm, min=8.*nm, max=12.*nm) params.add("scale", 1.5, min=1.0, max=3.0) params.add("background", 200, min=100.0, max=2000.0, step=100.0) minimizer = ba.Minimizer() result = minimizer.minimize(fit_objective.evaluate, params) fit_objective.finalize(result) if __name__ == '__main__': run_fitting() plt.show()
gpl-3.0
kaichogami/scikit-learn
sklearn/tests/test_kernel_approximation.py
78
7586
import numpy as np from scipy.sparse import csr_matrix from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_array_almost_equal, assert_raises from sklearn.utils.testing import assert_less_equal from sklearn.metrics.pairwise import kernel_metrics from sklearn.kernel_approximation import RBFSampler from sklearn.kernel_approximation import AdditiveChi2Sampler from sklearn.kernel_approximation import SkewedChi2Sampler from sklearn.kernel_approximation import Nystroem from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel # generate data rng = np.random.RandomState(0) X = rng.random_sample(size=(300, 50)) Y = rng.random_sample(size=(300, 50)) X /= X.sum(axis=1)[:, np.newaxis] Y /= Y.sum(axis=1)[:, np.newaxis] def test_additive_chi2_sampler(): # test that AdditiveChi2Sampler approximates kernel on random data # compute exact kernel # abbreviations for easier formula X_ = X[:, np.newaxis, :] Y_ = Y[np.newaxis, :, :] large_kernel = 2 * X_ * Y_ / (X_ + Y_) # reduce to n_samples_x x n_samples_y by summing over features kernel = (large_kernel.sum(axis=2)) # approximate kernel mapping transform = AdditiveChi2Sampler(sample_steps=3) X_trans = transform.fit_transform(X) Y_trans = transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) assert_array_almost_equal(kernel, kernel_approx, 1) X_sp_trans = transform.fit_transform(csr_matrix(X)) Y_sp_trans = transform.transform(csr_matrix(Y)) assert_array_equal(X_trans, X_sp_trans.A) assert_array_equal(Y_trans, Y_sp_trans.A) # test error is raised on negative input Y_neg = Y.copy() Y_neg[0, 0] = -1 assert_raises(ValueError, transform.transform, Y_neg) # test error on invalid sample_steps transform = AdditiveChi2Sampler(sample_steps=4) assert_raises(ValueError, transform.fit, X) # test that the sample interval is set correctly sample_steps_available = [1, 2, 3] for sample_steps in 
sample_steps_available: # test that the sample_interval is initialized correctly transform = AdditiveChi2Sampler(sample_steps=sample_steps) assert_equal(transform.sample_interval, None) # test that the sample_interval is changed in the fit method transform.fit(X) assert_not_equal(transform.sample_interval_, None) # test that the sample_interval is set correctly sample_interval = 0.3 transform = AdditiveChi2Sampler(sample_steps=4, sample_interval=sample_interval) assert_equal(transform.sample_interval, sample_interval) transform.fit(X) assert_equal(transform.sample_interval_, sample_interval) def test_skewed_chi2_sampler(): # test that RBFSampler approximates kernel on random data # compute exact kernel c = 0.03 # abbreviations for easier formula X_c = (X + c)[:, np.newaxis, :] Y_c = (Y + c)[np.newaxis, :, :] # we do it in log-space in the hope that it's more stable # this array is n_samples_x x n_samples_y big x n_features log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) - np.log(X_c + Y_c)) # reduce to n_samples_x x n_samples_y by summing over features in log-space kernel = np.exp(log_kernel.sum(axis=2)) # approximate kernel mapping transform = SkewedChi2Sampler(skewedness=c, n_components=1000, random_state=42) X_trans = transform.fit_transform(X) Y_trans = transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) assert_array_almost_equal(kernel, kernel_approx, 1) # test error is raised on negative input Y_neg = Y.copy() Y_neg[0, 0] = -1 assert_raises(ValueError, transform.transform, Y_neg) def test_rbf_sampler(): # test that RBFSampler approximates kernel on random data # compute exact kernel gamma = 10. 
kernel = rbf_kernel(X, Y, gamma=gamma) # approximate kernel mapping rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42) X_trans = rbf_transform.fit_transform(X) Y_trans = rbf_transform.transform(Y) kernel_approx = np.dot(X_trans, Y_trans.T) error = kernel - kernel_approx assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased np.abs(error, out=error) assert_less_equal(np.max(error), 0.1) # nothing too far off assert_less_equal(np.mean(error), 0.05) # mean is fairly close def test_input_validation(): # Regression test: kernel approx. transformers should work on lists # No assertions; the old versions would simply crash X = [[1, 2], [3, 4], [5, 6]] AdditiveChi2Sampler().fit(X).transform(X) SkewedChi2Sampler().fit(X).transform(X) RBFSampler().fit(X).transform(X) X = csr_matrix(X) RBFSampler().fit(X).transform(X) def test_nystroem_approximation(): # some basic tests rnd = np.random.RandomState(0) X = rnd.uniform(size=(10, 4)) # With n_components = n_samples this is exact X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X) K = rbf_kernel(X) assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K) trans = Nystroem(n_components=2, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) # test callable kernel linear_kernel = lambda X, Y: np.dot(X, Y.T) trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) # test that available kernels fit and transform kernels_available = kernel_metrics() for kern in kernels_available: trans = Nystroem(n_components=2, kernel=kern, random_state=rnd) X_transformed = trans.fit(X).transform(X) assert_equal(X_transformed.shape, (X.shape[0], 2)) def test_nystroem_singular_kernel(): # test that nystroem works with singular kernel matrix rng = np.random.RandomState(0) X = rng.rand(10, 20) X = np.vstack([X] * 2) # 
duplicate samples gamma = 100 N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X) X_transformed = N.transform(X) K = rbf_kernel(X, gamma=gamma) assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T)) assert_true(np.all(np.isfinite(Y))) def test_nystroem_poly_kernel_params(): # Non-regression: Nystroem should pass other parameters beside gamma. rnd = np.random.RandomState(37) X = rnd.uniform(size=(10, 4)) K = polynomial_kernel(X, degree=3.1, coef0=.1) nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0], degree=3.1, coef0=.1) X_transformed = nystroem.fit_transform(X) assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K) def test_nystroem_callable(): # Test Nystroem on a callable. rnd = np.random.RandomState(42) n_samples = 10 X = rnd.uniform(size=(n_samples, 4)) def logging_histogram_kernel(x, y, log): """Histogram kernel that writes to a log.""" log.append(1) return np.minimum(x, y).sum() kernel_log = [] X = list(X) # test input validation Nystroem(kernel=logging_histogram_kernel, n_components=(n_samples - 1), kernel_params={'log': kernel_log}).fit(X) assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
bsd-3-clause
trungnt13/scikit-learn
sklearn/pipeline.py
162
21103
""" The :mod:`sklearn.pipeline` module implements utilities to build a composite estimator, as a chain of transforms and estimators. """ # Author: Edouard Duchesnay # Gael Varoquaux # Virgile Fritsch # Alexandre Gramfort # Lars Buitinck # Licence: BSD from collections import defaultdict import numpy as np from scipy import sparse from .base import BaseEstimator, TransformerMixin from .externals.joblib import Parallel, delayed from .externals import six from .utils import tosequence from .utils.metaestimators import if_delegate_has_method from .externals.six import iteritems __all__ = ['Pipeline', 'FeatureUnion'] class Pipeline(BaseEstimator): """Pipeline of transforms with a final estimator. Sequentially apply a list of transforms and a final estimator. Intermediate steps of the pipeline must be 'transforms', that is, they must implement fit and transform methods. The final estimator only needs to implement fit. The purpose of the pipeline is to assemble several steps that can be cross-validated together while setting different parameters. For this, it enables setting parameters of the various steps using their names and the parameter name separated by a '__', as in the example below. Read more in the :ref:`User Guide <pipeline>`. Parameters ---------- steps : list List of (name, transform) tuples (implementing fit/transform) that are chained, in the order in which they are chained, with the last object an estimator. Attributes ---------- named_steps : dict Read-only attribute to access any step parameter by user given name. Keys are step names and values are steps parameters. Examples -------- >>> from sklearn import svm >>> from sklearn.datasets import samples_generator >>> from sklearn.feature_selection import SelectKBest >>> from sklearn.feature_selection import f_regression >>> from sklearn.pipeline import Pipeline >>> # generate some data to play with >>> X, y = samples_generator.make_classification( ... 
n_informative=5, n_redundant=0, random_state=42) >>> # ANOVA SVM-C >>> anova_filter = SelectKBest(f_regression, k=5) >>> clf = svm.SVC(kernel='linear') >>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)]) >>> # You can set the parameters using the names issued >>> # For instance, fit using a k of 10 in the SelectKBest >>> # and a parameter 'C' of the svm >>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y) ... # doctest: +ELLIPSIS Pipeline(steps=[...]) >>> prediction = anova_svm.predict(X) >>> anova_svm.score(X, y) # doctest: +ELLIPSIS 0.77... >>> # getting the selected features chosen by anova_filter >>> anova_svm.named_steps['anova'].get_support() ... # doctest: +NORMALIZE_WHITESPACE array([ True, True, True, False, False, True, False, True, True, True, False, False, True, False, True, False, False, False, False, True], dtype=bool) """ # BaseEstimator interface def __init__(self, steps): names, estimators = zip(*steps) if len(dict(steps)) != len(steps): raise ValueError("Provided step names are not unique: %s" % (names,)) # shallow copy of steps self.steps = tosequence(steps) transforms = estimators[:-1] estimator = estimators[-1] for t in transforms: if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(t, "transform")): raise TypeError("All intermediate steps of the chain should " "be transforms and implement fit and transform" " '%s' (type %s) doesn't)" % (t, type(t))) if not hasattr(estimator, "fit"): raise TypeError("Last step of chain should implement fit " "'%s' (type %s) doesn't)" % (estimator, type(estimator))) @property def _estimator_type(self): return self.steps[-1][1]._estimator_type def get_params(self, deep=True): if not deep: return super(Pipeline, self).get_params(deep=False) else: out = self.named_steps for name, step in six.iteritems(self.named_steps): for key, value in six.iteritems(step.get_params(deep=True)): out['%s__%s' % (name, key)] = value out.update(super(Pipeline, self).get_params(deep=False)) 
return out @property def named_steps(self): return dict(self.steps) @property def _final_estimator(self): return self.steps[-1][1] # Estimator interface def _pre_transform(self, X, y=None, **fit_params): fit_params_steps = dict((step, {}) for step, _ in self.steps) for pname, pval in six.iteritems(fit_params): step, param = pname.split('__', 1) fit_params_steps[step][param] = pval Xt = X for name, transform in self.steps[:-1]: if hasattr(transform, "fit_transform"): Xt = transform.fit_transform(Xt, y, **fit_params_steps[name]) else: Xt = transform.fit(Xt, y, **fit_params_steps[name]) \ .transform(Xt) return Xt, fit_params_steps[self.steps[-1][0]] def fit(self, X, y=None, **fit_params): """Fit all the transforms one after the other and transform the data, then fit the transformed data using the final estimator. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. """ Xt, fit_params = self._pre_transform(X, y, **fit_params) self.steps[-1][-1].fit(Xt, y, **fit_params) return self def fit_transform(self, X, y=None, **fit_params): """Fit all the transforms one after the other and transform the data, then use fit_transform on transformed data using the final estimator. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. """ Xt, fit_params = self._pre_transform(X, y, **fit_params) if hasattr(self.steps[-1][-1], 'fit_transform'): return self.steps[-1][-1].fit_transform(Xt, y, **fit_params) else: return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt) @if_delegate_has_method(delegate='_final_estimator') def predict(self, X): """Applies transforms to the data, and the predict method of the final estimator. 
Valid only if the final estimator implements predict. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. """ Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].predict(Xt) @if_delegate_has_method(delegate='_final_estimator') def fit_predict(self, X, y=None, **fit_params): """Applies fit_predict of last step in pipeline after transforms. Applies fit_transforms of a pipeline to the data, followed by the fit_predict method of the final estimator in the pipeline. Valid only if the final estimator implements fit_predict. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. """ Xt, fit_params = self._pre_transform(X, y, **fit_params) return self.steps[-1][-1].fit_predict(Xt, y, **fit_params) @if_delegate_has_method(delegate='_final_estimator') def predict_proba(self, X): """Applies transforms to the data, and the predict_proba method of the final estimator. Valid only if the final estimator implements predict_proba. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. """ Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].predict_proba(Xt) @if_delegate_has_method(delegate='_final_estimator') def decision_function(self, X): """Applies transforms to the data, and the decision_function method of the final estimator. Valid only if the final estimator implements decision_function. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. 
""" Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].decision_function(Xt) @if_delegate_has_method(delegate='_final_estimator') def predict_log_proba(self, X): """Applies transforms to the data, and the predict_log_proba method of the final estimator. Valid only if the final estimator implements predict_log_proba. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. """ Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].predict_log_proba(Xt) @if_delegate_has_method(delegate='_final_estimator') def transform(self, X): """Applies transforms to the data, and the transform method of the final estimator. Valid only if the final estimator implements transform. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. """ Xt = X for name, transform in self.steps: Xt = transform.transform(Xt) return Xt @if_delegate_has_method(delegate='_final_estimator') def inverse_transform(self, X): """Applies inverse transform to the data. Starts with the last step of the pipeline and applies ``inverse_transform`` in inverse order of the pipeline steps. Valid only if all steps of the pipeline implement inverse_transform. Parameters ---------- X : iterable Data to inverse transform. Must fulfill output requirements of the last step of the pipeline. """ if X.ndim == 1: X = X[None, :] Xt = X for name, step in self.steps[::-1]: Xt = step.inverse_transform(Xt) return Xt @if_delegate_has_method(delegate='_final_estimator') def score(self, X, y=None): """Applies transforms to the data, and the score method of the final estimator. Valid only if the final estimator implements score. Parameters ---------- X : iterable Data to score. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Targets used for scoring. 
Must fulfill label requirements for all steps of the pipeline. """ Xt = X for name, transform in self.steps[:-1]: Xt = transform.transform(Xt) return self.steps[-1][-1].score(Xt, y) @property def classes_(self): return self.steps[-1][-1].classes_ @property def _pairwise(self): # check if first estimator expects pairwise input return getattr(self.steps[0][1], '_pairwise', False) def _name_estimators(estimators): """Generate names for estimators.""" names = [type(estimator).__name__.lower() for estimator in estimators] namecount = defaultdict(int) for est, name in zip(estimators, names): namecount[name] += 1 for k, v in list(six.iteritems(namecount)): if v == 1: del namecount[k] for i in reversed(range(len(estimators))): name = names[i] if name in namecount: names[i] += "-%d" % namecount[name] namecount[name] -= 1 return list(zip(names, estimators)) def make_pipeline(*steps): """Construct a Pipeline from the given estimators. This is a shorthand for the Pipeline constructor; it does not require, and does not permit, naming the estimators. Instead, they will be given names automatically based on their types. 
Examples -------- >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.preprocessing import StandardScaler >>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE Pipeline(steps=[('standardscaler', StandardScaler(copy=True, with_mean=True, with_std=True)), ('gaussiannb', GaussianNB())]) Returns ------- p : Pipeline """ return Pipeline(_name_estimators(steps)) def _fit_one_transformer(transformer, X, y): return transformer.fit(X, y) def _transform_one(transformer, name, X, transformer_weights): if transformer_weights is not None and name in transformer_weights: # if we have a weight for this transformer, muliply output return transformer.transform(X) * transformer_weights[name] return transformer.transform(X) def _fit_transform_one(transformer, name, X, y, transformer_weights, **fit_params): if transformer_weights is not None and name in transformer_weights: # if we have a weight for this transformer, muliply output if hasattr(transformer, 'fit_transform'): X_transformed = transformer.fit_transform(X, y, **fit_params) return X_transformed * transformer_weights[name], transformer else: X_transformed = transformer.fit(X, y, **fit_params).transform(X) return X_transformed * transformer_weights[name], transformer if hasattr(transformer, 'fit_transform'): X_transformed = transformer.fit_transform(X, y, **fit_params) return X_transformed, transformer else: X_transformed = transformer.fit(X, y, **fit_params).transform(X) return X_transformed, transformer class FeatureUnion(BaseEstimator, TransformerMixin): """Concatenates results of multiple transformer objects. This estimator applies a list of transformer objects in parallel to the input data, then concatenates the results. This is useful to combine several feature extraction mechanisms into a single transformer. Read more in the :ref:`User Guide <feature_union>`. 
Parameters ---------- transformer_list: list of (string, transformer) tuples List of transformer objects to be applied to the data. The first half of each tuple is the name of the transformer. n_jobs: int, optional Number of jobs to run in parallel (default 1). transformer_weights: dict, optional Multiplicative weights for features per transformer. Keys are transformer names, values the weights. """ def __init__(self, transformer_list, n_jobs=1, transformer_weights=None): self.transformer_list = transformer_list self.n_jobs = n_jobs self.transformer_weights = transformer_weights def get_feature_names(self): """Get feature names from all transformers. Returns ------- feature_names : list of strings Names of the features produced by transform. """ feature_names = [] for name, trans in self.transformer_list: if not hasattr(trans, 'get_feature_names'): raise AttributeError("Transformer %s does not provide" " get_feature_names." % str(name)) feature_names.extend([name + "__" + f for f in trans.get_feature_names()]) return feature_names def fit(self, X, y=None): """Fit all transformers using X. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Input data, used to fit transformers. """ transformers = Parallel(n_jobs=self.n_jobs)( delayed(_fit_one_transformer)(trans, X, y) for name, trans in self.transformer_list) self._update_transformer_list(transformers) return self def fit_transform(self, X, y=None, **fit_params): """Fit all transformers using X, transform the data and concatenate results. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Input data to be transformed. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. 
""" result = Parallel(n_jobs=self.n_jobs)( delayed(_fit_transform_one)(trans, name, X, y, self.transformer_weights, **fit_params) for name, trans in self.transformer_list) Xs, transformers = zip(*result) self._update_transformer_list(transformers) if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = np.hstack(Xs) return Xs def transform(self, X): """Transform X separately by each transformer, concatenate results. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) Input data to be transformed. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. """ Xs = Parallel(n_jobs=self.n_jobs)( delayed(_transform_one)(trans, name, X, self.transformer_weights) for name, trans in self.transformer_list) if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = np.hstack(Xs) return Xs def get_params(self, deep=True): if not deep: return super(FeatureUnion, self).get_params(deep=False) else: out = dict(self.transformer_list) for name, trans in self.transformer_list: for key, value in iteritems(trans.get_params(deep=True)): out['%s__%s' % (name, key)] = value out.update(super(FeatureUnion, self).get_params(deep=False)) return out def _update_transformer_list(self, transformers): self.transformer_list[:] = [ (name, new) for ((name, old), new) in zip(self.transformer_list, transformers) ] # XXX it would be nice to have a keyword-only n_jobs argument to this function, # but that's not allowed in Python 2.x. def make_union(*transformers): """Construct a FeatureUnion from the given transformers. This is a shorthand for the FeatureUnion constructor; it does not require, and does not permit, naming the transformers. Instead, they will be given names automatically based on their types. It also does not allow weighting. 
Examples -------- >>> from sklearn.decomposition import PCA, TruncatedSVD >>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE FeatureUnion(n_jobs=1, transformer_list=[('pca', PCA(copy=True, n_components=None, whiten=False)), ('truncatedsvd', TruncatedSVD(algorithm='randomized', n_components=2, n_iter=5, random_state=None, tol=0.0))], transformer_weights=None) Returns ------- f : FeatureUnion """ return FeatureUnion(_name_estimators(transformers))
bsd-3-clause
qiudebo/13learn
code/matplotlib/test_properties_layout.py
1
2420
#!/usr/bin/python # -*- coding: utf-8 -*- from matplotlib import pyplot as plt import matplotlib.patches as patches if __name__ == '__main__': # build a rectangle in axes coords left, width = .25, .5 bottom, height = .25, .5 right = left + width top = bottom + height fig = plt.figure() ax = fig.add_axes([0, 0, 1, 1]) # axes coordinates are 0,0 is bottom left and 1,1 is upper right p = patches.Rectangle( (left, bottom), width, height, fill=False, transform=ax.transAxes, clip_on=False ) ax.add_patch(p) ax.text(left, bottom, 'left top', horizontalalignment='left', verticalalignment='top', transform=ax.transAxes) ax.text(left, bottom, 'left bottom', horizontalalignment='left', verticalalignment='bottom', transform=ax.transAxes) ax.text(right, top, 'right bottom', horizontalalignment='right', verticalalignment='bottom', transform=ax.transAxes) ax.text(right, top, 'right top', horizontalalignment='right', verticalalignment='top', transform=ax.transAxes) ax.text(right, bottom, 'center top', horizontalalignment='center', verticalalignment='top', transform=ax.transAxes) ax.text(left, 0.5 * (bottom + top), 'right center', horizontalalignment='right', verticalalignment='center', rotation='vertical', transform=ax.transAxes) ax.text(left, 0.5 * (bottom + top), 'left center', horizontalalignment='left', verticalalignment='center', rotation='vertical', transform=ax.transAxes) ax.text(0.5 * (left + right), 0.5 * (bottom + top), 'middle', horizontalalignment='center', verticalalignment='center', fontsize=20, color='red', transform=ax.transAxes) ax.text(right, 0.5 * (bottom + top), 'centered', horizontalalignment='center', verticalalignment='center', rotation='vertical', transform=ax.transAxes) ax.text(left, top, 'rotated\nwith newlines', horizontalalignment='center', verticalalignment='center', rotation=45, transform=ax.transAxes) ax.set_axis_off() plt.show()
mit
dborzov/fredholm
fredholm/self-consistent.py
1
1695
from numpy import log, pi, arange, exp from scipy.optimize import brentq import matplotlib.pyplot as plot from matplotlib import rc import equation def diagram_sum(x, d): return 4.*pi/log(d**2 *2.*x) def diagram_sum_3body(x, d): point=equation.equation(3.*x,'2D',20.,0.1,d) point.solve() g3=point.g3 del point return 4.*pi/log(d**2 *2.*x) + g3 drange=arange(0.6,5.,0.05) xx=[d for d in drange] ee=[1/d**2 for d in drange] yy=[brentq(lambda mu:mu - diagram_sum(mu,d),(0.5+0.01)/(d**2),0.5/d**2 *exp(8 * pi * d**2), xtol=1e-3) for d in drange] drange=arange(0.6,5.6,1.0) zx=[d for d in drange] ze=[1/d**2 for d in drange] zz=[brentq(lambda mu:mu - diagram_sum_3body(mu,d),(1+0.01)/(2.*d**2),0.5/d**2 *exp(8 * pi * d**2), xtol=1e-2) for d in drange] drange=arange(0.7,1.5,0.1) wx=[d for d in drange] we=[1/d**2 for d in drange] wz=[brentq(lambda mu:mu - diagram_sum_3body(mu,d),(1+0.01)/(2.*d**2),0.5/d**2 *exp(8 * pi * d**2), xtol=1e-2) for d in drange] drange=arange(0.6,0.7,0.025) fx=[d for d in drange] fe=[1/d**2 for d in drange] fz=[brentq(lambda mu:mu - diagram_sum_3body(mu,d),(1+0.01)/(2.*d**2),0.5/d**2 *exp(8 * pi * d**2), xtol=1e-2) for d in drange] plot.plot(xx,yy) plot.plot(zx,zz,'o') plot.plot(wx,wz,'o') plot.plot(fx,fz,'o') plot.xlabel('d, bound state size parameter') plot.ylabel(r'$\mu$, self-consistent potential') plot.savefig('results/potential_self-consistent.pdf') plot.close() plot.plot(ee,yy) plot.plot(ze,zz,'o') plot.plot(we,wz,'o') plot.plot(fe,fz,'o') rc('text', usetex=True) plot.xlabel(r'$\frac{1}{d^2}$, bound state energy') plot.ylabel(r'$\mu$, self-consistent potential') plot.savefig('results/potential_energy_parameter.pdf')
mit
mikebenfield/scikit-learn
sklearn/gaussian_process/kernels.py
31
67169
"""Kernels for Gaussian process regression and classification. The kernels in this module allow kernel-engineering, i.e., they can be combined via the "+" and "*" operators or be exponentiated with a scalar via "**". These sum and product expressions can also contain scalar values, which are automatically converted to a constant kernel. All kernels allow (analytic) gradient-based hyperparameter optimization. The space of hyperparameters can be specified by giving lower und upper boundaries for the value of each hyperparameter (the search space is thus rectangular). Instead of specifying bounds, hyperparameters can also be declared to be "fixed", which causes these hyperparameters to be excluded from optimization. """ # Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # License: BSD 3 clause # Note: this module is strongly inspired by the kernel module of the george # package. from abc import ABCMeta, abstractmethod from collections import namedtuple import math import numpy as np from scipy.special import kv, gamma from scipy.spatial.distance import pdist, cdist, squareform from ..metrics.pairwise import pairwise_kernels from ..externals import six from ..base import clone from sklearn.externals.funcsigs import signature def _check_length_scale(X, length_scale): length_scale = np.squeeze(length_scale).astype(float) if np.ndim(length_scale) > 1: raise ValueError("length_scale cannot be of dimension greater than 1") if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]: raise ValueError("Anisotropic kernel must have the same number of " "dimensions as data (%d!=%d)" % (length_scale.shape[0], X.shape[1])) return length_scale class Hyperparameter(namedtuple('Hyperparameter', ('name', 'value_type', 'bounds', 'n_elements', 'fixed'))): """A kernel hyperparameter's specification in form of a namedtuple. .. versionadded:: 0.18 Attributes ---------- name : string The name of the hyperparameter. 
Note that a kernel using a hyperparameter with name "x" must have the attributes self.x and self.x_bounds value_type : string The type of the hyperparameter. Currently, only "numeric" hyperparameters are supported. bounds : pair of floats >= 0 or "fixed" The lower and upper bound on the parameter. If n_elements>1, a pair of 1d array with n_elements each may be given alternatively. If the string "fixed" is passed as bounds, the hyperparameter's value cannot be changed. n_elements : int, default=1 The number of elements of the hyperparameter value. Defaults to 1, which corresponds to a scalar hyperparameter. n_elements > 1 corresponds to a hyperparameter which is vector-valued, such as, e.g., anisotropic length-scales. fixed : bool, default: None Whether the value of this hyperparameter is fixed, i.e., cannot be changed during hyperparameter tuning. If None is passed, the "fixed" is derived based on the given bounds. """ # A raw namedtuple is very memory efficient as it packs the attributes # in a struct to get rid of the __dict__ of attributes in particular it # does not copy the string for the keys on each instance. # By deriving a namedtuple class just to introduce the __init__ method we # would also reintroduce the __dict__ on the instance. By telling the # Python interpreter that this subclass uses static __slots__ instead of # dynamic attributes. Furthermore we don't need any additional slot in the # subclass so we set __slots__ to the empty tuple. __slots__ = () def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None): if not isinstance(bounds, six.string_types) or bounds != "fixed": bounds = np.atleast_2d(bounds) if n_elements > 1: # vector-valued parameter if bounds.shape[0] == 1: bounds = np.repeat(bounds, n_elements, 0) elif bounds.shape[0] != n_elements: raise ValueError("Bounds on %s should have either 1 or " "%d dimensions. 
    def set_params(self, **params):
        """Set the parameters of this kernel.

        The method works on simple kernels as well as on nested kernels.
        The latter have parameters of the form ``<component>__<parameter>``
        so that it's possible to update each component of a nested object.

        Returns
        -------
        self
        """
        if not params:
            # Simple optimisation to gain speed (inspect is slow)
            return self
        valid_params = self.get_params(deep=True)
        for key, value in six.iteritems(params):
            # Split off an optional "<component>__" prefix (first "__" only).
            split = key.split('__', 1)
            if len(split) > 1:
                # nested objects case: delegate to the named sub-kernel
                name, sub_name = split
                if name not in valid_params:
                    raise ValueError('Invalid parameter %s for kernel %s. '
                                     'Check the list of available parameters '
                                     'with `kernel.get_params().keys()`.' %
                                     (name, self))
                sub_object = valid_params[name]
                sub_object.set_params(**{sub_name: value})
            else:
                # simple objects case: set the attribute directly
                if key not in valid_params:
                    raise ValueError('Invalid parameter %s for kernel %s. '
                                     'Check the list of available parameters '
                                     'with `kernel.get_params().keys()`.' %
                                     (key, self.__class__.__name__))
                setattr(self, key, value)
        return self
    @theta.setter
    def theta(self, theta):
        """Sets the (flattened, log-transformed) non-fixed hyperparameters.

        The entries of ``theta`` are exponentiated and written back into the
        kernel's parameters in the order given by ``self.hyperparameters``,
        skipping fixed hyperparameters.

        Parameters
        ----------
        theta : array, shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        params = self.get_params()
        i = 0  # cursor into theta
        for hyperparameter in self.hyperparameters:
            if hyperparameter.fixed:
                # Fixed hyperparameters are not represented in theta.
                continue
            if hyperparameter.n_elements > 1:
                # vector-valued parameter: consume n_elements entries
                params[hyperparameter.name] = np.exp(
                    theta[i:i + hyperparameter.n_elements])
                i += hyperparameter.n_elements
            else:
                params[hyperparameter.name] = np.exp(theta[i])
                i += 1

        if i != len(theta):
            raise ValueError("theta has not the correct number of entries."
                             " Should be %d; given are %d"
                             % (i, len(theta)))
        self.set_params(**params)
    def __add__(self, b):
        # kernel + scalar promotes the scalar to a ConstantKernel.
        if not isinstance(b, Kernel):
            return Sum(self, ConstantKernel(b))
        return Sum(self, b)

    def __radd__(self, b):
        if not isinstance(b, Kernel):
            return Sum(ConstantKernel(b), self)
        return Sum(b, self)

    def __mul__(self, b):
        # kernel * scalar promotes the scalar to a ConstantKernel.
        if not isinstance(b, Kernel):
            return Product(self, ConstantKernel(b))
        return Product(self, b)

    def __rmul__(self, b):
        if not isinstance(b, Kernel):
            return Product(ConstantKernel(b), self)
        return Product(b, self)

    def __pow__(self, b):
        # kernel ** exponent wraps the kernel in an Exponentiation node.
        return Exponentiation(self, b)

    def __eq__(self, b):
        # Kernels are equal iff they are of the same type and all
        # constructor parameters compare equal.
        if type(self) != type(b):
            return False
        params_a = self.get_params()
        params_b = b.get_params()
        for key in set(list(params_a.keys()) + list(params_b.keys())):
            if np.any(params_a.get(key, None) != params_b.get(key, None)):
                return False
        return True

    def __repr__(self):
        return "{0}({1})".format(self.__class__.__name__,
                                 ", ".join(map("{0:.3g}".format, self.theta)))

    @abstractmethod
    def __call__(self, X, Y=None, eval_gradient=False):
        """Evaluate the kernel."""

    @abstractmethod
    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array, shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : array, shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """

    @abstractmethod
    def is_stationary(self):
        """Returns whether the kernel is stationary. """
class CompoundKernel(Kernel):
    """Kernel which is composed of a set of other kernels.

    .. versionadded:: 0.18

    Parameters
    ----------
    kernels : list of Kernel objects
        The kernels to combine; all sub-kernels are assumed to expose the
        same number of hyperparameter dimensions (``n_dims``).
    """

    def __init__(self, kernels):
        self.kernels = kernels

    def get_params(self, deep=True):
        """Get parameters of this kernel.

        Parameters
        ----------
        deep : boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
        """
        return dict(kernels=self.kernels)

    @property
    def theta(self):
        """Returns the (flattened, log-transformed) non-fixed hyperparameters.

        Note that theta are typically the log-transformed values of the
        kernel's hyperparameters as this representation of the search space
        is more amenable for hyperparameter search, as hyperparameters like
        length-scales naturally live on a log-scale.

        Returns
        -------
        theta : array, shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        return np.hstack([kernel.theta for kernel in self.kernels])

    @theta.setter
    def theta(self, theta):
        """Sets the (flattened, log-transformed) non-fixed hyperparameters.

        Parameters
        ----------
        theta : array, shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        # BUG FIX: this previously read ``self.k1.n_dims``, but
        # CompoundKernel stores its sub-kernels only in ``self.kernels``
        # (there is no ``k1`` attribute), so any assignment to ``theta``
        # raised an AttributeError. All sub-kernels share the same
        # dimensionality, so the first one is representative.
        k_dims = self.kernels[0].n_dims
        for i, kernel in enumerate(self.kernels):
            kernel.theta = theta[i * k_dims:(i + 1) * k_dims]

    @property
    def bounds(self):
        """Returns the log-transformed bounds on the theta.

        Returns
        -------
        bounds : array, shape (n_dims, 2)
            The log-transformed bounds on the kernel's hyperparameters theta
        """
        return np.vstack([kernel.bounds for kernel in self.kernels])

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Note that this compound kernel returns the results of all simple
        kernel stacked along an additional axis.

        Parameters
        ----------
        X : array, shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : array, shape (n_samples_Y, n_features), (optional, default=None)
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool (optional, default=False)
            Determines whether the gradient with respect to the kernel
            hyperparameter is determined.

        Returns
        -------
        K : array, shape (n_samples_X, n_samples_Y, n_kernels)
            Kernel k(X, Y)

        K_gradient : array,
                     shape (n_samples_X, n_samples_X, n_dims, n_kernels)
            The gradient of the kernel k(X, X) with respect to the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        """
        if eval_gradient:
            K = []
            K_grad = []
            for kernel in self.kernels:
                K_single, K_grad_single = kernel(X, Y, eval_gradient)
                K.append(K_single)
                # Add a trailing per-kernel axis so the gradients can be
                # concatenated along axis 3 below.
                K_grad.append(K_grad_single[..., np.newaxis])
            return np.dstack(K), np.concatenate(K_grad, 3)
        else:
            return np.dstack([kernel(X, Y, eval_gradient)
                              for kernel in self.kernels])

    def __eq__(self, b):
        if type(self) != type(b) or len(self.kernels) != len(b.kernels):
            return False
        return np.all([self.kernels[i] == b.kernels[i]
                       for i in range(len(self.kernels))])

    def is_stationary(self):
        """Returns whether the kernel is stationary. """
        return np.all([kernel.is_stationary() for kernel in self.kernels])

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array, shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : array, shape (n_samples_X, n_kernels)
            Diagonal of kernel k(X, X)
        """
        return np.vstack([kernel.diag(X) for kernel in self.kernels]).T
class Sum(KernelOperator):
    """Sum-kernel k1 + k2 of two kernels k1 and k2.

    The resulting kernel is defined as
    k_sum(X, Y) = k1(X, Y) + k2(X, Y)

    .. versionadded:: 0.18

    Parameters
    ----------
    k1 : Kernel object
        The first base-kernel of the sum-kernel

    k2 : Kernel object
        The second base-kernel of the sum-kernel
    """

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array, shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : array, shape (n_samples_Y, n_features), (optional, default=None)
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool (optional, default=False)
            Determines whether the gradient with respect to the kernel
            hyperparameter is determined.

        Returns
        -------
        K : array, shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
            The gradient of the kernel k(X, X) with respect to the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        """
        if eval_gradient:
            K1, K1_gradient = self.k1(X, Y, eval_gradient=True)
            K2, K2_gradient = self.k2(X, Y, eval_gradient=True)
            # Sum rule: the gradient of k1 + k2 stacks both kernels'
            # gradients along the hyperparameter axis.
            return K1 + K2, np.dstack((K1_gradient, K2_gradient))
        else:
            return self.k1(X, Y) + self.k2(X, Y)

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array, shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : array, shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return self.k1.diag(X) + self.k2.diag(X)

    def __repr__(self):
        return "{0} + {1}".format(self.k1, self.k2)
class Exponentiation(Kernel):
    """Exponentiate kernel by given exponent.

    The resulting kernel is defined as
    k_exp(X, Y) = k(X, Y) ** exponent

    .. versionadded:: 0.18

    Parameters
    ----------
    kernel : Kernel object
        The base kernel

    exponent : float
        The exponent for the base kernel
    """

    def __init__(self, kernel, exponent):
        self.kernel = kernel
        self.exponent = exponent

    def get_params(self, deep=True):
        """Get parameters of this kernel.

        Parameters
        ----------
        deep : boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
        """
        params = dict(kernel=self.kernel, exponent=self.exponent)
        if deep:
            # Expose the base kernel's parameters under the "kernel__"
            # prefix so set_params can address the nested object.
            deep_items = self.kernel.get_params().items()
            params.update(('kernel__' + k, val) for k, val in deep_items)
        return params

    @property
    def hyperparameters(self):
        """Returns a list of all hyperparameter."""
        r = []
        for hyperparameter in self.kernel.hyperparameters:
            r.append(Hyperparameter("kernel__" + hyperparameter.name,
                                    hyperparameter.value_type,
                                    hyperparameter.bounds,
                                    hyperparameter.n_elements))
        return r

    @property
    def theta(self):
        """Returns the (flattened, log-transformed) non-fixed hyperparameters.

        Note that theta are typically the log-transformed values of the
        kernel's hyperparameters as this representation of the search space
        is more amenable for hyperparameter search, as hyperparameters like
        length-scales naturally live on a log-scale.

        Returns
        -------
        theta : array, shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        # The exponent is a fixed structural parameter, not a tunable
        # hyperparameter, so theta is delegated to the base kernel.
        return self.kernel.theta

    @theta.setter
    def theta(self, theta):
        """Sets the (flattened, log-transformed) non-fixed hyperparameters.

        Parameters
        ----------
        theta : array, shape (n_dims,)
            The non-fixed, log-transformed hyperparameters of the kernel
        """
        self.kernel.theta = theta

    @property
    def bounds(self):
        """Returns the log-transformed bounds on the theta.

        Returns
        -------
        bounds : array, shape (n_dims, 2)
            The log-transformed bounds on the kernel's hyperparameters theta
        """
        return self.kernel.bounds

    def __eq__(self, b):
        if type(self) != type(b):
            return False
        return (self.kernel == b.kernel and self.exponent == b.exponent)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array, shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : array, shape (n_samples_Y, n_features), (optional, default=None)
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool (optional, default=False)
            Determines whether the gradient with respect to the kernel
            hyperparameter is determined.

        Returns
        -------
        K : array, shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
            The gradient of the kernel k(X, X) with respect to the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        """
        if eval_gradient:
            K, K_gradient = self.kernel(X, Y, eval_gradient=True)
            # Chain rule: d(K**e)/dtheta = e * K**(e-1) * dK/dtheta
            K_gradient *= \
                self.exponent * K[:, :, np.newaxis] ** (self.exponent - 1)
            return K ** self.exponent, K_gradient
        else:
            K = self.kernel(X, Y, eval_gradient=False)
            return K ** self.exponent

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array, shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : array, shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return self.kernel.diag(X) ** self.exponent

    def __repr__(self):
        return "{0} ** {1}".format(self.kernel, self.exponent)

    def is_stationary(self):
        """Returns whether the kernel is stationary. """
        return self.kernel.is_stationary()
versionadded:: 0.18 Parameters ---------- constant_value : float, default: 1.0 The constant value which defines the covariance: k(x_1, x_2) = constant_value constant_value_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on constant_value """ def __init__(self, constant_value=1.0, constant_value_bounds=(1e-5, 1e5)): self.constant_value = constant_value self.constant_value_bounds = constant_value_bounds @property def hyperparameter_constant_value(self): return Hyperparameter( "constant_value", "numeric", self.constant_value_bounds) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. """ X = np.atleast_2d(X) if Y is None: Y = X elif eval_gradient: raise ValueError("Gradient can only be evaluated when Y is None.") K = self.constant_value * np.ones((X.shape[0], Y.shape[0])) if eval_gradient: if not self.hyperparameter_constant_value.fixed: return (K, self.constant_value * np.ones((X.shape[0], X.shape[0], 1))) else: return K, np.empty((X.shape[0], X.shape[0], 0)) else: return K def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. 
class WhiteKernel(StationaryKernelMixin, Kernel):
    """White kernel.

    The main use-case of this kernel is as part of a sum-kernel where it
    explains the noise-component of the signal. Tuning its parameter
    corresponds to estimating the noise-level.

    k(x_1, x_2) = noise_level if x_1 == x_2 else 0

    .. versionadded:: 0.18

    Parameters
    ----------
    noise_level : float, default: 1.0
        Parameter controlling the noise level

    noise_level_bounds : pair of floats >= 0, default: (1e-5, 1e5)
        The lower and upper bound on noise_level
    """

    def __init__(self, noise_level=1.0, noise_level_bounds=(1e-5, 1e5)):
        self.noise_level = noise_level
        self.noise_level_bounds = noise_level_bounds

    @property
    def hyperparameter_noise_level(self):
        return Hyperparameter(
            "noise_level", "numeric", self.noise_level_bounds)

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array, shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : array, shape (n_samples_Y, n_features), (optional, default=None)
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool (optional, default=False)
            Determines whether the gradient with respect to the kernel
            hyperparameter is determined. Only supported when Y is None.

        Returns
        -------
        K : array, shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
            The gradient of the kernel k(X, X) with respect to the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        """
        X = np.atleast_2d(X)
        if Y is not None:
            if eval_gradient:
                raise ValueError(
                    "Gradient can only be evaluated when Y is None.")
            # X and Y are treated as disjoint sample sets: no pair of rows
            # is considered identical, so the covariance is all zeros.
            return np.zeros((X.shape[0], Y.shape[0]))

        n_samples = X.shape[0]
        K = self.noise_level * np.eye(n_samples)
        if not eval_gradient:
            return K
        if self.hyperparameter_noise_level.fixed:
            # Fixed hyperparameter contributes no gradient columns.
            grad = np.empty((n_samples, n_samples, 0))
        else:
            grad = self.noise_level * np.eye(n_samples)[:, :, np.newaxis]
        return K, grad

    def diag(self, X):
        """Returns the diagonal of the kernel k(X, X).

        The result of this method is identical to np.diag(self(X)); however,
        it can be evaluated more efficiently since only the diagonal is
        evaluated.

        Parameters
        ----------
        X : array, shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Returns
        -------
        K_diag : array, shape (n_samples_X,)
            Diagonal of kernel k(X, X)
        """
        return self.noise_level * np.ones(X.shape[0])

    def __repr__(self):
        return "{0}(noise_level={1:.3g})".format(self.__class__.__name__,
                                                 self.noise_level)
If an array, an anisotropic kernel is used where each dimension of l defines the length-scale of the respective feature dimension. length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on length_scale """ def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5)): self.length_scale = length_scale self.length_scale_bounds = length_scale_bounds @property def anisotropic(self): return np.iterable(self.length_scale) and len(self.length_scale) > 1 @property def hyperparameter_length_scale(self): if self.anisotropic: return Hyperparameter("length_scale", "numeric", self.length_scale_bounds, len(self.length_scale)) return Hyperparameter( "length_scale", "numeric", self.length_scale_bounds) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. 
""" X = np.atleast_2d(X) length_scale = _check_length_scale(X, self.length_scale) if Y is None: dists = pdist(X / length_scale, metric='sqeuclidean') K = np.exp(-.5 * dists) # convert from upper-triangular matrix to square matrix K = squareform(K) np.fill_diagonal(K, 1) else: if eval_gradient: raise ValueError( "Gradient can only be evaluated when Y is None.") dists = cdist(X / length_scale, Y / length_scale, metric='sqeuclidean') K = np.exp(-.5 * dists) if eval_gradient: if self.hyperparameter_length_scale.fixed: # Hyperparameter l kept fixed return K, np.empty((X.shape[0], X.shape[0], 0)) elif not self.anisotropic or length_scale.shape[0] == 1: K_gradient = \ (K * squareform(dists))[:, :, np.newaxis] return K, K_gradient elif self.anisotropic: # We need to recompute the pairwise dimension-wise distances K_gradient = (X[:, np.newaxis, :] - X[np.newaxis, :, :]) ** 2 \ / (length_scale ** 2) K_gradient *= K[..., np.newaxis] return K, K_gradient else: return K def __repr__(self): if self.anisotropic: return "{0}(length_scale=[{1}])".format( self.__class__.__name__, ", ".join(map("{0:.3g}".format, self.length_scale))) else: # isotropic return "{0}(length_scale={1:.3g})".format( self.__class__.__name__, np.ravel(self.length_scale)[0]) class Matern(RBF): """ Matern kernel. The class of Matern kernels is a generalization of the RBF and the absolute exponential kernel parameterized by an additional parameter nu. The smaller nu, the less smooth the approximated function is. For nu=inf, the kernel becomes equivalent to the RBF kernel and for nu=0.5 to the absolute exponential kernel. Important intermediate values are nu=1.5 (once differentiable functions) and nu=2.5 (twice differentiable functions). See Rasmussen and Williams 2006, pp84 for details regarding the different variants of the Matern kernel. .. versionadded:: 0.18 Parameters ----------- length_scale : float or array with shape (n_features,), default: 1.0 The length scale of the kernel. 
class Matern(RBF):
    """ Matern kernel.

    The class of Matern kernels is a generalization of the RBF and the
    absolute exponential kernel parameterized by an additional parameter
    nu. The smaller nu, the less smooth the approximated function is.
    For nu=inf, the kernel becomes equivalent to the RBF kernel and for
    nu=0.5 to the absolute exponential kernel. Important intermediate values
    are nu=1.5 (once differentiable functions) and nu=2.5 (twice
    differentiable functions).

    See Rasmussen and Williams 2006, pp84 for details regarding the
    different variants of the Matern kernel.

    .. versionadded:: 0.18

    Parameters
    -----------
    length_scale : float or array with shape (n_features,), default: 1.0
        The length scale of the kernel. If a float, an isotropic kernel is
        used. If an array, an anisotropic kernel is used where each dimension
        of l defines the length-scale of the respective feature dimension.

    length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5)
        The lower and upper bound on length_scale

    nu: float, default: 1.5
        The parameter nu controlling the smoothness of the learned function.
        The smaller nu, the less smooth the approximated function is.
        For nu=inf, the kernel becomes equivalent to the RBF kernel and for
        nu=0.5 to the absolute exponential kernel. Important intermediate
        values are nu=1.5 (once differentiable functions) and nu=2.5
        (twice differentiable functions). Note that values of nu not in
        [0.5, 1.5, 2.5, inf] incur a considerably higher computational cost
        (appr. 10 times higher) since they require to evaluate the modified
        Bessel function. Furthermore, in contrast to l, nu is kept fixed to
        its initial value and not optimized.
    """
    def __init__(self, length_scale=1.0, length_scale_bounds=(1e-5, 1e5),
                 nu=1.5):
        super(Matern, self).__init__(length_scale, length_scale_bounds)
        self.nu = nu

    def __call__(self, X, Y=None, eval_gradient=False):
        """Return the kernel k(X, Y) and optionally its gradient.

        Parameters
        ----------
        X : array, shape (n_samples_X, n_features)
            Left argument of the returned kernel k(X, Y)

        Y : array, shape (n_samples_Y, n_features), (optional, default=None)
            Right argument of the returned kernel k(X, Y). If None, k(X, X)
            if evaluated instead.

        eval_gradient : bool (optional, default=False)
            Determines whether the gradient with respect to the kernel
            hyperparameter is determined. Only supported when Y is None.

        Returns
        -------
        K : array, shape (n_samples_X, n_samples_Y)
            Kernel k(X, Y)

        K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims)
            The gradient of the kernel k(X, X) with respect to the
            hyperparameter of the kernel. Only returned when eval_gradient
            is True.
        """
        X = np.atleast_2d(X)
        length_scale = _check_length_scale(X, self.length_scale)
        if Y is None:
            # Condensed (upper-triangular) pairwise distances within X.
            dists = pdist(X / length_scale, metric='euclidean')
        else:
            if eval_gradient:
                raise ValueError(
                    "Gradient can only be evaluated when Y is None.")
            dists = cdist(X / length_scale, Y / length_scale,
                          metric='euclidean')

        if self.nu == 0.5:
            # Absolute exponential kernel.
            K = np.exp(-dists)
        elif self.nu == 1.5:
            K = dists * math.sqrt(3)
            K = (1. + K) * np.exp(-K)
        elif self.nu == 2.5:
            K = dists * math.sqrt(5)
            K = (1. + K + K ** 2 / 3.0) * np.exp(-K)
        else:  # general case; expensive to evaluate
            K = dists
            K[K == 0.0] += np.finfo(float).eps  # strict zeros result in nan
            tmp = (math.sqrt(2 * self.nu) * K)
            K.fill((2 ** (1. - self.nu)) / gamma(self.nu))
            K *= tmp ** self.nu
            K *= kv(self.nu, tmp)

        if Y is None:
            # convert from upper-triangular matrix to square matrix
            K = squareform(K)
            np.fill_diagonal(K, 1)

        if eval_gradient:
            if self.hyperparameter_length_scale.fixed:
                # Hyperparameter l kept fixed
                K_gradient = np.empty((X.shape[0], X.shape[0], 0))
                return K, K_gradient

            # We need to recompute the pairwise dimension-wise distances
            if self.anisotropic:
                D = (X[:, np.newaxis, :] - X[np.newaxis, :, :])**2 \
                    / (length_scale ** 2)
            else:
                D = squareform(dists**2)[:, :, np.newaxis]

            if self.nu == 0.5:
                K_gradient = K[..., np.newaxis] * D \
                    / np.sqrt(D.sum(2))[:, :, np.newaxis]
                # Zero-distance pairs divide by zero above; their gradient
                # contribution is replaced by 0.
                K_gradient[~np.isfinite(K_gradient)] = 0
            elif self.nu == 1.5:
                K_gradient = \
                    3 * D * np.exp(-np.sqrt(3 * D.sum(-1)))[..., np.newaxis]
            elif self.nu == 2.5:
                tmp = np.sqrt(5 * D.sum(-1))[..., np.newaxis]
                K_gradient = 5.0 / 3.0 * D * (tmp + 1) * np.exp(-tmp)
            else:
                # approximate gradient numerically
                def f(theta):  # helper function
                    return self.clone_with_theta(theta)(X, Y)
                return K, _approx_fprime(self.theta, f, 1e-10)

            if not self.anisotropic:
                # Collapse the per-dimension gradient into a single column
                # for the isotropic (scalar length-scale) case.
                return K, K_gradient[:, :].sum(-1)[:, :, np.newaxis]
            else:
                return K, K_gradient
        else:
            return K

    def __repr__(self):
        if self.anisotropic:
            return "{0}(length_scale=[{1}], nu={2:.3g})".format(
                self.__class__.__name__,
                ", ".join(map("{0:.3g}".format, self.length_scale)),
                self.nu)
        else:
            return "{0}(length_scale={1:.3g}, nu={2:.3g})".format(
                self.__class__.__name__,
                np.ravel(self.length_scale)[0], self.nu)
".join(map("{0:.3g}".format, self.length_scale)), self.nu) else: return "{0}(length_scale={1:.3g}, nu={2:.3g})".format( self.__class__.__name__, np.ravel(self.length_scale)[0], self.nu) class RationalQuadratic(StationaryKernelMixin, NormalizedKernelMixin, Kernel): """Rational Quadratic kernel. The RationalQuadratic kernel can be seen as a scale mixture (an infinite sum) of RBF kernels with different characteristic length-scales. It is parameterized by a length-scale parameter length_scale>0 and a scale mixture parameter alpha>0. Only the isotropic variant where length_scale is a scalar is supported at the moment. The kernel given by: k(x_i, x_j) = (1 + d(x_i, x_j)^2 / (2*alpha * length_scale^2))^-alpha .. versionadded:: 0.18 Parameters ---------- length_scale : float > 0, default: 1.0 The length scale of the kernel. alpha : float > 0, default: 1.0 Scale mixture parameter length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on length_scale alpha_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on alpha """ def __init__(self, length_scale=1.0, alpha=1.0, length_scale_bounds=(1e-5, 1e5), alpha_bounds=(1e-5, 1e5)): self.length_scale = length_scale self.alpha = alpha self.length_scale_bounds = length_scale_bounds self.alpha_bounds = alpha_bounds @property def hyperparameter_length_scale(self): return Hyperparameter( "length_scale", "numeric", self.length_scale_bounds) @property def hyperparameter_alpha(self): return Hyperparameter("alpha", "numeric", self.alpha_bounds) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. 
eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. """ X = np.atleast_2d(X) if Y is None: dists = squareform(pdist(X, metric='sqeuclidean')) tmp = dists / (2 * self.alpha * self.length_scale ** 2) base = (1 + tmp) K = base ** -self.alpha np.fill_diagonal(K, 1) else: if eval_gradient: raise ValueError( "Gradient can only be evaluated when Y is None.") dists = cdist(X, Y, metric='sqeuclidean') K = (1 + dists / (2 * self.alpha * self.length_scale ** 2)) \ ** -self.alpha if eval_gradient: # gradient with respect to length_scale if not self.hyperparameter_length_scale.fixed: length_scale_gradient = \ dists * K / (self.length_scale ** 2 * base) length_scale_gradient = length_scale_gradient[:, :, np.newaxis] else: # l is kept fixed length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0)) # gradient with respect to alpha if not self.hyperparameter_alpha.fixed: alpha_gradient = \ K * (-self.alpha * np.log(base) + dists / (2 * self.length_scale ** 2 * base)) alpha_gradient = alpha_gradient[:, :, np.newaxis] else: # alpha is kept fixed alpha_gradient = np.empty((K.shape[0], K.shape[1], 0)) return K, np.dstack((alpha_gradient, length_scale_gradient)) else: return K def __repr__(self): return "{0}(alpha={1:.3g}, length_scale={2:.3g})".format( self.__class__.__name__, self.alpha, self.length_scale) class ExpSineSquared(StationaryKernelMixin, NormalizedKernelMixin, Kernel): """Exp-Sine-Squared kernel. The ExpSineSquared kernel allows modeling periodic functions. It is parameterized by a length-scale parameter length_scale>0 and a periodicity parameter periodicity>0. 
Only the isotropic variant where l is a scalar is supported at the moment. The kernel given by: k(x_i, x_j) = exp(-2 sin(\pi / periodicity * d(x_i, x_j)) / length_scale)^2 .. versionadded:: 0.18 Parameters ---------- length_scale : float > 0, default: 1.0 The length scale of the kernel. periodicity : float > 0, default: 1.0 The periodicity of the kernel. length_scale_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on length_scale periodicity_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on periodicity """ def __init__(self, length_scale=1.0, periodicity=1.0, length_scale_bounds=(1e-5, 1e5), periodicity_bounds=(1e-5, 1e5)): self.length_scale = length_scale self.periodicity = periodicity self.length_scale_bounds = length_scale_bounds self.periodicity_bounds = periodicity_bounds @property def hyperparameter_length_scale(self): return Hyperparameter( "length_scale", "numeric", self.length_scale_bounds) @property def hyperparameter_periodicity(self): return Hyperparameter( "periodicity", "numeric", self.periodicity_bounds) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. 
""" X = np.atleast_2d(X) if Y is None: dists = squareform(pdist(X, metric='euclidean')) arg = np.pi * dists / self.periodicity sin_of_arg = np.sin(arg) K = np.exp(- 2 * (sin_of_arg / self.length_scale) ** 2) else: if eval_gradient: raise ValueError( "Gradient can only be evaluated when Y is None.") dists = cdist(X, Y, metric='euclidean') K = np.exp(- 2 * (np.sin(np.pi / self.periodicity * dists) / self.length_scale) ** 2) if eval_gradient: cos_of_arg = np.cos(arg) # gradient with respect to length_scale if not self.hyperparameter_length_scale.fixed: length_scale_gradient = \ 4 / self.length_scale**2 * sin_of_arg**2 * K length_scale_gradient = length_scale_gradient[:, :, np.newaxis] else: # length_scale is kept fixed length_scale_gradient = np.empty((K.shape[0], K.shape[1], 0)) # gradient with respect to p if not self.hyperparameter_periodicity.fixed: periodicity_gradient = \ 4 * arg / self.length_scale**2 * cos_of_arg \ * sin_of_arg * K periodicity_gradient = periodicity_gradient[:, :, np.newaxis] else: # p is kept fixed periodicity_gradient = np.empty((K.shape[0], K.shape[1], 0)) return K, np.dstack((length_scale_gradient, periodicity_gradient)) else: return K def __repr__(self): return "{0}(length_scale={1:.3g}, periodicity={2:.3g})".format( self.__class__.__name__, self.length_scale, self.periodicity) class DotProduct(Kernel): """Dot-Product kernel. The DotProduct kernel is non-stationary and can be obtained from linear regression by putting N(0, 1) priors on the coefficients of x_d (d = 1, . . . , D) and a prior of N(0, \sigma_0^2) on the bias. The DotProduct kernel is invariant to a rotation of the coordinates about the origin, but not translations. It is parameterized by a parameter sigma_0^2. For sigma_0^2 =0, the kernel is called the homogeneous linear kernel, otherwise it is inhomogeneous. The kernel is given by k(x_i, x_j) = sigma_0 ^ 2 + x_i \cdot x_j The DotProduct kernel is commonly combined with exponentiation. .. 
versionadded:: 0.18 Parameters ---------- sigma_0 : float >= 0, default: 1.0 Parameter controlling the inhomogenity of the kernel. If sigma_0=0, the kernel is homogenous. sigma_0_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on l """ def __init__(self, sigma_0=1.0, sigma_0_bounds=(1e-5, 1e5)): self.sigma_0 = sigma_0 self.sigma_0_bounds = sigma_0_bounds @property def hyperparameter_sigma_0(self): return Hyperparameter("sigma_0", "numeric", self.sigma_0_bounds) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. """ X = np.atleast_2d(X) if Y is None: K = np.inner(X, X) + self.sigma_0 ** 2 else: if eval_gradient: raise ValueError( "Gradient can only be evaluated when Y is None.") K = np.inner(X, Y) + self.sigma_0 ** 2 if eval_gradient: if not self.hyperparameter_sigma_0.fixed: K_gradient = np.empty((K.shape[0], K.shape[1], 1)) K_gradient[..., 0] = 2 * self.sigma_0 ** 2 return K, K_gradient else: return K, np.empty((X.shape[0], X.shape[0], 0)) else: return K def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. 
Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X) """ return np.einsum('ij,ij->i', X, X) + self.sigma_0 ** 2 def is_stationary(self): """Returns whether the kernel is stationary. """ return False def __repr__(self): return "{0}(sigma_0={1:.3g})".format( self.__class__.__name__, self.sigma_0) # adapted from scipy/optimize/optimize.py for functions with 2d output def _approx_fprime(xk, f, epsilon, args=()): f0 = f(*((xk,) + args)) grad = np.zeros((f0.shape[0], f0.shape[1], len(xk)), float) ei = np.zeros((len(xk), ), float) for k in range(len(xk)): ei[k] = 1.0 d = epsilon * ei grad[:, :, k] = (f(*((xk + d,) + args)) - f0) / d[k] ei[k] = 0.0 return grad class PairwiseKernel(Kernel): """Wrapper for kernels in sklearn.metrics.pairwise. A thin wrapper around the functionality of the kernels in sklearn.metrics.pairwise. Note: Evaluation of eval_gradient is not analytic but numeric and all kernels support only isotropic distances. The parameter gamma is considered to be a hyperparameter and may be optimized. The other kernel parameters are set directly at initialization and are kept fixed. .. versionadded:: 0.18 Parameters ---------- gamma: float >= 0, default: 1.0 Parameter gamma of the pairwise kernel specified by metric gamma_bounds : pair of floats >= 0, default: (1e-5, 1e5) The lower and upper bound on gamma metric : string, or callable, default: "linear" The metric to use when calculating kernel between instances in a feature array. If metric is a string, it must be one of the metrics in pairwise.PAIRWISE_KERNEL_FUNCTIONS. If metric is "precomputed", X is assumed to be a kernel matrix. Alternatively, if metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays from X as input and return a value indicating the distance between them. 
pairwise_kernels_kwargs : dict, default: None All entries of this dict (if any) are passed as keyword arguments to the pairwise kernel function. """ def __init__(self, gamma=1.0, gamma_bounds=(1e-5, 1e5), metric="linear", pairwise_kernels_kwargs=None): self.gamma = gamma self.gamma_bounds = gamma_bounds self.metric = metric self.pairwise_kernels_kwargs = pairwise_kernels_kwargs @property def hyperparameter_gamma(self): return Hyperparameter("gamma", "numeric", self.gamma_bounds) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Y : array, shape (n_samples_Y, n_features), (optional, default=None) Right argument of the returned kernel k(X, Y). If None, k(X, X) if evaluated instead. eval_gradient : bool (optional, default=False) Determines whether the gradient with respect to the kernel hyperparameter is determined. Only supported when Y is None. Returns ------- K : array, shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : array (opt.), shape (n_samples_X, n_samples_X, n_dims) The gradient of the kernel k(X, X) with respect to the hyperparameter of the kernel. Only returned when eval_gradient is True. """ pairwise_kernels_kwargs = self.pairwise_kernels_kwargs if self.pairwise_kernels_kwargs is None: pairwise_kernels_kwargs = {} X = np.atleast_2d(X) K = pairwise_kernels(X, Y, metric=self.metric, gamma=self.gamma, filter_params=True, **pairwise_kernels_kwargs) if eval_gradient: if self.hyperparameter_gamma.fixed: return K, np.empty((X.shape[0], X.shape[0], 0)) else: # approximate gradient numerically def f(gamma): # helper function return pairwise_kernels( X, Y, metric=self.metric, gamma=np.exp(gamma), filter_params=True, **pairwise_kernels_kwargs) return K, _approx_fprime(self.theta, f, 1e-10) else: return K def diag(self, X): """Returns the diagonal of the kernel k(X, X). 
The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array, shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : array, shape (n_samples_X,) Diagonal of kernel k(X, X) """ # We have to fall back to slow way of computing diagonal return np.apply_along_axis(self, 1, X).ravel() def is_stationary(self): """Returns whether the kernel is stationary. """ return self.metric in ["rbf"] def __repr__(self): return "{0}(gamma={1}, metric={2})".format( self.__class__.__name__, self.gamma, self.metric)
bsd-3-clause
ngoix/OCRF
sklearn/ensemble/EvOutSe.py
1
3267
"""Aggarwal and Yu evolutionary's algorithm.""" import os import inspect import numpy as np from math import log, floor, ceil from subprocess import call class EvOutSe(object): """Aggarwal and Yu evolutionary's algorithm.""" def __init__(self, m=None, phi=None, k=None, s=None): """Construtor. parameters: * m: Population size for evolutionary algorithm. * phi: Number of equi-depth grid ranges to use in each dimension. * k: Subspace dimensionality to search for. example: import numpy as np from sklearn.ensemble import EvOutSe data = np.genfromtxt("ionosphere.data.txt", delimiter=',') X_train = data[:200, :-1] X_test = data[200:, :-1] y_train = data[:200, -1] y_test = data[200:, -1] estimator = EvOutSe() pred = estimator.fit_predict(X_train, y_train, X_test, y_test) print roc_auc_score(y_test, pred) Ref: http://charuaggarwal.net/outl.pdf. """ self.m = m self.phi = phi self.k = k self.s = s def _p(self, e): if e == 1: return 'a' else: return 'n' def fit_predict(self, X_train, y_train, X_test, y_test): """Fit and predict. Fit f_* with (X_train, X_test). return: pred = f_*(X_test) remark: y_train and y_test should be useless but Elki requires it... 
""" # Find good default parameters N = X_train.shape[0] + X_test.shape[0] if self.m is None: self.m = int(1.3 * N) if self.phi is None: self.phi = int(ceil(log(N, 10))) if self.s is None: self.s = -3 if self.k is None: self.k = int(floor(log(N / (self.s * self.s) + 1, self.phi))) # Parse data to Elki's format X = np.vstack((X_train, X_test)) pytr = np.array([self._p(e) for e in y_train]) pyte = np.array([self._p(e) for e in y_test]) y = np.vstack((np.reshape(pytr, (pytr.size, 1)), np.reshape(pyte, (pyte.size, 1)))) data = np.hstack((X, y)) install_dir = os.path.dirname(os.path.abspath( inspect.getfile(inspect.currentframe()))) # script directory # Now we can call Elki np.savetxt(".data.csv", data, delimiter=',', fmt="%s") call(["java", "-jar", install_dir + "/elki-0.7.2-SNAPSHOT.jar", "KDDCLIApplication", "-algorithm", "outlier.subspace.AggarwalYuEvolutionary", "-dbc.in", ".data.csv", "-ay.seed", str(0), "-ay.m", str(self.m), "-ay.k", str(self.k), "-ay.phi", str(self.phi), "-out", ".res", # "-time" ]) call(["rm", ".data.csv"]) # Process the results f = open(".res/aggarwal-yu-outlier_order.txt") res = np.zeros(N) for line in f: spl = line.split(' ') i = int(spl[0].split('=')[1]) - 1 res[i] = -float(spl[-1].split('=')[1]) f.close() call(["rm", "-r", ".res"]) return res[X_train.shape[0]:]
bsd-3-clause
alfcrisci/word_cloud
doc/sphinxext/gen_rst.py
17
33207
""" Example generation for the python wordcloud project. Stolen from scikit-learn with modifications from PyStruct. Generate the rst files for the examples by iterating over the python example files. Hacked to plot every example (not only those that start with 'plot'). """ from time import time import os import shutil import traceback import glob import sys from StringIO import StringIO import cPickle import re import urllib2 import gzip import posixpath import codecs try: from PIL import Image except: import Image import matplotlib matplotlib.use('Agg') import token import tokenize import numpy as np ############################################################################### # A tee object to redict streams to multiple outputs class Tee(object): def __init__(self, file1, file2): self.file1 = file1 self.file2 = file2 def write(self, data): self.file1.write(data) self.file2.write(data) def flush(self): self.file1.flush() self.file2.flush() ############################################################################### # Documentation link resolver objects def get_data(url): """Helper function to get data over http or from a local file""" if url.startswith('http://'): resp = urllib2.urlopen(url) encoding = resp.headers.dict.get('content-encoding', 'plain') data = resp.read() if encoding == 'plain': pass elif encoding == 'gzip': data = StringIO(data) data = gzip.GzipFile(fileobj=data).read() else: raise RuntimeError('unknown encoding') else: with open(url, 'r') as fid: data = fid.read() fid.close() return data def parse_sphinx_searchindex(searchindex): """Parse a Sphinx search index Parameters ---------- searchindex : str The Sphinx search index (contents of searchindex.js) Returns ------- filenames : list of str The file names parsed from the search index. objects : dict The objects parsed from the search index. 
""" def _select_block(str_in, start_tag, end_tag): """Select first block delimited by start_tag and end_tag""" start_pos = str_in.find(start_tag) if start_pos < 0: raise ValueError('start_tag not found') depth = 0 for pos in range(start_pos, len(str_in)): if str_in[pos] == start_tag: depth += 1 elif str_in[pos] == end_tag: depth -= 1 if depth == 0: break sel = str_in[start_pos + 1:pos] return sel def _parse_dict_recursive(dict_str): """Parse a dictionary from the search index""" dict_out = dict() pos_last = 0 pos = dict_str.find(':') while pos >= 0: key = dict_str[pos_last:pos] if dict_str[pos + 1] == '[': # value is a list pos_tmp = dict_str.find(']', pos + 1) if pos_tmp < 0: raise RuntimeError('error when parsing dict') value = dict_str[pos + 2: pos_tmp].split(',') # try to convert elements to int for i in range(len(value)): try: value[i] = int(value[i]) except ValueError: pass elif dict_str[pos + 1] == '{': # value is another dictionary subdict_str = _select_block(dict_str[pos:], '{', '}') value = _parse_dict_recursive(subdict_str) pos_tmp = pos + len(subdict_str) else: raise ValueError('error when parsing dict: unknown elem') key = key.strip('"') if len(key) > 0: dict_out[key] = value pos_last = dict_str.find(',', pos_tmp) if pos_last < 0: break pos_last += 1 pos = dict_str.find(':', pos_last) return dict_out # parse objects query = 'objects:' pos = searchindex.find(query) if pos < 0: raise ValueError('"objects:" not found in search index') sel = _select_block(searchindex[pos:], '{', '}') objects = _parse_dict_recursive(sel) # parse filenames query = 'filenames:' pos = searchindex.find(query) if pos < 0: raise ValueError('"filenames:" not found in search index') filenames = searchindex[pos + len(query) + 1:] filenames = filenames[:filenames.find(']')] filenames = [f.strip('"') for f in filenames.split(',')] return filenames, objects class SphinxDocLinkResolver(object): """ Resolve documentation links using searchindex.js generated by Sphinx Parameters 
---------- doc_url : str The base URL of the project website. searchindex : str Filename of searchindex, relative to doc_url. extra_modules_test : list of str List of extra module names to test. relative : bool Return relative links (only useful for links to documentation of this package). """ def __init__(self, doc_url, searchindex='searchindex.js', extra_modules_test=None, relative=False): self.doc_url = doc_url self.relative = relative self._link_cache = {} self.extra_modules_test = extra_modules_test self._page_cache = {} if doc_url.startswith('http://'): if relative: raise ValueError('Relative links are only supported for local ' 'URLs (doc_url cannot start with "http://)"') searchindex_url = doc_url + '/' + searchindex else: searchindex_url = os.path.join(doc_url, searchindex) # detect if we are using relative links on a Windows system if os.name.lower() == 'nt' and not doc_url.startswith('http://'): if not relative: raise ValueError('You have to use relative=True for the local' 'package on a Windows system.') self._is_windows = True else: self._is_windows = False # download and initialize the search index sindex = get_data(searchindex_url) filenames, objects = parse_sphinx_searchindex(sindex) self._searchindex = dict(filenames=filenames, objects=objects) def _get_link(self, cobj): """Get a valid link, False if not found""" fname_idx = None full_name = cobj['module_short'] + '.' 
+ cobj['name'] if full_name in self._searchindex['objects']: value = self._searchindex['objects'][full_name] if isinstance(value, dict): value = value[value.keys()[0]] fname_idx = value[0] elif cobj['module_short'] in self._searchindex['objects']: value = self._searchindex['objects'][cobj['module_short']] if cobj['name'] in value.keys(): fname_idx = value[cobj['name']][0] if fname_idx is not None: fname = self._searchindex['filenames'][fname_idx] + '.html' if self._is_windows: fname = fname.replace('/', '\\') link = os.path.join(self.doc_url, fname) else: link = posixpath.join(self.doc_url, fname) if link in self._page_cache: html = self._page_cache[link] else: html = get_data(link) self._page_cache[link] = html # test if cobj appears in page comb_names = [cobj['module_short'] + '.' + cobj['name']] if self.extra_modules_test is not None: for mod in self.extra_modules_test: comb_names.append(mod + '.' + cobj['name']) url = False for comb_name in comb_names: if html.find(comb_name) >= 0: url = link + '#' + comb_name link = url else: link = False return link def resolve(self, cobj, this_url): """Resolve the link to the documentation, returns None if not found Parameters ---------- cobj : dict Dict with information about the "code object" for which we are resolving a link. cobi['name'] : function or class name (str) cobj['module_short'] : shortened module name (str) cobj['module'] : module name (str) this_url: str URL of the current page. Needed to construct relative URLs (only used if relative=True in constructor). Returns ------- link : str | None The link (URL) to the documentation. """ full_name = cobj['module_short'] + '.' 
+ cobj['name'] link = self._link_cache.get(full_name, None) if link is None: # we don't have it cached link = self._get_link(cobj) # cache it for the future self._link_cache[full_name] = link if link is False or link is None: # failed to resolve return None if self.relative: link = os.path.relpath(link, start=this_url) if self._is_windows: # replace '\' with '/' so it on the web link = link.replace('\\', '/') # for some reason, the relative link goes one directory too high up link = link[3:] return link ############################################################################### rst_template = """ .. _%(short_fname)s: %(docstring)s **Python source code:** :download:`%(fname)s <%(fname)s>` .. literalinclude:: %(fname)s :lines: %(end_row)s- """ plot_rst_template = """ .. _%(short_fname)s: %(docstring)s %(image_list)s %(stdout)s **Python source code:** :download:`%(fname)s <%(fname)s>` .. literalinclude:: %(fname)s :lines: %(end_row)s- **Total running time of the example:** %(time_elapsed) .2f seconds """ # The following strings are used when we have several pictures: we use # an html div tag that our CSS uses to turn the lists into horizontal # lists. HLIST_HEADER = """ .. rst-class:: horizontal """ HLIST_IMAGE_TEMPLATE = """ * .. image:: images/%s :scale: 47 """ SINGLE_IMAGE = """ .. 
image:: images/%s :align: center """ def extract_docstring(filename): """ Extract a module-level docstring, if any """ lines = file(filename).readlines() start_row = 0 if lines[0].startswith('#!'): lines.pop(0) start_row = 1 docstring = '' first_par = '' tokens = tokenize.generate_tokens(iter(lines).next) for tok_type, tok_content, _, (erow, _), _ in tokens: tok_type = token.tok_name[tok_type] if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'): continue elif tok_type == 'STRING': docstring = eval(tok_content) # If the docstring is formatted with several paragraphs, extract # the first one: paragraphs = '\n'.join(line.rstrip() for line in docstring.split('\n')).split('\n\n') if len(paragraphs) > 0: first_par = paragraphs[0] break return docstring, first_par, erow + 1 + start_row def generate_example_rst(app): """ Generate the list of examples, as well as the contents of examples. """ root_dir = os.path.join(app.builder.srcdir, 'auto_examples') example_dir = os.path.abspath(app.builder.srcdir + '/../' + 'examples') try: plot_gallery = eval(app.builder.config.plot_gallery) except TypeError: plot_gallery = bool(app.builder.config.plot_gallery) if not os.path.exists(example_dir): os.makedirs(example_dir) if not os.path.exists(root_dir): os.makedirs(root_dir) # we create an index.rst with all examples fhindex = file(os.path.join(root_dir, 'index.rst'), 'w') #Note: The sidebar button has been removed from the examples page for now # due to how it messes up the layout. Will be fixed at a later point fhindex.write("""\ .. 
raw:: html <style type="text/css"> div#sidebarbutton { display: none; } .figure { float: left; margin: 16px; top: 0; left: 0; -webkit-border-radius: 10px; /* Saf3-4, iOS 1-3.2, Android <1.6 */ -moz-border-radius: 10px; /* FF1-3.6 */ border-radius: 10px; /* Opera 10.5, IE9, Saf5, Chrome, FF4, iOS 4, Android 2.1+ */ border: 2px solid #fff; -webkit-transition: all 0.15s ease-out; /* Saf3.2+, Chrome */ -moz-transition: all 0.15s ease-out; /* FF4+ */ -ms-transition: all 0.15s ease-out; /* IE10? */ -o-transition: all 0.15s ease-out; /* Opera 10.5+ */ transition: all 0.15s ease-out; background-repeat: no-repeat; /* --> Thumbnail image size */ width: 150px; height: 130px; } .figure img { display: inline; } .figure .caption { text-align: center !important; } </style> Examples ======== .. _examples-index: """) # Here we don't use an os.walk, but we recurse only twice: flat is # better than nested. generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery) for dir in sorted(os.listdir(example_dir)): if os.path.isdir(os.path.join(example_dir, dir)): generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery) fhindex.flush() def extract_line_count(filename, target_dir): # Extract the line count of a file example_file = os.path.join(target_dir, filename) lines = file(example_file).readlines() start_row = 0 if lines[0].startswith('#!'): lines.pop(0) start_row = 1 tokens = tokenize.generate_tokens(lines.__iter__().next) check_docstring = True erow_docstring = 0 for tok_type, _, _, (erow, _), _ in tokens: tok_type = token.tok_name[tok_type] if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'): continue elif ((tok_type == 'STRING') and (check_docstring == True)): erow_docstring = erow check_docstring = False return erow_docstring+1+start_row, erow+1+start_row def line_count_sort(file_list, target_dir): # Sort the list of examples by line-count new_list = filter(lambda x: x.endswith('.py'), file_list) unsorted = np.zeros(shape=(len(new_list), 2)) unsorted 
= unsorted.astype(np.object) for count, exmpl in enumerate(new_list): docstr_lines, total_lines = extract_line_count(exmpl, target_dir) unsorted[count][1] = total_lines - docstr_lines unsorted[count][0] = exmpl index = np.lexsort((unsorted[:,0].astype(np.str), unsorted[:,1].astype(np.float))) return np.array(unsorted[index][:,0]).tolist() def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery): """ Generate the rst file for an example directory. """ if not dir == '.': target_dir = os.path.join(root_dir, dir) src_dir = os.path.join(example_dir, dir) else: target_dir = root_dir src_dir = example_dir if not os.path.exists(os.path.join(src_dir, 'README.txt')): print 80 * '_' print ('Example directory %s does not have a README.txt file' % src_dir) print 'Skipping this directory' print 80 * '_' return fhindex.write(""" %s """ % file(os.path.join(src_dir, 'README.txt')).read()) if not os.path.exists(target_dir): os.makedirs(target_dir) sorted_listdir = line_count_sort(os.listdir(src_dir), src_dir) for fname in sorted_listdir: if fname.endswith('py'): generate_file_rst(fname, target_dir, src_dir, plot_gallery) thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png') link_name = os.path.join(dir, fname).replace(os.path.sep, '_') fhindex.write(""" .. raw:: html <div class="thumbnailContainer"> """) fhindex.write('.. figure:: %s\n' % thumb) if link_name.startswith('._'): link_name = link_name[2:] if dir != '.': fhindex.write(' :target: ./%s/%s.html\n\n' % (dir, fname[:-3])) else: fhindex.write(' :target: ./%s.html\n\n' % link_name[:-3]) fhindex.write(""" :ref:`%s` .. raw:: html </div> .. toctree:: :hidden: %s/%s """ % (link_name, dir, fname[:-3])) fhindex.write(""" .. 
raw:: html <div style="clear: both"></div> """) # clear at the end of the section # modules for which we embed links into example code DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy', 'wordcloud'] def make_thumbnail(in_fname, out_fname, width, height): """Make a thumbnail with the same aspect ratio centered in an image with a given width and height """ img = Image.open(in_fname) width_in, height_in = img.size scale_w = width / float(width_in) scale_h = height / float(height_in) if height_in * scale_w <= height: scale = scale_w else: scale = scale_h width_sc = int(round(scale * width_in)) height_sc = int(round(scale * height_in)) # resize the image img.thumbnail((width_sc, height_sc), Image.ANTIALIAS) # insert centered thumb = Image.new('RGB', (width, height), (255, 255, 255)) pos_insert = ((width - width_sc) / 2, (height - height_sc) / 2) thumb.paste(img, pos_insert) thumb.save(out_fname) def get_short_module_name(module_name, obj_name): """ Get the shortest possible module name """ parts = module_name.split('.') short_name = module_name for i in range(len(parts) - 1, 0, -1): short_name = '.'.join(parts[:i]) try: exec('from %s import %s' % (short_name, obj_name)) except ImportError: # get the last working module name short_name = '.'.join(parts[:(i + 1)]) break return short_name def generate_file_rst(fname, target_dir, src_dir, plot_gallery): """ Generate the rst file for a given example. """ base_image_name = os.path.splitext(fname)[0] image_fname = '%s_%%s.png' % base_image_name this_template = rst_template last_dir = os.path.split(src_dir)[-1] # to avoid leading . in file names, and wrong names in links if last_dir == '.' 
or last_dir == 'examples': last_dir = '' else: last_dir += '_' short_fname = last_dir + fname src_file = os.path.join(src_dir, fname) example_file = os.path.join(target_dir, fname) shutil.copyfile(src_file, example_file) # The following is a list containing all the figure names figure_list = [] image_dir = os.path.join(target_dir, 'images') thumb_dir = os.path.join(image_dir, 'thumb') if not os.path.exists(image_dir): os.makedirs(image_dir) if not os.path.exists(thumb_dir): os.makedirs(thumb_dir) image_path = os.path.join(image_dir, image_fname) stdout_path = os.path.join(image_dir, 'stdout_%s.txt' % base_image_name) time_path = os.path.join(image_dir, 'time_%s.txt' % base_image_name) thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png') time_elapsed = 0 if plot_gallery: # generate the plot as png image if it is more recent than an # existing image. first_image_file = image_path % 1 if os.path.exists(stdout_path): stdout = open(stdout_path).read() else: stdout = '' if os.path.exists(time_path): time_elapsed = float(open(time_path).read()) if (not os.path.exists(first_image_file) or os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime): # We need to execute the code print 'plotting %s' % fname t0 = time() import matplotlib.pyplot as plt plt.close('all') cwd = os.getcwd() try: # First CD in the original example dir, so that any file # created by the example get created in this directory orig_stdout = sys.stdout os.chdir(os.path.dirname(src_file)) my_buffer = StringIO() my_stdout = Tee(sys.stdout, my_buffer) sys.stdout = my_stdout my_globals = {'pl': plt, '__file__': src_file} execfile(os.path.basename(src_file), my_globals) time_elapsed = time() - t0 sys.stdout = orig_stdout my_stdout = my_buffer.getvalue() # get variables so we can later add links to the documentation example_code_obj = {} for var_name, var in my_globals.iteritems(): if not hasattr(var, '__module__'): continue if not isinstance(var.__module__, basestring): continue if 
var.__module__.split('.')[0] not in DOCMODULES: continue # get the type as a string with other things stripped tstr = str(type(var)) tstr = (tstr[tstr.find('\'') + 1:tstr.rfind('\'')].split('.')[-1]) # get shortened module name module_short = get_short_module_name(var.__module__, tstr) cobj = {'name': tstr, 'module': var.__module__, 'module_short': module_short, 'obj_type': 'object'} example_code_obj[var_name] = cobj # find functions so we can later add links to the documentation funregex = re.compile('[\w.]+\(') with open(src_file, 'rt') as fid: for line in fid.readlines(): if line.startswith('#'): continue for match in funregex.findall(line): fun_name = match[:-1] try: exec('this_fun = %s' % fun_name, my_globals) except Exception: #print 'extracting function failed' #print err continue this_fun = my_globals['this_fun'] if not callable(this_fun): continue if not hasattr(this_fun, '__module__'): continue if not isinstance(this_fun.__module__, basestring): continue if (this_fun.__module__.split('.')[0] not in DOCMODULES): continue # get shortened module name fun_name_short = fun_name.split('.')[-1] module_short = get_short_module_name( this_fun.__module__, fun_name_short) cobj = {'name': fun_name_short, 'module': this_fun.__module__, 'module_short': module_short, 'obj_type': 'function'} example_code_obj[fun_name] = cobj fid.close() if len(example_code_obj) > 0: # save the dictionary, so we can later add hyperlinks codeobj_fname = example_file[:-3] + '_codeobj.pickle' with open(codeobj_fname, 'wb') as fid: cPickle.dump(example_code_obj, fid, cPickle.HIGHEST_PROTOCOL) fid.close() if '__doc__' in my_globals: # The __doc__ is often printed in the example, we # don't with to echo it my_stdout = my_stdout.replace( my_globals['__doc__'], '') my_stdout = my_stdout.strip() if my_stdout: stdout = '**Script output**::\n\n %s\n\n' % ( '\n '.join(my_stdout.split('\n'))) open(stdout_path, 'w').write(stdout) open(time_path, 'w').write('%f' % time_elapsed) os.chdir(cwd) # In order 
to save every figure we have two solutions : # * iterate from 1 to infinity and call plt.fignum_exists(n) # (this requires the figures to be numbered # incrementally: 1, 2, 3 and not 1, 2, 5) # * iterate over [fig_mngr.num for fig_mngr in # matplotlib._pylab_helpers.Gcf.get_all_fig_managers()] for fig_num in (fig_mngr.num for fig_mngr in matplotlib._pylab_helpers.Gcf.get_all_fig_managers()): # Set the fig_num figure as the current figure as we can't # save a figure that's not the current figure. plt.figure(fig_num) plt.savefig(image_path % fig_num) figure_list.append(image_fname % fig_num) except: print 80 * '_' print '%s is not compiling:' % fname traceback.print_exc() print 80 * '_' finally: os.chdir(cwd) sys.stdout = orig_stdout print " - time elapsed : %.2g sec" % time_elapsed else: figure_list = [f[len(image_dir):] for f in glob.glob(image_path % '[1-9]')] #for f in glob.glob(image_path % '*')] # generate thumb file this_template = plot_rst_template if os.path.exists(first_image_file): make_thumbnail(first_image_file, thumb_file, 200, 140) if not os.path.exists(thumb_file): # create something to replace the thumbnail make_thumbnail('images/no_image.png', thumb_file, 200, 140) docstring, short_desc, end_row = extract_docstring(example_file) # Depending on whether we have one or more figures, we're using a # horizontal list or a single rst call to 'image'. if len(figure_list) == 1: figure_name = figure_list[0] image_list = SINGLE_IMAGE % figure_name.lstrip('/') else: image_list = HLIST_HEADER for figure_name in figure_list: image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/') f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w') f.write(this_template % locals()) f.flush() def embed_code_links(app, exception): """Embed hyperlinks to documentation into example code""" try: if exception is not None: return print 'Embedding documentation hyperlinks in examples..' 
# Add resolvers for the packages for which we want to show links doc_resolvers = {} doc_resolvers['wordcloud'] = SphinxDocLinkResolver(app.builder.outdir, relative=True) doc_resolvers['sklearn'] = SphinxDocLinkResolver( 'http://scikit-learn.org/stable') doc_resolvers['matplotlib'] = SphinxDocLinkResolver( 'http://matplotlib.org') doc_resolvers['numpy'] = SphinxDocLinkResolver( 'http://docs.scipy.org/doc/numpy-1.6.0') doc_resolvers['scipy'] = SphinxDocLinkResolver( 'http://docs.scipy.org/doc/scipy-0.11.0/reference') example_dir = os.path.join(app.builder.srcdir, 'auto_examples') html_example_dir = os.path.abspath(os.path.join(app.builder.outdir, 'auto_examples')) # patterns for replacement link_pattern = '<a href="%s">%s</a>' orig_pattern = '<span class="n">%s</span>' period = '<span class="o">.</span>' for dirpath, _, filenames in os.walk(html_example_dir): for fname in filenames: print '\tprocessing: %s' % fname full_fname = os.path.join(html_example_dir, dirpath, fname) subpath = dirpath[len(html_example_dir) + 1:] pickle_fname = os.path.join(example_dir, subpath, fname[:-5] + '_codeobj.pickle') if os.path.exists(pickle_fname): # we have a pickle file with the objects to embed links for with open(pickle_fname, 'rb') as fid: example_code_obj = cPickle.load(fid) fid.close() str_repl = {} # generate replacement strings with the links for name, cobj in example_code_obj.iteritems(): this_module = cobj['module'].split('.')[0] if this_module not in doc_resolvers: continue link = doc_resolvers[this_module].resolve(cobj, full_fname) if link is not None: parts = name.split('.') name_html = orig_pattern % parts[0] for part in parts[1:]: name_html += period + orig_pattern % part str_repl[name_html] = link_pattern % (link, name_html) # do the replacement in the html file if len(str_repl) > 0: with codecs.open(full_fname, 'rt', encoding='utf-8') as fid: lines_in = fid.readlines() fid.close() with open(full_fname, 'wt') as fid: for line in lines_in: for name, link in 
str_repl.iteritems(): try: line = line.encode("ascii", 'ignore').replace(name, link) except Exception as e: print(line) print(name) print(link) raise e fid.write(line) fid.close() except urllib2.HTTPError, e: print ("The following HTTP Error has occurred:\n") print e.code except urllib2.URLError, e: print ("\n...\n" "Warning: Embedding the documentation hyperlinks requires " "internet access.\nPlease check your network connection.\n" "Unable to continue embedding due to a URL Error: \n") print e.args print '[done]' def setup(app): app.connect('builder-inited', generate_example_rst) app.add_config_value('plot_gallery', True, 'html') # embed links after build is finished app.connect('build-finished', embed_code_links) # Sphinx hack: sphinx copies generated images to the build directory # each time the docs are made. If the desired image name already # exists, it appends a digit to prevent overwrites. The model is, # the directory is never cleared. This means that each time you build # the docs, the number of images in the directory grows. # # This question has been asked on the sphinx development list, but there # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html # # The following is a hack that prevents this behavior by clearing the # image build directory each time the docs are built. If sphinx # changes their layout between versions, this will not work (though # it should probably not cause a crash). Tested successfully # on Sphinx 1.0.7 build_image_dir = '_build/html/_images' if os.path.exists(build_image_dir): filelist = os.listdir(build_image_dir) for filename in filelist: if filename.endswith('png'): os.remove(os.path.join(build_image_dir, filename))
mit
shoyer/xray
setup.py
1
4181
#!/usr/bin/env python import sys import versioneer from setuptools import find_packages, setup DISTNAME = 'xarray' LICENSE = 'Apache' AUTHOR = 'xarray Developers' AUTHOR_EMAIL = 'xarray@googlegroups.com' URL = 'https://github.com/pydata/xarray' CLASSIFIERS = [ 'Development Status :: 4 - Beta', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Intended Audience :: Science/Research', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Topic :: Scientific/Engineering', ] PYTHON_REQUIRES = '>=3.5' INSTALL_REQUIRES = ['numpy >= 1.12', 'pandas >= 0.19.2'] needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv) SETUP_REQUIRES = ['pytest-runner >= 4.2'] if needs_pytest else [] TESTS_REQUIRE = ['pytest >= 2.7.1'] if sys.version_info[0] < 3: TESTS_REQUIRE.append('mock') DESCRIPTION = "N-D labeled arrays and datasets in Python" LONG_DESCRIPTION = """ **xarray** (formerly **xray**) is an open source project and Python package that makes working with labelled multi-dimensional arrays simple, efficient, and fun! Xarray introduces labels in the form of dimensions, coordinates and attributes on top of raw NumPy_-like arrays, which allows for a more intuitive, more concise, and less error-prone developer experience. The package includes a large and growing library of domain-agnostic functions for advanced analytics and visualization with these data structures. Xarray was inspired by and borrows heavily from pandas_, the popular data analysis package focused on labelled tabular data. It is particularly tailored to working with netCDF_ files, which were the source of xarray's data model, and integrates tightly with dask_ for parallel computing. .. _NumPy: https://www.numpy.org .. _pandas: https://pandas.pydata.org .. _dask: https://dask.org .. 
_netCDF: https://www.unidata.ucar.edu/software/netcdf Why xarray? ----------- Multi-dimensional (a.k.a. N-dimensional, ND) arrays (sometimes called "tensors") are an essential part of computational science. They are encountered in a wide range of fields, including physics, astronomy, geoscience, bioinformatics, engineering, finance, and deep learning. In Python, NumPy_ provides the fundamental data structure and API for working with raw ND arrays. However, real-world datasets are usually more than just raw numbers; they have labels which encode information about how the array values map to locations in space, time, etc. Xarray doesn't just keep track of labels on arrays -- it uses them to provide a powerful and concise interface. For example: - Apply operations over dimensions by name: ``x.sum('time')``. - Select values by label instead of integer location: ``x.loc['2014-01-01']`` or ``x.sel(time='2014-01-01')``. - Mathematical operations (e.g., ``x - y``) vectorize across multiple dimensions (array broadcasting) based on dimension names, not shape. - Flexible split-apply-combine operations with groupby: ``x.groupby('time.dayofyear').mean()``. - Database like alignment based on coordinate labels that smoothly handles missing values: ``x, y = xr.align(x, y, join='outer')``. - Keep track of arbitrary metadata in the form of a Python dictionary: ``x.attrs``. 
Learn more ---------- - Documentation: http://xarray.pydata.org - Issue tracker: http://github.com/pydata/xarray/issues - Source code: http://github.com/pydata/xarray - SciPy2015 talk: https://www.youtube.com/watch?v=X0pAhJgySxk """ # noqa setup(name=DISTNAME, version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), license=LICENSE, author=AUTHOR, author_email=AUTHOR_EMAIL, classifiers=CLASSIFIERS, description=DESCRIPTION, long_description=LONG_DESCRIPTION, python_requires=PYTHON_REQUIRES, install_requires=INSTALL_REQUIRES, setup_requires=SETUP_REQUIRES, tests_require=TESTS_REQUIRE, url=URL, packages=find_packages(), package_data={'xarray': ['tests/data/*']})
apache-2.0
google-research/google-research
moew/mnist.py
1
8720
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Experiments with mnist data. This experiment uses MNIST handwritten digit database, and train on it with classifiers of varying complexity. The error metric was taken to be the maximum of the error rates for each digit. See the paper for a detailed explanation of the experiment. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import app from absl import flags import numpy as np from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import ConstantKernel from sklearn.gaussian_process.kernels import RBF import tensorflow.compat.v1 as tf from tensorflow.examples.tutorials.mnist import input_data # pylint: disable=g-direct-tensorflow-import FLAGS = flags.FLAGS flags.DEFINE_bool('uniform_weights', False, 'whether to use uniform weights') flags.DEFINE_bool('random_weights', False, 'whether to use random weights') flags.DEFINE_bool('random_alpha', False, 'whether to use random alphas') flags.DEFINE_float('sampling_radius', 1.0, 'radius in the sampling ball') flags.DEFINE_integer('num_parallel_alphas', 20, 'number of parallel alphas') flags.DEFINE_integer('num_alpha_batches', 10, 'number of alpha batches') flags.DEFINE_integer('classifier_hidden_nodes', 50, 'classifier hidden nodes') LEARNING_RATE = 0.001 TRAINING_STEPS = 10000 BATCH_SIZE = 100 INPUT_DIM = 784 
OUTPUT_DIM = 10 TRAIN_INPUT_SIZE = 60000 def metric(y, logits, all_digits=False): result = np.sum( y * (np.argmax(y, axis=1) != np.argmax(logits, axis=1))[:, None], axis=0) / np.sum( y, axis=0) if all_digits: return result else: return np.max(result) def classifier(x): layer1 = tf.layers.dense( inputs=x, units=FLAGS.classifier_hidden_nodes, activation=tf.sigmoid) logits = tf.layers.dense(inputs=layer1, units=OUTPUT_DIM) return logits def optimization(logits, y, random_weights, alpha, learning_rate): if FLAGS.random_weights: weights = random_weights else: weights = tf.sigmoid(tf.matmul(y, alpha)) weights /= tf.reduce_mean(weights) loss = tf.losses.sigmoid_cross_entropy( multi_class_labels=y, logits=logits, weights=weights) optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss) return optimizer, loss def sample_from_ball(size=(1, 1), sampling_radius=1): count, dim = size points = np.random.normal(size=size) points /= np.linalg.norm(points, axis=1)[:, np.newaxis] scales = sampling_radius * np.power( np.random.uniform(size=(count, 1)), 1 / dim) points *= scales return points def main(_): mnist = input_data.read_data_sets('/tmp/data/', one_hot=True, seed=12345) random_weight_vector = np.random.uniform( low=0.1, high=1.9, size=TRAIN_INPUT_SIZE) x = tf.placeholder(tf.float32, shape=(None, INPUT_DIM), name='x') y = tf.placeholder(tf.float32, shape=(None, OUTPUT_DIM), name='y') weight = tf.placeholder(tf.float32, shape=(None, OUTPUT_DIM), name='weight') parallel_alphas = tf.placeholder( tf.float32, shape=(FLAGS.num_parallel_alphas, OUTPUT_DIM), name='parallel_alphas') unstack_parallel_alphas = tf.unstack(parallel_alphas, axis=0) parallel_logits = [] parallel_losses = [] parallel_optimizers = [] validation_metrics = [] test_metrics = [] all_test_metrics = [] with tf.variable_scope('classifier'): for alpha_index in range(FLAGS.num_parallel_alphas): logits = classifier(x) alpha = tf.reshape( unstack_parallel_alphas[alpha_index], shape=[OUTPUT_DIM, 1]) optimizer, loss 
= optimization(logits, y, weight, alpha, LEARNING_RATE) parallel_logits.append(logits) parallel_losses.append(loss) parallel_optimizers.append(optimizer) init = tf.global_variables_initializer() classifiers_init = tf.variables_initializer( tf.global_variables(scope='classifier')) with tf.Session() as sess: sess.run(init) # GetCandidatesAlpha (Algorithm 2 in paper) sample_alphas = np.zeros(shape=(0, OUTPUT_DIM)) for alpha_batch_index in range(FLAGS.num_alpha_batches): sess.run(classifiers_init) if FLAGS.uniform_weights: alpha_batch = np.zeros(shape=(FLAGS.num_parallel_alphas, OUTPUT_DIM)) elif FLAGS.random_alpha or alpha_batch_index < 1: alpha_batch = sample_from_ball( size=(FLAGS.num_parallel_alphas, OUTPUT_DIM), sampling_radius=FLAGS.sampling_radius) sample_alphas = np.concatenate([sample_alphas, alpha_batch]) else: # Use LCB to generate candidates. alpha_batch = np.zeros(shape=(0, OUTPUT_DIM)) sample_metrics = validation_metrics[:] for alpha_index in range(FLAGS.num_parallel_alphas): kernel = RBF( length_scale=FLAGS.sampling_radius, length_scale_bounds=(FLAGS.sampling_radius * 1e-3, FLAGS.sampling_radius * 1e3)) * ConstantKernel(1.0, (1e-3, 1e3)) gp = GaussianProcessRegressor( kernel=kernel, alpha=1e-4).fit(sample_alphas, np.log1p(sample_metrics)) candidates = sample_from_ball((10000, OUTPUT_DIM), FLAGS.sampling_radius) metric_mles, metric_stds = gp.predict(candidates, return_std=True) metric_lcbs = np.maximum( np.expm1(metric_mles - 1.0 * metric_stds), 0.0) metric_lcbs += np.random.random( size=metric_lcbs.shape) * 0.001 # break ties best_index = np.argmin(metric_lcbs) best_alpha = [candidates[best_index]] best_alpha_metric_estimate = np.minimum( np.expm1(metric_mles[best_index] + 1.0 * metric_stds[best_index]), 1.0) alpha_batch = np.concatenate([alpha_batch, best_alpha]) sample_alphas = np.concatenate([sample_alphas, best_alpha]) sample_metrics.append(best_alpha_metric_estimate) # Training classifiers for step in range(TRAINING_STEPS): batch_index = range(step 
* BATCH_SIZE % TRAIN_INPUT_SIZE, step * BATCH_SIZE % TRAIN_INPUT_SIZE + BATCH_SIZE) (batch_x, batch_y) = mnist.train.next_batch(BATCH_SIZE, shuffle=False) batch_weight = [ [random_weight_vector[i]] * OUTPUT_DIM for i in batch_index ] _, _ = sess.run( [parallel_optimizers, parallel_losses], feed_dict={ x: batch_x, y: batch_y, weight: batch_weight, parallel_alphas: alpha_batch, }) parallel_validation_logits = sess.run( parallel_logits, feed_dict={ x: mnist.validation.images, y: mnist.validation.labels, }) parallel_validation_metrics = [ metric(mnist.validation.labels, validation_logits, all_digits=False) for validation_logits in parallel_validation_logits ] validation_metrics.extend(parallel_validation_metrics) parallel_test_logits = sess.run( parallel_logits, feed_dict={ x: mnist.test.images, y: mnist.test.labels, }) parallel_test_metrics = [ metric(mnist.test.labels, test_logits, all_digits=False) for test_logits in parallel_test_logits ] test_metrics.extend(parallel_test_metrics) parallel_all_test_metrics = [ metric(mnist.test.labels, test_logits, all_digits=True) for test_logits in parallel_test_logits ] all_test_metrics.extend(parallel_all_test_metrics) best_observed_index = np.argmin(validation_metrics) print('[metric] validation={}'.format( validation_metrics[best_observed_index])) print('[metric] test={}'.format(test_metrics[best_observed_index])) for i in range(10): print('[all test metrics] {}={}'.format( i, all_test_metrics[best_observed_index][i])) if __name__ == '__main__': app.run(main)
apache-2.0
McIntyre-Lab/papers
newman_events_2017/python_workflow/programs/create_event_summaries.py
1
10533
#!/usr/bin/env python3 ####################################################################################################################### # # DATE: 2018-04-16 # NAME: create_event_summaries.py # AUTHOR: Jeremy R. B. Newman (jrbnewman@ufl.edu) # # DESCRIPTION: This script creates summaries for each event in the given input file. It takes a wide-formatted dataset of counts # by event, annotations, detection flags, and a design file, and outputs a summary file, detailing group means, # group detection, annotation frequency of events, transcripts and genes. # # REQUIRED PACKAGES: pandas (tested with v0.19.2) # argparse (tested with v1.1) # logging (tested with v0.5.1.2) # sqlite3 # ####################################################################################################################### # Import required packages import pandas as pd import logging import sqlite3 import argparse def getOptions(): # Parse command line arguments parser = argparse.ArgumentParser(description="Takes wide-formatted counts matrix and a design file and outputs a " "TSV of group-wise detection flags") # Inputs parser.add_argument("-i", "--input-data", dest="inCounts", required=True, help="Wide-formatted dataset of counts") parser.add_argument("-d", "--design-file", dest="inDesign", required=True, help="Design file to relate samples to groups") parser.add_argument("-a", "--annotation-file", dest="inAnnot", required=True, help="Formatted annotation file for events (make sure the correct annotation file is specified! ") parser.add_argument("-f", "--detection-flags-file", dest="inFlags", required=True, help="Detection flags for events") parser.add_argument("-j", "--junction-sequence-index", dest="inJuncSeq", required=False, help="Junction-to-sequence index file created by extract_junction_sequence.py. This is REQUIRED " "for junctions only. 
Exonic regions, fragments and introns do not need this infomation") # User-defined values parser.add_argument("-g", "--group-variable", dest="userGroup", required=True, help="Variable in design file used to group samples by treatment, condition, etc.") parser.add_argument("-l", "--event-length", dest="userLength", required=False, type=int, help="Minimum length (in bp) of event to be considered for analysis. Any event (exon fragment," "jnction, etc.) that is less than this value will be excluded from analysis. If not specified then " "events are not filtered by their length.") # Outputs parser.add_argument("-o", "--output-summary", dest="outFlags", required=True, help="Output TSV of event detection flags by " "treatment group") args = parser.parse_args() return args def main(): # Connect to SQL database con = sqlite3.connect(":memory:") cur = con.cursor() # Import counts, design file, annotation and detection flags countsWideDF = pd.read_csv(args.inCounts, sep="\t") designDF = pd.read_csv(args.inDesign, sep="\t") annotDF = pd.read_csv(args.inAnnot, sep=",") flagsDF = pd.read_csv(args.inFlags, sep="\t") # For junctions, check that the user also suppose if 'junction_id' in annotDF.columns: if args.inJuncSeq : # check that a junction-to-sequence index was provided try: juncSeqDF = pd.read_csv(args.inJuncSeq, sep=",", usecols=['junction_id','sequence_id']) juncSeqDF.to_sql("junc2seqIndex", con, if_exists="replace") except IOError: print("Junction-to-sequence index file not specified or does not exist! Please specify this with the --junction-sequence-index option") except ValueError: print("Junction-to-sequence index file does not appear to be valid. Check your index file.") except : print("An unexpected error occurred while importing the junction-to-sequence index") raise else: print("Junction-to-sequence index file not specified! 
Please specify this with the --junction-sequence-index option") raise NameError("No junction-to-index file provided") # Convert to a tall dataset countsTallDF = pd.melt(countsWideDF, id_vars=['event_id'], var_name='sampleID', value_name='APN') # Send tall counts dataset and design file to SQL and merge countsTallDF.to_sql("countsTall", con, if_exists="replace") designDF.to_sql("designInfo", con, if_exists="replace") annotDF.to_sql("annotInfo", con, if_exists="replace") flagsDF.to_sql("flagsInfo", con, if_exists="replace") cur.execute("CREATE TABLE countsKey AS SELECT in1.*, in2.event_id, in2.APN " "FROM designInfo in1 INNER JOIN countsTall in2 " "ON in1.sampleID = in2.sampleID ;") # Calculate group means cur.execute("CREATE TABLE countsMean AS SELECT event_id, " + args.userGroup + ", avg(APN) as mean_apn FROM countsKey GROUP BY event_id, "+ args.userGroup + ";") # Put means side-by-side groupList = designDF[args.userGroup].drop_duplicates(keep='first').tolist() counter = 1 for group in range(0,len(groupList)) : groupName = groupList[group] cur.execute("CREATE TABLE tempMean AS SELECT event_id, mean_apn AS mean_apn_"+groupName+" FROM countsMean " "WHERE " + args.userGroup + " = ? ORDER BY event_id;", (groupName, )) if counter == 1: cur.execute("CREATE TABLE eventsMean AS SELECT * FROM tempMean ;") cur.execute("DROP TABLE tempMean;") counter=counter + 1 else: cur.execute("CREATE TABLE eventsMean2 AS SELECT in1.*, in2.mean_apn_" + groupName + " FROM eventsMean in1 INNER JOIN tempMean in2 " "ON in1.event_id = in2.event_id " "ORDER BY in1.event_id ;") cur.execute("DROP TABLE tempMean;") cur.execute("DROP TABLE eventsMean;") cur.execute("ALTER TABLE eventsMean2 RENAME TO eventsMean ;") # Merge in detection flags cur.execute("CREATE TABLE eventsMeansFlag AS SELECT * " "FROM eventsMean in1 INNER JOIN flagsInfo in2 ON in1.event_id = in2.event_id") # Create a list of columns to extract from annotations. 
We want (if possible) annotation frequency, # flag_multigene, transcript list, gene list, event_type annotList=['event_id'] if 'annotation_frequency' in annotDF.columns: annotList.append('annotation_frequency') if 'flag_multigene' in annotDF.columns: annotList.append('flag_multigene') if 'transcript_id' in annotDF.columns: annotList.append('transcript_id') if 'gene_id' in annotDF.columns: annotList.append('gene_id') if 'flag_junction_annotated' in annotDF.columns: annotList.append('flag_junction_annotated') if 'flag_border_junction' in annotDF.columns: annotList.append('flag_border_junction') if 'flag_alt_donor' in annotDF.columns: annotList.append('flag_alt_donor') if 'flag_alt_acceptor' in annotDF.columns: annotList.append('flag_alt_acceptor') if 'flag_exonskip' in annotDF.columns: annotList.append('flag_exonskip') annotListStr = ', '.join(annotList) # Merge in annotations if 'fragment_id' in annotDF.columns: cur.execute("CREATE TABLE annotInfo2 AS SELECT *, fragment_id AS event_id, (fragment_stop-fragment_start) AS event_length FROM annotInfo ;" ) elif 'intron_id' in annotDF.columns: cur.execute("CREATE TABLE annotInfo2 AS SELECT *, intron_id AS event_id, (intron_stop-intron_start) AS event_length FROM annotInfo ;") elif 'junction_id' in annotDF.columns: cur.execute("CREATE TABLE annotInfo2 AS SELECT *, junction_id AS event_id, (donor_stop-donor_start + acceptor_stop-acceptor_start) AS event_length FROM annotInfo ;") elif 'fusion_id' in annotDF.columns: cur.execute("CREATE TABLE annotInfo2 AS SELECT *, fusion_id AS event_id, (fusion_stop-fusion_start) AS event_length FROM annotInfo ;" ) else: cur.execute("CREATE TABLE annotInfo2 AS SELECT * FROM annotInfo ;") if args.userLength: cur.execute("CREATE TABLE annotInfo3 AS SELECT " + annotListStr + " FROM annotInfo2 WHERE event_length >= ? 
ORDER BY event_id ;", (args.userLength, ) ) else : cur.execute("CREATE TABLE annotInfo3 AS SELECT " + annotListStr + " FROM annotInfo2 ORDER BY event_id ;") if 'junction_id' in annotDF.columns: # If junctions, we need to convert these from sequnce IDs to junction IDs cur.execute("CREATE TABLE eventsMeansFlag2 AS SELECT *, event_id AS sequence_id FROM eventsMeansFlag ;") cur.execute("CREATE TABLE eventsMeansFlag3 AS SELECT in1.*, in2.junction_id " "FROM eventsMeansFlag2 in1 INNER JOIN junc2seqIndex in2 " "ON in1.sequence_id = in2.sequence_id") cur.execute("CREATE TABLE eventsMeansFlagAnnot AS SELECT in1.*, in2.* " "FROM annotInfo3 in1 INNER JOIN eventsMeansFlag3 in2 " "ON in1.event_id = in2.junction_id ;") eventSummaryDF = pd.read_sql("SELECT * FROM eventsMeansFlagAnnot;", con) eventSummaryDF = eventSummaryDF.drop(['event_id:1','event_id:2','sequence_id','junction_id','index'], axis=1) else: cur.execute("CREATE TABLE eventsMeansFlagAnnot AS SELECT in1.*, in2.* " "FROM eventsMeansFlag in1 INNER JOIN annotInfo3 in2 " "ON in1.event_id = in2.event_id ;") eventSummaryDF = pd.read_sql("SELECT * FROM eventsMeansFlagAnnot;", con) eventSummaryDF = eventSummaryDF.drop(['event_id:1','event_id:2','index'], axis=1) # Write output flags file with open(args.outFlags, 'w') as outFile: eventSummaryDF.to_csv(outFile, encoding='utf-8', index=False, sep="\t") if __name__ == '__main__': # Parse command line arguments global args args = getOptions() # Setting up logger logger = logging.getLogger() logger.info('Starting script') # Calling main script main() logger.info('Script complete')
lgpl-3.0
ocefpaf/iris
docs/iris/example_code/Meteorology/COP_maps.py
2
6435
""" Global average annual temperature maps ====================================== Produces maps of global temperature forecasts from the A1B and E1 scenarios. The data used comes from the HadGEM2-AO model simulations for the A1B and E1 scenarios, both of which were derived using the IMAGE Integrated Assessment Model (Johns et al. 2011; Lowe et al. 2009). References ---------- Johns T.C., et al. (2011) Climate change under aggressive mitigation: the ENSEMBLES multi-model experiment. Climate Dynamics, Vol 37, No. 9-10, doi:10.1007/s00382-011-1005-5. Lowe J.A., C.D. Hewitt, D.P. Van Vuuren, T.C. Johns, E. Stehfest, J-F. Royer, and P. van der Linden, 2009. New Study For Climate Modeling, Analyses, and Scenarios. Eos Trans. AGU, Vol 90, No. 21, doi:10.1029/2009EO210001. """ import os.path import matplotlib.pyplot as plt import numpy as np import iris import iris.coords as coords import iris.plot as iplt def cop_metadata_callback(cube, field, filename): """ A function which adds an "Experiment" coordinate which comes from the filename. """ # Extract the experiment name (such as a1b or e1) from the filename (in # this case it is just the parent folder's name) containing_folder = os.path.dirname(filename) experiment_label = os.path.basename(containing_folder) # Create a coordinate with the experiment label in it exp_coord = coords.AuxCoord( experiment_label, long_name="Experiment", units="no_unit" ) # and add it to the cube cube.add_aux_coord(exp_coord) def main(): # Load e1 and a1 using the callback to update the metadata e1 = iris.load_cube( iris.sample_data_path("E1.2098.pp"), callback=cop_metadata_callback ) a1b = iris.load_cube( iris.sample_data_path("A1B.2098.pp"), callback=cop_metadata_callback ) # Load the global average data and add an 'Experiment' coord it global_avg = iris.load_cube(iris.sample_data_path("pre-industrial.pp")) # Define evenly spaced contour levels: -2.5, -1.5, ... 
15.5, 16.5 with the # specific colours levels = np.arange(20) - 2.5 red = ( np.array( [ 0, 0, 221, 239, 229, 217, 239, 234, 228, 222, 205, 196, 161, 137, 116, 89, 77, 60, 51, ] ) / 256.0 ) green = ( np.array( [ 16, 217, 242, 243, 235, 225, 190, 160, 128, 87, 72, 59, 33, 21, 29, 30, 30, 29, 26, ] ) / 256.0 ) blue = ( np.array( [ 255, 255, 243, 169, 99, 51, 63, 37, 39, 21, 27, 23, 22, 26, 29, 28, 27, 25, 22, ] ) / 256.0 ) # Put those colours into an array which can be passed to contourf as the # specific colours for each level colors = np.array([red, green, blue]).T # Subtract the global # Iterate over each latitude longitude slice for both e1 and a1b scenarios # simultaneously for e1_slice, a1b_slice in zip( e1.slices(["latitude", "longitude"]), a1b.slices(["latitude", "longitude"]), ): time_coord = a1b_slice.coord("time") # Calculate the difference from the mean delta_e1 = e1_slice - global_avg delta_a1b = a1b_slice - global_avg # Make a wider than normal figure to house two maps side-by-side fig = plt.figure(figsize=(12, 5)) # Get the time datetime from the coordinate time = time_coord.units.num2date(time_coord.points[0]) # Set a title for the entire figure, giving the time in a nice format # of "MonthName Year". Also, set the y value for the title so that it # is not tight to the top of the plot. 
fig.suptitle( "Annual Temperature Predictions for " + time.strftime("%Y"), y=0.9, fontsize=18, ) # Add the first subplot showing the E1 scenario plt.subplot(121) plt.title("HadGEM2 E1 Scenario", fontsize=10) iplt.contourf(delta_e1, levels, colors=colors, extend="both") plt.gca().coastlines() # get the current axes' subplot for use later on plt1_ax = plt.gca() # Add the second subplot showing the A1B scenario plt.subplot(122) plt.title("HadGEM2 A1B-Image Scenario", fontsize=10) contour_result = iplt.contourf( delta_a1b, levels, colors=colors, extend="both" ) plt.gca().coastlines() # get the current axes' subplot for use later on plt2_ax = plt.gca() # Now add a colourbar who's leftmost point is the same as the leftmost # point of the left hand plot and rightmost point is the rightmost # point of the right hand plot # Get the positions of the 2nd plot and the left position of the 1st # plot left, bottom, width, height = plt2_ax.get_position().bounds first_plot_left = plt1_ax.get_position().bounds[0] # the width of the colorbar should now be simple width = left - first_plot_left + width # Add axes to the figure, to place the colour bar colorbar_axes = fig.add_axes([first_plot_left, 0.18, width, 0.03]) # Add the colour bar cbar = plt.colorbar( contour_result, colorbar_axes, orientation="horizontal" ) # Label the colour bar and add ticks cbar.set_label(e1_slice.units) cbar.ax.tick_params(length=0) iplt.show() if __name__ == "__main__": main()
lgpl-3.0
jblupus/PyLoyaltyProject
loyalty/resumes.py
1
1906
import json from threading import Thread from utils import HOME import numpy as np import pandas as pd SUMMARY_PATH = HOME + '/Dropbox/Twitter/Summary/' BASE_PATH = HOME + '/Dropbox/Twitter/' class Resume(Thread): def __init__(self, inputfile, outputfile): super(Resume, self).__init__() self.inputfile = inputfile self.outputfile = outputfile def run(self): summary = {} for key in ['like', 'mention', 'retweet']: data = json.load(open(self.inputfile + key + '.jsons', 'r')) for user in data['intervals']: if user not in summary: summary.update({user: {'like': 0, 'mention': 0, 'retweet': 0}}) summary[user][key] = max(np.array(data['intervals'][user].keys()).astype(float)) df = pd.DataFrame() # df['id'] = ids = [] like = [] mention = [] retweet = [] for key in summary.keys(): ids.append(key) like.append(summary[key]['like']) mention.append(summary[key]['mention']) retweet.append(summary[key]['retweet']) df['id'] = ids df['like'] = like df['mention'] = mention df['retweet'] = retweet df.to_csv(SUMMARY_PATH + self.outputfile) Resume(BASE_PATH + '/Interactions.Loyalty/', 'dist_summary.csv').run() # Resume( + f, SUMMARY_PATH + 'dist_' + f).run() # for f in ['like.jsons', 'mention.jsons', 'retweet.jsons']: # Resume(BASE_PATH + 'Text.Distributions/' + f, SUMMARY_PATH + 'text_' + f).run() # for f in ['like.jsons', 'mention.jsons', 'retweet.jsons']: # Resume(BASE_PATH + 'Raw.Distributions2.Friends/' + f, SUMMARY_PATH + 'followers_' + f).run() # for f in ['like.jsons', 'mention.jsons', 'retweet.jsons']: # Resume(BASE_PATH + 'Raw.Distributions2.Users/' + f, SUMMARY_PATH + 'no_followers_' + f).run()
bsd-2-clause