Dataset schema (one row per source file):

  repo             string   (length 2 to 99)
  file             string   (length 13 to 225)
  code             string   (length 0 to 18.3M)
  file_length      int64    (0 to 18.3M)
  avg_line_length  float64  (0 to 1.36M)
  max_line_length  int64    (0 to 4.26M)
  extension_type   string   (1 class)
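For orientation, here is a minimal sketch of how rows with this schema could be loaded and filtered with the Hugging Face `datasets` library. The dataset identifier "user/python-code-dump" is a placeholder assumption, not the real path.

# Hypothetical loading example; the dataset path below is a placeholder.
from datasets import load_dataset

ds = load_dataset("user/python-code-dump", split="train")  # hypothetical path
# e.g. keep only short Python files from the pycbc repo
subset = ds.filter(lambda row: row["repo"] == "pycbc"
                   and row["extension_type"] == "py"
                   and row["file_length"] < 10_000)
print(subset[0]["file"], subset[0]["max_line_length"])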
repo: pycbc
file: pycbc-master/bin/plotting/pycbc_plot_Nth_loudest_coinc_omicron.py
code:
""" Generates a plot that shows the time-frequency trace of the Nth loudest
coincident trigger overlaid on a background of Omicron triggers.
"""
import logging
import h5py
import numpy as np
import argparse
import glob
from ligo.lw import lsctables, utils
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pycbc.events
from pycbc.waveform import get_td_waveform, frequency_from_polarizations, \
    amplitude_from_polarizations
from pycbc.io.ligolw import LIGOLWContentHandler

logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)

parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--coinc-file', type=str, required=True,
                    help='HDF file containing coincident CBC triggers')
parser.add_argument('--single-ifo-trigs', type=str, required=True,
                    help='HDF file containing single IFO CBC triggers')
parser.add_argument('--ifo', type=str, required=True,
                    help='IFO, L1 or H1')
parser.add_argument('--tmpltbank-file', type=str, required=True,
                    help='HDF file containing template information for CBC search')
parser.add_argument('--output-file', type=str, required=True,
                    help='Full path to output file')
parser.add_argument('--loudest-event-number', type=int, required=True,
                    default=1,
                    help='Script will plot the Nth loudest coincident trigger')
parser.add_argument('--omicron-dir', type=str, required=True,
                    help='Directory containing Omicron triggers. '
                         'Ex: /home/detchar/triggers/ER7/')
parser.add_argument('--omicron-snr-thresh', type=int, required=False,
                    default=5,
                    help='SNR threshold for choosing which Omicron triggers to plot.')
parser.add_argument('--plot-window', type=float, required=False, default=32,
                    help='Time window to plot around CBC trigger')
parser.add_argument('--omicron-channel', type=str, required=False,
                    default='GDS-CALIB_STRAIN',
                    help='Channel to plot Omicron triggers for, do not include IFO')
parser.add_argument('--analysis-level', type=str, required=False,
                    default='foreground',
                    choices=['foreground', 'background', 'background_exc'],
                    help='Designates which level of the analysis output to search')
args = parser.parse_args()

logging.info('Reading HDF files')
coinc_trig_file = h5py.File(args.coinc_file, 'r')
single_trig_file = h5py.File(args.single_ifo_trigs, 'r')
template_file = h5py.File(args.tmpltbank_file, 'r')

logging.info('Parsing HDF files')
coinc_newsnr = coinc_trig_file[args.analysis_level]['stat'][:]
Nth_loudest_idx = np.argsort(coinc_newsnr)[-args.loudest_event_number]
if coinc_trig_file.attrs['detector_1'] == args.ifo:
    idx = coinc_trig_file[args.analysis_level]['trigger_id1'][Nth_loudest_idx]
else:
    idx = coinc_trig_file[args.analysis_level]['trigger_id2'][Nth_loudest_idx]

# get info about single detector triggers that comprise loudest background
# event and calculate newSNR
snr = single_trig_file[args.ifo]['snr'][idx]
chisq = single_trig_file[args.ifo]['chisq'][idx]
chisq_dof = single_trig_file[args.ifo]['chisq_dof'][idx]
reduced_chisq = chisq / (2 * chisq_dof - 2)
newsnr = pycbc.events.ranking.newsnr(snr, reduced_chisq)
cbc_end_time = single_trig_file[args.ifo]['end_time'][idx]
template_id = single_trig_file[args.ifo]['template_id'][idx]
m1 = template_file['mass1'][template_id]
m2 = template_file['mass2'][template_id]
s1z = template_file['spin1z'][template_id]
s2z = template_file['spin2z'][template_id]

omicron_start_time = cbc_end_time - args.plot_window
omicron_end_time = cbc_end_time + args.plot_window

logging.info('Fetching omicron triggers')

# Generate list of directories to search over
gps_era_start = str(omicron_start_time)[:5]
gps_era_end = str(omicron_end_time)[:5]
# build a list rather than a map object so the emptiness check below
# works under Python 3
eras = [str(era) for era in range(int(gps_era_start), int(gps_era_end))]
if not eras:
    eras = [gps_era_start]

# Grab all relevant Omicron trigger files
omicron_times = []
omicron_snr = []
omicron_freq = []
for era in eras:
    # Generate list of all Omicron SnglBurst xml trigger files
    file_list = glob.glob(args.omicron_dir +
                          '/%s/%s_Omicron/%s/%s-%s_Omicron-*.xml.gz'
                          % (args.ifo, args.omicron_channel, era, args.ifo,
                             args.omicron_channel.replace('-', '_')))
    # Parse trigger files into SNR, time, and frequency for Omicron triggers
    for file_name in file_list:
        omicron_xml = utils.load_filename(
            file_name, contenthandler=LIGOLWContentHandler)
        snglburst_table = lsctables.SnglBurstTable.get_table(omicron_xml)
        for row in snglburst_table:
            if (row.snr > args.omicron_snr_thresh and
                    omicron_start_time < row.peak_time < omicron_end_time):
                omicron_times.append(row.peak_time +
                                     row.peak_time_ns * 10**(-9))
                omicron_snr.append(row.snr)
                omicron_freq.append(row.peak_frequency)

# Generate inspiral waveform and calculate f(t) to plot on top of Omicron
# triggers
hp, hc = get_td_waveform(approximant='SEOBNRv2',
                         mass1=m1, mass2=m2,
                         spin1x=0, spin1y=0, spin1z=s1z,
                         spin2x=0, spin2y=0, spin2z=s2z,
                         delta_t=(1. / 32768.), f_lower=30)
f = frequency_from_polarizations(hp, hc)
amp = amplitude_from_polarizations(hp, hc)
stop_idx = amp.abs_max_loc()[1]
f = f[:stop_idx]
freq = np.array(f.data)
times = np.array(f.sample_times) + cbc_end_time

logging.info('Plotting')
plt.figure(0)
cm = plt.cm.get_cmap('Reds')
plt.scatter(omicron_times, omicron_freq, c=omicron_snr, s=30, cmap=cm,
            linewidth=0)
plt.grid(visible=True, which='both')
cbar = plt.colorbar()
cbar.set_label('%s Omicron trigger SNR' % (args.ifo))
plt.yscale('log')
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
plt.xlim(omicron_start_time, omicron_end_time)
plt.suptitle('%s CBC trigger SNR = ' % (args.ifo) + format(snr, '.2f') +
             ", newSNR = " + format(newsnr, '.2f'), fontsize=12)
plt.title(format(m1, '.2f') + " - " + format(m2, '.2f') +
          " solar masses at GPS time " + format(cbc_end_time, '.2f'),
          fontsize=12)
# plt.hold was removed in modern matplotlib; overplotting is the default
plt.plot(times, freq)
plt.savefig(args.output_file)
logging.info('Done! Exiting script.')
file_length: 6224
avg_line_length: 39.16129
max_line_length: 102
extension_type: py
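The script above ranks triggers by newSNR via pycbc.events.ranking.newsnr. As a reading aid, here is a hedged numpy sketch of the standard chi-squared re-weighting that function is commonly described as applying; the exponents q=6, n=2 are the usual defaults, stated as an assumption rather than read from this file.

import numpy as np

def newsnr_sketch(snr, reduced_chisq, q=6.0, n=2.0):
    # Re-weighted SNR: unchanged when the signal-based chi-squared is
    # good (reduced chisq <= 1), suppressed otherwise. The default
    # exponents mirror the commonly quoted PyCBC choice (assumption).
    snr = np.atleast_1d(snr).astype(float)
    rchisq = np.atleast_1d(reduced_chisq).astype(float)
    out = snr.copy()
    mask = rchisq > 1
    out[mask] = snr[mask] * ((1 + rchisq[mask] ** (q / n)) / 2) ** (-1.0 / q)
    return out

print(newsnr_sketch(8.0, 0.9))  # ~8.0: a clean trigger keeps its SNR
print(newsnr_sketch(8.0, 4.0))  # a noisy trigger is down-weighted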
repo: pycbc
file: pycbc-master/test/test_correlate.py
code:
# Copyright (C) 2012 Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are the unittests for the correlate functions in
pycbc.filter.matchedfilter_cpu
"""
import unittest
import numpy
from pycbc.types import *
from pycbc.scheme import *
from pycbc.filter import *
from utils import parse_args_all_schemes, simple_exit
from pycbc.filter.matchedfilter import BatchCorrelator, Correlator

_scheme, _context = parse_args_all_schemes("correlate")

from pycbc.filter.matchedfilter_cpu import correlate_numpy
trusted_correlate = correlate_numpy


class Testcorrelate(unittest.TestCase):
    def setUp(self, *args):
        self.context = _context
        self.scheme = _scheme
        self.tolerance = 1e-6
        xr = numpy.random.uniform(low=-1, high=1.0, size=2**20)
        yr = numpy.random.uniform(low=-1, high=1.0, size=2**20)
        xi = numpy.random.uniform(low=-1, high=1.0, size=2**20)
        yi = numpy.random.uniform(low=-1, high=1.0, size=2**20)
        self.x = Array(xr + xi * 1.0j, dtype=complex64)
        self.y = Array(yr + yi * 1.0j, dtype=complex64)
        self.z = zeros(2**20, dtype=complex64)
        trusted_correlate(self.x, self.y, self.z)

    def test_correlate(self):
        with self.context:
            z = zeros(2**20, dtype=complex64)
            correlate(self.x, self.y, z)
            self.assertTrue(self.z.almost_equal_elem(z, self.tolerance))

    def test_correlator(self):
        x = self.x * 1
        y = self.y * 1
        z = self.z * 0
        corr = Correlator(x, y, z)
        corr.correlate()
        self.assertTrue(z.almost_equal_elem(self.z, self.tolerance))

    def test_batch_correlate(self):
        size = len(self.x)
        xs = [self.x + 0, self.x + 1, self.x + 2, self.x + 3]
        zs = [self.z * 0, self.z * 1, self.z * 2, self.z * 3]
        b = BatchCorrelator(xs, zs, size)
        b.execute(self.y)
        for i in range(len(xs)):
            trusted_correlate(xs[i], self.y, self.z)
            self.assertTrue(self.z.almost_equal_elem(zs[i], self.tolerance))


suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(Testcorrelate))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
file_length: 3141
avg_line_length: 35.534884
max_line_length: 85
extension_type: py
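The trusted reference the test compares against is, in effect, an elementwise conjugate-multiply. The sketch below restates that behavior in plain numpy; treating correlate_numpy as out = conj(x) * y is an assumption about its contract, not something read from this file.

import numpy as np

def correlate_reference(x, y, out):
    # Elementwise conjugate-multiply: out = conj(x) * y
    # (assumed contract of correlate_numpy, stated as a sketch).
    out[:] = np.conjugate(x) * y

x = np.random.uniform(-1, 1, 8) + 1j * np.random.uniform(-1, 1, 8)
y = np.random.uniform(-1, 1, 8) + 1j * np.random.uniform(-1, 1, 8)
z = np.empty(8, dtype=np.complex128)
correlate_reference(x, y, z)
assert np.allclose(z, x.conj() * y)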
repo: pycbc
file: pycbc-master/test/test_significance_module.py
code:
"""Unit test for converting sets of statistic values into significances."""

import unittest
import argparse
import itertools
import copy
import numpy as np
from utils import simple_exit
from pycbc.events import significance


def parse_args(args):
    # Helper function to convert a list of flags/options into
    # an arguments structure
    parser = argparse.ArgumentParser()
    significance.insert_significance_option_group(parser)
    return parser, parser.parse_args(args)


ifos = ['H1', 'L1', 'V1']

# What combinations of the ifos can we make?
combos = []
for l in np.arange(len(ifos)) + 1:
    combos += [''.join(c) for c in itertools.combinations(ifos, l)]


class SignificanceParserTest(unittest.TestCase):
    def setUp(self):
        # Set up some things we will want to use in the tests:
        self.maxDiff = None
        self.ifos = copy.copy(ifos)
        self.combos = copy.copy(combos)


# Tuples of inputs and the errors they should create
tests_which_sysexit = []

# Try to use a calculation method which doesn't exist
tests_which_sysexit.append((['--far-calculation-method',
                             'H1L1:nonexistent_method'],
                            'method_doesnt_exist'))

# Try to set a fit threshold when using n_louder method
tests_which_sysexit.append((['--fit-threshold', 'H1L1:6'],
                            'n_louder_with_threshold'))

# Try to set a fit function when using n_louder method
tests_which_sysexit.append((['--fit-function', 'H1L1:exponential'],
                            'function_with_n_louder'))

# Try to set a fit threshold which is not a number
tests_which_sysexit.append((['--fit-threshold', 'H1L1:not_a_number'],
                            'threshold_not_a_number'))

# Try to set a fit function which isn't expected
tests_which_sysexit.append((['--far-calculation-method', 'H1L1:trigger_fit',
                             '--fit-function', 'H1L1:spanish_inquisition'],
                            'function_doesnt_exist'))

# Dynamically add sysexit tests into the class
for test_sysexit in tests_which_sysexit:
    parser, args = parse_args(test_sysexit[0])

    def check_sysexit_test(self, a=args, p=parser):
        with self.assertRaises(SystemExit):
            significance.check_significance_options(a, p)

    setattr(SignificanceParserTest,
            'test_parser_' + test_sysexit[1],
            check_sysexit_test)

# Set up the default values of the output dictionary, we will edit this
# for each test
default_dict = {}
# Default Values
for combo in combos:
    default_dict[combo] = copy.deepcopy(significance._default_opt_dict)

tests_which_pass = []

# Does passing no options return the default dictionary?
tests_which_pass.append(([], default_dict, 'default_vals'))

# Try to add a detector combination which does not exist
# - should return dictionary including the nonexistent combination
# as we want to be able to give combos to scripts where they aren't valid
extra_combo_dict = copy.deepcopy(default_dict)
extra_combo_dict['H1G1'] = {}
extra_combo_dict['H1G1']['method'] = 'trigger_fit'
extra_combo_dict['H1G1']['fit_function'] = None
extra_combo_dict['H1G1']['fit_threshold'] = 6.
tests_which_pass.append((['--far-calculation-method', 'H1G1:trigger_fit',
                          '--fit-threshold', 'H1G1:6'],
                         extra_combo_dict,
                         'extra_combo'))

# Supply different methods for the different combinations, and check that
# they are taken in properly
test_dict = copy.deepcopy(default_dict)
test_dict['H1L1']['method'] = 'trigger_fit'
test_dict['H1']['method'] = 'trigger_fit'
test_dict['L1']['method'] = 'trigger_fit'
test_dict['H1L1']['fit_function'] = 'power'
test_dict['H1']['fit_function'] = 'rayleigh'
test_dict['L1']['fit_function'] = 'exponential'
test_dict['H1L1']['fit_threshold'] = 6
test_dict['H1']['fit_threshold'] = 5.5
test_dict['L1']['fit_threshold'] = 5
calc_methods = ['H1L1:trigger_fit', 'H1:trigger_fit', 'L1:trigger_fit']
functions = ['H1L1:power', 'H1:rayleigh', 'L1:exponential']
thresholds = ['H1L1:6', 'H1:5.5', 'L1:5']
tests_which_pass.append((['--far-calculation-method'] + calc_methods +
                         ['--fit-function'] + functions +
                         ['--fit-threshold'] + thresholds,
                         test_dict,
                         'different_combos'))

# Dynamically add value tests for the parser
for test_values in tests_which_pass:
    parser, args = parse_args(test_values[0])

    def digest_values_test(self, a=args, tv=test_values[1]):
        method_dict = significance.digest_significance_options(
            self.combos, a)
        self.assertEqual(method_dict, tv)

    setattr(SignificanceParserTest,
            'test_parser_values_' + test_values[2],
            digest_values_test)


class SignificanceMethodTest(unittest.TestCase):
    def setUp(self):
        self.test_fg_stat = np.random.normal(loc=5, scale=2, size=50)
        self.test_bg_stat = np.random.normal(loc=5, scale=2, size=500)
        self.dec_facs = np.ones_like(self.test_bg_stat)


method_functions = {
    'n_louder': [None],
    'trigger_fit': ['exponential', 'rayleigh']
}

# Dynamically add method tests into the class
for method in significance._significance_meth_dict:
    for function in method_functions[method]:
        method_dict = {}
        method_dict['method'] = method
        method_dict['fit_function'] = function
        method_dict['fit_threshold'] = None if not function else 0

        def meth_test(self, md=method_dict):
            # use the bound default md, not the loop variable, so each
            # generated test keeps its own options
            back_cnum, fnlouder = significance.get_n_louder(
                self.test_bg_stat,
                self.test_fg_stat,
                self.dec_facs,
                **md)

            # Basic sanity check - there should be one n_louder value
            # per stat value
            self.assertEqual(len(back_cnum), len(self.test_bg_stat))
            self.assertEqual(len(fnlouder), len(self.test_fg_stat))

            # None of the output should be NaN or infinite
            self.assertTrue(np.isfinite(back_cnum).all())
            self.assertTrue(np.isfinite(fnlouder).all())

            # The background stat value order should be the reverse of the
            # n_louder order
            back_stat_sort = np.argsort(self.test_bg_stat)
            back_far_sort = np.argsort(back_cnum)
            self.assertTrue(np.array_equal(back_stat_sort,
                                           back_far_sort[::-1]))

            fore_stat_sort = np.argsort(self.test_fg_stat)
            fore_far_sort = np.argsort(fnlouder)
            # As fg events could have an equal number of louder bg events,
            # the argsort may not be exactly the opposite way round for the
            # far sort and the stat sort. So we need to use the recovered
            # n_louder as the equality test array
            self.assertTrue(np.array_equal(fnlouder[fore_stat_sort],
                                           fnlouder[fore_far_sort][::-1]))

        setattr(SignificanceMethodTest,
                'test_%s_%s' % (method, function),
                meth_test)


# create and populate unittest's test suite
suite = unittest.TestSuite()
test_loader = unittest.TestLoader()
suite.addTest(test_loader.loadTestsFromTestCase(SignificanceMethodTest))
suite.addTest(test_loader.loadTestsFromTestCase(SignificanceParserTest))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
file_length: 7904
avg_line_length: 37.188406
max_line_length: 75
extension_type: py
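The monotonicity property the test asserts follows from what "n louder" means: for each foreground statistic value, count the background events with a larger statistic. A hedged re-derivation using searchsorted (not pycbc's implementation, which also handles decimation factors):

import numpy as np

def n_louder_sketch(bg_stat, fg_stat):
    # Count, for each foreground value, the background entries strictly
    # above it. Sketch only; pycbc's get_n_louder is the real thing.
    bg_sorted = np.sort(bg_stat)
    return len(bg_sorted) - np.searchsorted(bg_sorted, fg_stat, side='right')

bg = np.random.normal(5, 2, 500)
fg = np.random.normal(5, 2, 50)
counts = n_louder_sketch(bg, fg)
# louder foreground events have (weakly) fewer louder background events
assert (np.diff(counts[np.argsort(fg)]) <= 0).all()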
repo: pycbc
file: pycbc-master/test/test_conversions.py
code:
import numpy
from pycbc import coordinates
from pycbc import distributions
from pycbc import conversions
import unittest
from utils import simple_exit

seed = 8202
numpy.random.seed(seed)


def almost_equal(derived_val, check_val, precision=1e-8):
    """Checks whether the difference in the derived and check values are
    less than the given precision.
    """
    allpass = numpy.allclose(derived_val, check_val, atol=precision)
    if not allpass:
        absdiff = abs(derived_val - check_val)
        maxidx = absdiff.argmax()
        maxdiff = absdiff[maxidx]
    else:
        maxdiff = maxidx = None
    return allpass, maxdiff, maxidx


def angle_almost_equal(derived_val, check_val, precision=1e-8):
    """Checks whether the given angles are almost equal. This is done by
    taking the modulus of each value on [0, 2*pi) before comparing.
    """
    derived_val = numpy.mod(derived_val, 2*numpy.pi)
    check_val = numpy.mod(check_val, 2*numpy.pi)
    return almost_equal(derived_val, check_val, precision=precision)


class TestParams(unittest.TestCase):
    def setUp(self, *args):
        self.numtests = 1000
        self.precision = 1e-8
        self.f_lower = 10.
        # create some component masses to work with
        self.m1 = numpy.random.uniform(1., 100., size=self.numtests)
        self.m2 = numpy.random.uniform(1., 100., size=self.numtests)
        # create some spins to work with
        spin_angledist = distributions.UniformSolidAngle()
        rvals = spin_angledist.rvs(size=self.numtests)
        self.spin1_polar = rvals['theta']
        self.spin1_az = rvals['phi']
        self.spin1_amp = numpy.random.uniform(0., 1., size=self.numtests)
        rvals = spin_angledist.rvs(size=self.numtests)
        self.spin2_polar = rvals['theta']
        self.spin2_az = rvals['phi']
        self.spin2_amp = numpy.random.uniform(0., 1., size=self.numtests)
        # calculate derived parameters from each
        self.mp = conversions.primary_mass(self.m1, self.m2)
        self.ms = conversions.secondary_mass(self.m1, self.m2)
        self.mtotal = conversions.mtotal_from_mass1_mass2(self.m1, self.m2)
        self.q = conversions.q_from_mass1_mass2(self.m1, self.m2)
        self.invq = conversions.invq_from_mass1_mass2(self.m1, self.m2)
        self.mchirp = conversions.mchirp_from_mass1_mass2(self.m1, self.m2)
        self.eta = conversions.eta_from_mass1_mass2(self.m1, self.m2)
        self.tau0 = conversions.tau0_from_mtotal_eta(self.mtotal, self.eta,
                                                     self.f_lower)
        self.tau3 = conversions.tau3_from_mtotal_eta(self.mtotal, self.eta,
                                                     self.f_lower)
        self.spin1x, self.spin1y, self.spin1z = \
            coordinates.spherical_to_cartesian(self.spin1_amp, self.spin1_az,
                                               self.spin1_polar)
        self.spin2x, self.spin2y, self.spin2z = \
            coordinates.spherical_to_cartesian(self.spin2_amp, self.spin2_az,
                                               self.spin2_polar)
        self.effective_spin = conversions.chi_eff(self.m1, self.m2,
                                                  self.spin1z, self.spin2z)
        self.chi_p = conversions.chi_p(self.m1, self.m2, self.spin1x,
                                       self.spin1y, self.spin2x, self.spin2y)
        self.primary_spinx = conversions.primary_spin(self.m1, self.m2,
                                                      self.spin1x,
                                                      self.spin2x)
        self.primary_spiny = conversions.primary_spin(self.m1, self.m2,
                                                      self.spin1y,
                                                      self.spin2y)
        self.primary_spinz = conversions.primary_spin(self.m1, self.m2,
                                                      self.spin1z,
                                                      self.spin2z)
        self.secondary_spinx = conversions.secondary_spin(self.m1, self.m2,
                                                          self.spin1x,
                                                          self.spin2x)
        self.secondary_spiny = conversions.secondary_spin(self.m1, self.m2,
                                                          self.spin1y,
                                                          self.spin2y)
        self.secondary_spinz = conversions.secondary_spin(self.m1, self.m2,
                                                          self.spin1z,
                                                          self.spin2z)

    def test_physical_consistency(self):
        """Tests whether derived parameters pass physical checks; e.g.,
        eta <= 0.25.
        """
        self.assertTrue((self.mp >= self.ms).all(),
                        'primary mass not >= secondary mass')
        self.assertTrue((self.q >= 1.).all(), 'mass ratio not >= 1')
        self.assertTrue((self.invq <= 1.).all(),
                        'inverse mass ratio not <= 1')
        self.assertTrue((self.eta <= 0.25).all(), 'eta not <= 0.25')
        self.assertTrue((abs(self.effective_spin) <= 1.).all(),
                        'abs(effective spin) not <= 1')
        for which_comp in ['primary', 'secondary']:
            for coord in ['x', 'y', 'z']:
                spinparam = '{}_spin{}'.format(which_comp, coord)
                self.assertTrue((abs(getattr(self, spinparam)) <= 1.).all(),
                                '{} not <= 1.'.format(spinparam))

    def test_round_robin(self):
        """Computes inverse transformations to get original parameters from
        derived, then compares them to the original.
        """
        msg = '{} does not recover same {}; max difference: {}; inputs: {}'
        # following lists (function to check,
        #                  arguments to pass to the function,
        #                  name of self's attribute to compare to)
        fchecks = [
            (conversions.mass1_from_mtotal_q, (self.mtotal, self.q), 'mp'),
            (conversions.mass2_from_mtotal_q, (self.mtotal, self.q), 'ms'),
            (conversions.mass1_from_mtotal_eta, (self.mtotal, self.eta),
             'mp'),
            (conversions.mass2_from_mtotal_eta, (self.mtotal, self.eta),
             'ms'),
            (conversions.mtotal_from_mchirp_eta, (self.mchirp, self.eta),
             'mtotal'),
            (conversions.mass1_from_mchirp_eta, (self.mchirp, self.eta),
             'mp'),
            (conversions.mass2_from_mchirp_eta, (self.mchirp, self.eta),
             'ms'),
            (conversions.mass2_from_mchirp_mass1, (self.mchirp, self.mp),
             'ms'),
            (conversions.mass2_from_mass1_eta, (self.mp, self.eta), 'ms'),
            (conversions.mass1_from_mass2_eta, (self.ms, self.eta), 'mp'),
            (conversions.eta_from_q, (self.q,), 'eta'),
            (conversions.mass1_from_mchirp_q, (self.mchirp, self.q), 'mp'),
            (conversions.mass2_from_mchirp_q, (self.mchirp, self.q), 'ms'),
            (conversions.tau0_from_mchirp, (self.mchirp, self.f_lower),
             'tau0'),
            (conversions.tau0_from_mass1_mass2,
             (self.m1, self.m2, self.f_lower), 'tau0'),
            (conversions.tau3_from_mass1_mass2,
             (self.m1, self.m2, self.f_lower), 'tau3'),
            (conversions.mchirp_from_tau0, (self.tau0, self.f_lower),
             'mchirp'),
            (conversions.mtotal_from_tau0_tau3,
             (self.tau0, self.tau3, self.f_lower), 'mtotal'),
            (conversions.eta_from_tau0_tau3,
             (self.tau0, self.tau3, self.f_lower), 'eta'),
            (conversions.mass1_from_tau0_tau3,
             (self.tau0, self.tau3, self.f_lower), 'mp'),
            (conversions.mass2_from_tau0_tau3,
             (self.tau0, self.tau3, self.f_lower), 'ms'),
            (conversions.chi_eff_from_spherical,
             (self.m1, self.m2, self.spin1_amp, self.spin1_polar,
              self.spin2_amp, self.spin2_polar), 'effective_spin'),
            (conversions.chi_p_from_spherical,
             (self.m1, self.m2, self.spin1_amp, self.spin1_az,
              self.spin1_polar, self.spin2_amp, self.spin2_az,
              self.spin2_polar), 'chi_p'),
        ]
        for func, args, compval in fchecks:
            passed, maxdiff, maxidx = almost_equal(func(*args),
                                                   getattr(self, compval),
                                                   self.precision)
            if not passed:
                failinputs = [p[maxidx] for p in args]
            else:
                failinputs = None
            self.assertTrue(passed, msg.format(func, compval, maxdiff,
                                               failinputs))

    def test_chip_compare_lalsuite(self):
        """Compares the effective precession parameter between the pycbc
        implementation and the lalsuite implementation.
        """
        import lal
        import lalsimulation as lalsim
        msg = '{} does not recover same {}; max difference: {}; inputs: {}'
        f_ref = self.f_lower
        chip_lal = []
        for i in range(len(self.m1)):
            _, _, tmp, _, _, _, _ = \
                lalsim.SimIMRPhenomPCalculateModelParametersFromSourceFrame(
                    self.m1[i]*lal.MSUN_SI, self.m2[i]*lal.MSUN_SI, f_ref,
                    0., 0., self.spin1x[i], self.spin1y[i], self.spin1z[i],
                    self.spin2x[i], self.spin2y[i], self.spin2z[i],
                    lalsim.IMRPhenomPv2_V)
            chip_lal.append(tmp)
        chip_pycbc = conversions.chi_p(
            self.m1, self.m2, self.spin1x, self.spin1y, self.spin2x,
            self.spin2y)
        passed, maxdiff, maxidx = almost_equal(chip_lal, chip_pycbc,
                                               self.precision)
        failinputs = (
            self.m1[maxidx], self.m2[maxidx],
            self.spin1x[maxidx], self.spin1y[maxidx],
            self.spin2x[maxidx], self.spin2y[maxidx]
        )
        self.assertTrue(passed, msg.format("conversions.chi_p", "chi_p",
                                           maxdiff, failinputs))


suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestParams))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
file_length: 9562
avg_line_length: 49.068063
max_line_length: 96
extension_type: py
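The round-trip checks above hinge on textbook mass conversions. As a worked illustration of one such pair (written out here for clarity; pycbc's conversions module is the authoritative implementation):

import numpy as np

def mchirp_eta_from_masses(m1, m2):
    # symmetric mass ratio eta = m1*m2/(m1+m2)^2 <= 0.25
    # chirp mass Mc = (m1+m2) * eta^(3/5)
    mtotal = m1 + m2
    eta = m1 * m2 / mtotal**2
    mchirp = mtotal * eta**(3.0 / 5)
    return mchirp, eta

def masses_from_mchirp_eta(mchirp, eta):
    # invert: M = Mc * eta^(-3/5), then solve the quadratic m1*m2 = eta*M^2
    mtotal = mchirp * eta**(-3.0 / 5)
    disc = np.sqrt(1 - 4 * eta)
    m1 = 0.5 * mtotal * (1 + disc)  # primary
    m2 = 0.5 * mtotal * (1 - disc)  # secondary
    return m1, m2

mc, eta = mchirp_eta_from_masses(30.0, 25.0)
m1, m2 = masses_from_mchirp_eta(mc, eta)
assert abs(m1 - 30.0) < 1e-9 and abs(m2 - 25.0) < 1e-9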
repo: pycbc
file: pycbc-master/test/test_frame.py
code:
# Copyright (C) 2012 Andrew Miller, Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
'''
These are the unittests for the pycbc frame/cache reading functions
'''
import pycbc
import unittest
import pycbc.frame
import numpy
from astropy.utils.data import download_file
import lal
from pycbc.types import TimeSeries
from utils import parse_args_cpu_only, simple_exit

# Frame tests only need to happen on the CPU
parse_args_cpu_only("Frame I/O")


class FrameTestBase(unittest.TestCase):
    __test__ = False

    def setUp(self):
        numpy.random.seed(1023)
        self.size = pow(2, 12)
        self.data1 = numpy.array(numpy.random.rand(self.size),
                                 dtype=self.dtype)
        self.data2 = numpy.array(numpy.random.rand(self.size),
                                 dtype=self.dtype)
        # If the dtype is complex, we should throw in some complex values
        # as well
        if self.dtype == numpy.complex64 or self.dtype == numpy.complex128:
            self.data1 += numpy.random.rand(self.size) * 1j
            self.data2 += numpy.random.rand(self.size) * 1j
        self.delta_t = .5
        self.epoch = lal.LIGOTimeGPS(123456, 0)
        self.expected_data1 = TimeSeries(self.data1, dtype=self.dtype,
                                         epoch=self.epoch,
                                         delta_t=self.delta_t)
        self.expected_data2 = TimeSeries(self.data2, dtype=self.dtype,
                                         epoch=self.epoch,
                                         delta_t=self.delta_t)

    def test_frame(self):
        # TODO also test reading a cache
        # This is a file in the temp directory that will be deleted when it
        # is garbage collected
        url = 'https://github.com/gwastro/pycbc-config/raw/master/'
        url += 'test_data_files/frametest{}.gwf'
        filename = download_file(url.format(self.data1.dtype), cache=True)

        # Make sure we can run from one directory higher as well
        import os.path
        if not os.path.exists(filename):
            filename = "test/" + filename

        # Now we will create a frame file, specifying that it is a
        # timeseries
        #Fr.frputvect(filename,
        #    [{'name': 'channel1', 'data': self.data1,
        #      'start': int(self.epoch), 'dx': self.delta_t, 'type': 1},
        #     {'name': 'channel2', 'data': self.data2,
        #      'start': int(self.epoch), 'dx': self.delta_t, 'type': 1}])

        # Reading just one channel first
        ts1 = pycbc.frame.read_frame(filename, 'channel1')
        # Checking all values
        self.assertEqual(ts1, self.expected_data1)
        # Now checking the start time
        self.assertEqual(ts1.start_time, self.epoch)
        # And the duration
        self.assertEqual(ts1.end_time - ts1.start_time,
                         self.size * self.delta_t)

        # Now reading multiple channels
        ts2 = pycbc.frame.read_frame(filename, ['channel1', 'channel2'])
        # We should get back a list
        self.assertTrue(type(ts2) is list)
        self.assertEqual(ts2[0], self.expected_data1)
        self.assertEqual(ts2[1], self.expected_data2)
        self.assertEqual(ts2[0].start_time, self.epoch)
        self.assertEqual(ts2[1].start_time, self.epoch)
        self.assertEqual(ts2[0].end_time - ts2[0].start_time,
                         self.size * self.delta_t)
        self.assertEqual(ts2[1].end_time - ts2[1].start_time,
                         self.size * self.delta_t)

        # These are the times and indices for the segment we will try to
        # read
        start = self.epoch + 10
        end = self.epoch + 50
        startind = int(10 / self.delta_t)
        endind = int(50 / self.delta_t)

        # Now reading in a specific segment with an integer
        ts3 = pycbc.frame.read_frame(filename, 'channel1',
                                     start_time=int(start),
                                     end_time=int(end))
        # The same, but with a LIGOTimeGPS for the start and end times
        ts4 = pycbc.frame.read_frame(filename, 'channel1',
                                     start_time=start, end_time=end)

        # Now we will check those two TimeSeries
        self.assertEqual(ts3, self.expected_data1[startind:endind])
        self.assertEqual(ts4, self.expected_data1[startind:endind])
        self.assertTrue(40 - (float(ts3.end_time) - float(ts3.start_time))
                        < self.delta_t)
        self.assertEqual(ts3.start_time, start)
        self.assertTrue(40 - (float(ts4.end_time) - float(ts4.start_time))
                        < self.delta_t)
        self.assertEqual(ts4.start_time, start)

        # And now some cases that should raise errors
        # There must be a span greater than 0
        self.assertRaises(ValueError, pycbc.frame.read_frame, filename,
                          'channel1', start_time=self.epoch,
                          end_time=self.epoch)
        # The start must be before the end
        self.assertRaises(ValueError, pycbc.frame.read_frame, filename,
                          'channel1', start_time=self.epoch + 1,
                          end_time=self.epoch)


# We take a factory approach so we can test all possible dtypes we support
TestClasses = []
types = [numpy.float32, numpy.float64, numpy.complex64, numpy.complex128]

for ty in types:
    klass = type('{0}_Test'.format(ty.__name__), (FrameTestBase,),
                 {'dtype': ty})
    klass.__test__ = True
    vars()[klass.__name__] = klass
    TestClasses.append(klass)
del klass

if __name__ == '__main__':
    suite = unittest.TestSuite()
    for klass in TestClasses:
        suite.addTest(unittest.TestLoader().loadTestsFromTestCase(klass))
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
file_length: 6510
avg_line_length: 41.279221
max_line_length: 96
extension_type: py
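The segment reads above reduce to index arithmetic on the sample grid. A hedged sketch of that arithmetic, mirroring the test's delta_t and window:

import numpy as np

delta_t = 0.5
epoch = 123456                       # GPS start of the series
start, end = epoch + 10, epoch + 50

startind = int((start - epoch) / delta_t)   # 20
endind = int((end - epoch) / delta_t)       # 100
data = np.arange(4096)
segment = data[startind:endind]             # 40 s of data = 80 samples
assert len(segment) * delta_t == 40.0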
repo: pycbc
file: pycbc-master/test/test_threshold.py
code:
# Copyright (C) 2012 Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are the unittests for the pycbc event thresholding functions
"""
import unittest
import numpy
from pycbc.types import *
from pycbc.scheme import *
from pycbc.events import *
from utils import parse_args_all_schemes, simple_exit

_scheme, _context = parse_args_all_schemes("Threshold")

from pycbc.events.threshold_cpu import threshold_numpy
trusted_threshold = threshold_numpy


class TestThreshold(unittest.TestCase):
    def setUp(self, *args):
        self.context = _context
        self.scheme = _scheme
        r = numpy.random.uniform(low=-1, high=1.0, size=2**20)
        i = numpy.random.uniform(low=-1, high=1.0, size=2**20)
        v = r + i * 1.0j
        self.series = Array(v, dtype=complex64)
        self.threshold = 1.3
        self.locs, self.vals = trusted_threshold(self.series, self.threshold)
        self.tolerance = 1e-6
        print(len(self.locs), len(self.vals))

    def test_threshold(self):
        with self.context:
            locs, vals = threshold(self.series, self.threshold)
            self.assertTrue((locs == self.locs).all())
            self.assertTrue((vals == self.vals).all())
            print(len(locs), len(vals))


suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestThreshold))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
file_length: 2341
avg_line_length: 35.59375
max_line_length: 79
extension_type: py
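The behavior being pinned down is simple to state in plain numpy: return the indices and complex values of all samples whose magnitude exceeds the threshold. A hedged restatement (not the pycbc code itself):

import numpy as np

def threshold_sketch(series, thresh):
    # indices and values of samples with |sample| > thresh
    locs = np.where(np.abs(series) > thresh)[0]
    return locs, series[locs]

v = np.random.uniform(-1, 1, 2**10) + 1j * np.random.uniform(-1, 1, 2**10)
locs, vals = threshold_sketch(v, 1.3)
assert (np.abs(vals) > 1.3).all()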
repo: pycbc
file: pycbc-master/test/test_coinc_stat.py
code:
"""Unit test for coincident ranking statistic implementations."""

import unittest
import numpy as np
from utils import parse_args_cpu_only, simple_exit
from pycbc.events.stat import statistic_dict

# this test only needs to happen on the CPU
parse_args_cpu_only('coinc stats')


class CoincStatTest(unittest.TestCase):
    def setUp(self):
        # one could loop over all single rankings
        # and detector combinations for a complete test
        self.single_rank = 'snr'
        self.detectors = ['H1', 'L1']

        # simulate some single-detector triggers from Gaussian noise
        self.num_trigs = n = 100
        self.single_trigs = {}
        for d in self.detectors:
            self.single_trigs[d] = {
                'snr': np.random.chisquare(2, size=n) ** 0.5,
                'coa_phase': np.random.uniform(0, 2 * np.pi, size=n),
                'end_time': 1295441120 + np.random.uniform(-0.01, 0.01,
                                                           size=n),
                'sigmasq': np.random.uniform(1, 10, size=n)
            }

        self.time_slide_shift = 0.1
        self.time_slide_vector = np.zeros(n, dtype=int)

        # here we should also prepare some files needed by some stats,
        # like the PTA histograms or fake trigger fits
        self.aux_files = []


# dynamically insert a test case method for each available statistic
for stat_name in statistic_dict:
    # FIXME until we can fake the required files,
    # do not test stats that require them
    if 'phasetd' in stat_name or 'exp_fit' in stat_name \
            or 'ExpFit' in stat_name:
        continue

    def stat_test_method(self, sn=stat_name):
        # instantiate an object for this stat
        stat = statistic_dict[sn](self.single_rank, files=self.aux_files,
                                  ifos=self.detectors)

        # get single-detector statistic at each detector
        # from the fake triggers
        single_ranks = [(d, stat.single(self.single_trigs[d]))
                        for d in self.detectors]

        # pretend the fake triggers are coincident,
        # and rank the coincidences
        coinc_ranks = stat.rank_stat_coinc(
            single_ranks,
            self.time_slide_vector,
            self.time_slide_shift,
            (0, 0)  # does this make sense?
        )

        # basic sanity check on the produced ranks
        self.assertEqual(len(coinc_ranks), self.num_trigs)
        self.assertFalse(np.isnan(coinc_ranks).any())

        # here one could add a statistical test based on known analytic
        # behavior of a particular statistic in Gaussian noise

    setattr(CoincStatTest, 'test_' + stat_name, stat_test_method)


# create and populate unittest's test suite
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(CoincStatTest))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
file_length: 2919
avg_line_length: 35.5
max_line_length: 80
extension_type: py
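For intuition, the simplest coincident ranking combines single-detector SNRs in quadrature. The sketch below illustrates that baseline; it is an assumption for illustration, not necessarily how every statistic in statistic_dict ranks coincidences.

import numpy as np

rng = np.random.default_rng(0)
# |complex Gaussian noise| gives SNR^2 ~ chi-squared with 2 dof
snr_h1 = rng.chisquare(2, size=100) ** 0.5
snr_l1 = rng.chisquare(2, size=100) ** 0.5
# quadrature-sum coincident ranking (illustrative baseline)
coinc_rank = np.sqrt(snr_h1**2 + snr_l1**2)
assert (coinc_rank >= np.maximum(snr_h1, snr_l1)).all()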
repo: pycbc
file: pycbc-master/test/test_chisq.py
code:
# Copyright (C) 2012 Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are the unittests for the pycbc chi-squared veto module
"""
import unittest
import numpy
from pycbc.types import *
from pycbc.scheme import *
from utils import parse_args_all_schemes, simple_exit

_scheme, _context = parse_args_all_schemes("correlate")

from pycbc.vetoes.chisq_cpu import chisq_accum_bin_numpy
from pycbc.vetoes import chisq_accum_bin, power_chisq_bins, power_chisq
from pycbc.vetoes import power_chisq_at_points_from_precomputed
from pycbc.filter import resample_to_delta_t
from pycbc.catalog import Merger
from pycbc.psd import interpolate, inverse_spectrum_truncation
from pycbc.waveform import get_fd_waveform
from pycbc.filter import matched_filter_core

trusted_accum = chisq_accum_bin_numpy


class TestChisq(unittest.TestCase):
    def setUp(self, *args):
        self.context = _context
        self.scheme = _scheme
        self.tolerance = 1e-6
        xr = numpy.random.uniform(low=-1, high=1.0, size=2**20)
        xi = numpy.random.uniform(low=-1, high=1.0, size=2**20)
        self.x = Array(xr + xi * 1.0j, dtype=complex64)
        self.z = zeros(2**20, dtype=float32)
        for i in range(0, 4):
            trusted_accum(self.z, self.x)

        m = Merger("GW170814")
        ifos = ['H1', 'L1', 'V1']
        data = {}
        psd = {}
        for ifo in ifos:
            # Read in and condition the data and measure PSD
            ts = m.strain(ifo).highpass_fir(15, 512)
            data[ifo] = resample_to_delta_t(ts, 1.0 / 2048).crop(2, 2)
            p = data[ifo].psd(2)
            p = interpolate(p, data[ifo].delta_f)
            p = inverse_spectrum_truncation(p,
                                            int(2 * data[ifo].sample_rate),
                                            low_frequency_cutoff=15.0)
            psd[ifo] = p

        hp, _ = get_fd_waveform(approximant="IMRPhenomD",
                                mass1=31.36, mass2=31.36,
                                f_lower=20.0, delta_f=data[ifo].delta_f)
        hp.resize(len(psd[ifo]))

        # For each ifo use this template to calculate the SNR time series
        snr = {}
        snr_unnorm = {}
        norm = {}
        corr = {}
        for ifo in ifos:
            snr_unnorm[ifo], corr[ifo], norm[ifo] = \
                matched_filter_core(hp, data[ifo], psd=psd[ifo],
                                    low_frequency_cutoff=20)
            snr[ifo] = snr_unnorm[ifo] * norm[ifo]

        self.snr = snr
        self.snr_unnorm = snr_unnorm
        self.norm = norm
        self.corr = corr
        self.hp = hp
        self.data = data
        self.psd = psd
        self.ifos = ifos

    def test_accum(self):
        with self.context:
            z = zeros(2**20, dtype=float32)
            for i in range(0, 4):
                chisq_accum_bin(z, self.x)
            self.assertTrue(self.z.almost_equal_elem(z, self.tolerance))

    def test_chisq(self):
        chisq_quick = {}
        chisq_full = {}
        chisq_ref = {}
        for ifo in self.ifos:
            nbins = 26
            dof = nbins * 2 - 2
            bins = power_chisq_bins(self.hp, nbins, self.psd[ifo],
                                    low_frequency_cutoff=20.0)
            chisq = power_chisq_at_points_from_precomputed(
                self.corr[ifo],
                self.snr_unnorm[ifo][27402:27492].data,
                self.norm[ifo], bins,
                indices=numpy.arange(27402, 27492))
            chisq_quick[ifo] = chisq / dof

            chisq = power_chisq(self.hp, self.data[ifo], nbins,
                                self.psd[ifo], low_frequency_cutoff=20.0)
            chisq_full[ifo] = chisq[27402:27492] / dof

            max_diff = max(abs(chisq_full[ifo] - chisq_quick[ifo]))
            self.assertTrue(max_diff < 1E-5)


suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestChisq))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
file_length: 4885
avg_line_length: 35.736842
max_line_length: 79
extension_type: py
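The quantity under test is the Allen power chi-squared, which splits the template into p frequency bins of equal power and compares each bin's SNR contribution to its expected share. A hedged restatement of that statistic (pycbc's power_chisq is the real implementation; dof = 2p - 2 as in the test):

import numpy as np

def power_chisq_sketch(rho_bins):
    # chisq = p * sum_l |rho_l - rho/p|^2, where rho_l is the complex SNR
    # contribution of bin l and rho = sum(rho_bins) is the total SNR.
    p = len(rho_bins)
    rho = np.sum(rho_bins)
    return p * np.sum(np.abs(rho_bins - rho / p) ** 2)

# A perfectly template-like signal splits its SNR evenly across bins,
# driving the statistic to zero:
p = 26
rho_bins = np.full(p, (8.0 + 0j) / p)
assert np.isclose(power_chisq_sketch(rho_bins), 0.0)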
repo: pycbc
file: pycbc-master/test/test_waveform.py
code:
# Copyright (C) 2012 Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are the unittests for the pycbc.waveform module
"""
import unittest
import numpy
from numpy import sqrt, cos, sin
from pycbc.scheme import CPUScheme
from pycbc.waveform import get_td_waveform, get_fd_waveform
from pycbc.filter import overlap  # needed by test_spintaylorf2GPU below
from utils import parse_args_all_schemes, simple_exit

_scheme, _context = parse_args_all_schemes("Waveform")

# We only check a few as some require auxiliary files
good_waveforms = ['IMRPhenomD', 'TaylorF2', 'SpinTaylorT5', 'IMRPhenomPv2',
                  'IMRPhenomPv3HM', 'IMRPhenomPv3']


class TestWaveform(unittest.TestCase):
    def setUp(self, *args):
        self.context = _context
        self.scheme = _scheme

    def test_generation(self):
        with self.context:
            for waveform in good_waveforms:
                print(waveform)
                hc, hp = get_td_waveform(approximant=waveform,
                                         mass1=20, mass2=20,
                                         delta_t=1.0 / 4096, f_lower=40)
                self.assertTrue(len(hc) > 0)
            for waveform in good_waveforms:
                print(waveform)
                htilde, g = get_fd_waveform(approximant=waveform,
                                            mass1=20, mass2=20,
                                            delta_f=1.0 / 256, f_lower=40)
                self.assertTrue(len(htilde) > 0)

    def test_spintaylorf2GPU(self):
        print(type(self.context))
        if isinstance(self.context, CPUScheme):
            return
        fl = 25
        delta_f = 1.0 / 256
        for m1 in [3, 5, 15]:
            for m2 in [1., 2., 3.]:
                for s1 in [0.001, 1.0, 10]:
                    for s1Ctheta in [-1., 0., 0.5, 1.]:
                        for s1phi in [0, 2.09, 4.18]:
                            for inclination in [0.2, 1.2]:
                                s1x = s1 * sqrt(1 - s1Ctheta**2) * cos(s1phi)
                                s1y = s1 * sqrt(1 - s1Ctheta**2) * sin(s1phi)
                                s1z = s1 * s1Ctheta
                                # Generate SpinTaylorF2 from lalsimulation
                                hpLAL, hcLAL = get_fd_waveform(
                                    mass1=m1, mass2=m2, spin1x=s1x,
                                    spin1y=s1y, spin1z=s1z, delta_f=delta_f,
                                    f_lower=fl, approximant="SpinTaylorF2",
                                    amplitude_order=0, phase_order=7,
                                    inclination=inclination)
                                # Generate SpinTaylorF2 from SpinTaylorF2.py
                                with self.context:
                                    hp, hc = get_fd_waveform(
                                        mass1=m1, mass2=m2, spin1x=s1x,
                                        spin1y=s1y, spin1z=s1z,
                                        delta_f=delta_f, f_lower=fl,
                                        approximant="SpinTaylorF2",
                                        amplitude_order=0, phase_order=7,
                                        inclination=inclination)

                                o = overlap(hpLAL, hp)
                                self.assertAlmostEqual(1.0, o, places=4)
                                o = overlap(hcLAL, hc)
                                self.assertAlmostEqual(1.0, o, places=4)

                                ampPLAL = numpy.abs(hpLAL.data)
                                ampP = numpy.abs(hp.data)
                                phasePLAL = numpy.unwrap(
                                    numpy.angle(hpLAL.data))
                                phaseP = numpy.unwrap(numpy.angle(hp.data))
                                ampCLAL = numpy.abs(hcLAL.data)
                                ampC = numpy.abs(hc.data)
                                phaseCLAL = numpy.unwrap(
                                    numpy.angle(hcLAL.data))
                                phaseC = numpy.unwrap(numpy.angle(hc.data))

                                indexampP = numpy.where(ampPLAL != 0)
                                indexphaseP = numpy.where(phasePLAL != 0)
                                indexampC = numpy.where(ampCLAL != 0)
                                indexphaseC = numpy.where(phaseCLAL != 0)

                                AmpDiffP = max(abs(
                                    (ampP[indexampP] - ampPLAL[indexampP])
                                    / ampPLAL[indexampP]))
                                PhaseDiffP = max(abs(
                                    (phaseP[indexphaseP]
                                     - phasePLAL[indexphaseP])
                                    / phasePLAL[indexphaseP]))
                                AmpDiffC = max(abs(
                                    (ampC[indexampP] - ampCLAL[indexampP])
                                    / ampCLAL[indexampP]))
                                PhaseDiffC = max(abs(
                                    (phaseC[indexphaseP]
                                     - phaseCLAL[indexphaseP])
                                    / phaseCLAL[indexphaseP]))

                                self.assertTrue(AmpDiffP < 0.00001)
                                self.assertTrue(PhaseDiffP < 0.00001)
                                self.assertTrue(AmpDiffC < 0.00001)
                                self.assertTrue(PhaseDiffC < 0.00001)

                                print("..checked m1: %s m2: %s s1x: %s "
                                      "s1y: %s s1z: %s Inclination: %s"
                                      % (m1, m2, s1x, s1y, s1z, inclination))

    def test_errors(self):
        func = get_fd_waveform
        self.assertRaises(ValueError, func, approximant="BLAH")
        self.assertRaises(ValueError, func, approximant="SpinTaylorF2",
                          mass1=3)
        self.assertRaises(ValueError, func, approximant="SpinTaylorF2",
                          mass1=3, mass2=3)
        self.assertRaises(ValueError, func, approximant="SpinTaylorF2",
                          mass1=3, mass2=3, phase_order=7)
        self.assertRaises(ValueError, func, approximant="SpinTaylorF2",
                          mass1=3, mass2=3, phase_order=7)
        self.assertRaises(ValueError, func, approximant="SpinTaylorF2",
                          mass1=3)

        func = get_fd_waveform
        self.assertRaises(ValueError, func, approximant="BLAH")
        self.assertRaises(ValueError, func, approximant="TaylorF2", mass1=3)
        self.assertRaises(ValueError, func, approximant="TaylorF2",
                          mass1=3, mass2=3)
        self.assertRaises(ValueError, func, approximant="TaylorF2",
                          mass1=3, mass2=3, phase_order=7)
        self.assertRaises(ValueError, func, approximant="TaylorF2",
                          mass1=3, mass2=3, phase_order=7)
        self.assertRaises(ValueError, func, approximant="TaylorF2", mass1=3)

        for func in [get_fd_waveform, get_td_waveform]:
            self.assertRaises(ValueError, func, approximant="BLAH")
            self.assertRaises(ValueError, func, approximant="IMRPhenomB",
                              mass1=3)
            self.assertRaises(ValueError, func, approximant="IMRPhenomB",
                              mass1=3, mass2=3)
            self.assertRaises(ValueError, func, approximant="IMRPhenomB",
                              mass1=3, mass2=3, phase_order=7)
            self.assertRaises(ValueError, func, approximant="IMRPhenomB",
                              mass1=3, mass2=3, phase_order=7)
            self.assertRaises(ValueError, func, approximant="IMRPhenomB",
                              mass1=3)


suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestWaveform))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
file_length: 7464
avg_line_length: 51.570423
max_line_length: 236
extension_type: py
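The comparison above relies on the overlap being 1 when two waveforms agree up to an overall amplitude. A hedged sketch of that normalized inner product (pycbc.filter.overlap additionally handles PSD weighting and frequency cutoffs):

import numpy as np

def overlap_sketch(a, b):
    # normalized real inner product; 1 iff b is a positive multiple of a
    inner = lambda x, y: np.real(np.vdot(x, y))
    return inner(a, b) / np.sqrt(inner(a, a) * inner(b, b))

a = np.random.randn(256) + 1j * np.random.randn(256)
assert np.isclose(overlap_sketch(a, 3.7 * a), 1.0)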
pycbc
pycbc-master/test/test_timeseries.py
# Copyright (C) 2012 Alex Nitz, Andrew Miller, Josh Willis, Tito Dal Canton # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ============================================================================= # # Preamble # # ============================================================================= # ''' These are the unittests for the pycbc timeseries type ''' import pycbc import unittest from pycbc.types import * from pycbc.scheme import * import numpy import lal from utils import array_base, parse_args_all_schemes, simple_exit import sys import os import tempfile _scheme, _context = parse_args_all_schemes("TimeSeries") # By importing the current schemes array type, it will make it # easier to check the array types later if _scheme == 'cuda': import pycuda import pycuda.gpuarray from pycuda.gpuarray import GPUArray as SchemeArray elif _scheme == 'cpu': from numpy import ndarray as SchemeArray from numpy import ndarray as CPUArray class TestTimeSeriesBase(array_base,unittest.TestCase): __test__ = False def setUp(self): self.scheme = _scheme self.context = _context # We need to check for correct creation from all dtypes, # and errors from incorrect operations so the other precision of # odtype needs to be available as well self.other_precision = {numpy.complex64 : numpy.complex128, numpy.complex128 : numpy.complex64, numpy.float32 : numpy.float64, numpy.float64 : numpy.float32} # Number of decimal places to compare for single precision if self.dtype == numpy.float32 or self.dtype == numpy.complex64: self.places = 5 self.tol = 1e-5 # Number of decimal places to compare for double precision else: self.places = 13 self.tol = 1e-13 # We will also need to check whether dtype and odtype are real or complex, # so that we can test non-zero imaginary parts. if self.dtype == numpy.float32 or self.dtype == numpy.float64: self.kind = 'real' else: self.kind = 'complex' if self.odtype == numpy.float32 or self.odtype == numpy.float64: self.okind = 'real' else: self.okind = 'complex' # Note that self.epoch is set in the factory class constructor at the end; # we need only set self.delta_t here. self.delta_t = 0.1 # We need to tell the arithmetic test functions what our type is: self.type = TimeSeries # and the extra keyword arguments the constructors will need: self.kwds = {'epoch': self.epoch, 'delta_t': self.delta_t} # Now that the kinds are set, we need to call our parent method to set up all the # inputs and answers for our functions self.setNumbers() # The above call created instances for all of our inputs and various correct # outputs. But we make a copy of the scalar to check later. self.s = self.scalar # Finally, we want to have an array that we shouldn't be able to operate on, # because the precision is wrong, and one where the length is wrong. 
self.bad = TimeSeries([1,1,1], self.delta_t, epoch=self.epoch, dtype = self.other_precision[self.odtype]) self.bad2 = TimeSeries([1,1,1,1], self.delta_t, epoch=self.epoch, dtype = self.dtype) # These are timeseries that have problems specific to timeseries self.bad3 = TimeSeries([1,1,1], 0.2, epoch=self.epoch, dtype = self.dtype) if self.epoch == 0: self.bad4 = TimeSeries([1,1,1], self.delta_t, epoch = lal.LIGOTimeGPS(1000, 1000), dtype = self.dtype) else: self.bad4 = TimeSeries([1,1,1], self.delta_t, epoch=None, dtype = self.dtype) def test_numpy_init(self): with self.context: in1 = numpy.array([5,3,1],dtype=self.odtype) in2 = numpy.array([5,3,1],dtype=self.other_precision[self.odtype]) #We don't want to cast complex as real if not (self.kind == 'real' and self.okind == 'complex'): #First we must check that the dtype is correct when specified out1 = TimeSeries(in1,0.1, dtype=self.dtype, epoch=self.epoch) out2 = TimeSeries(in2,0.1, dtype=self.dtype, epoch=self.epoch) #to be sure that it is copied in1 += 1 in2 += 1 self.assertTrue(type(out1._scheme) == type(self.context)) self.assertTrue(type(out1._data) is SchemeArray) self.assertEqual(out1[0],5) self.assertEqual(out1[1],3) self.assertEqual(out1[2],1) self.assertTrue(out1.dtype==self.dtype) self.assertEqual(out1.delta_t, 0.1) self.assertEqual(out1.start_time, self.epoch) self.assertTrue(type(out2._scheme) == type(self.context)) self.assertTrue(type(out2._data) is SchemeArray) self.assertEqual(out2[0],5) self.assertEqual(out2[1],3) self.assertEqual(out2[2],1) self.assertTrue(out2.dtype==self.dtype) self.assertEqual(out2.delta_t,0.1) self.assertEqual(out2.start_time, self.epoch) in1-=1 in2-=1 # Also, when it is unspecified out3 = TimeSeries(in1,0.1,epoch=self.epoch) in1 += 1 self.assertTrue(type(out3._scheme) == type(self.context)) self.assertTrue(type(out3._data) is SchemeArray) self.assertEqual(out3[0],5) self.assertEqual(out3[1],3) self.assertEqual(out3[2],1) self.assertTrue(out3.dtype==self.odtype) self.assertEqual(out3.delta_t,0.1) self.assertEqual(out3.start_time, self.epoch) # Check for copy=false # On the CPU, this should be possible in3 = numpy.array([5,3,1],dtype=self.dtype) if self.scheme == 'cpu': out4 = TimeSeries(in3,0.1,epoch=self.epoch,copy=False) in3 += 1 self.assertTrue(out4.dtype==self.dtype) self.assertTrue(type(out4._scheme) == type(self.context)) self.assertEqual(out4[0],6) self.assertEqual(out4[1],4) self.assertEqual(out4[2],2) self.assertEqual(out4.delta_t,0.1) self.assertEqual(out4.start_time, self.epoch) # If we're in different scheme, this should raise an error else: self.assertRaises(TypeError, TimeSeries, in3, 0.1, copy=False) # We also need to check initialization using GPU arrays if self.scheme == 'cuda': in4 = pycuda.gpuarray.zeros(3,self.dtype) if self.scheme != 'cpu': out4 = TimeSeries(in4,0.1, copy=False, epoch=self.epoch) in4 += 1 self.assertTrue(type(out4._scheme) == type(self.context)) self.assertTrue(type(out4._data) is SchemeArray) self.assertEqual(out4[0],1) self.assertEqual(out4[1],1) self.assertEqual(out4[2],1) self.assertTrue(out4.dtype==self.dtype) self.assertEqual(out4.delta_t,0.1) self.assertEqual(out4.start_time, self.epoch) # We should be able to create an array from the wrong dtype, and # it should be cast as float64 in5 = numpy.array([1,2,3],dtype=numpy.int32) out5 = TimeSeries(in5,0.1,epoch=self.epoch) in5 += 1 self.assertTrue(type(out5._scheme) == type(self.context)) self.assertTrue(type(out5._data) is SchemeArray) self.assertEqual(out5[0],1) self.assertEqual(out5[1],2) 
self.assertEqual(out5[2],3) #self.assertTrue(out5.dtype==numpy.float64) self.assertEqual(out5.delta_t,0.1) self.assertEqual(out5.start_time, self.epoch) # We shouldn't be able to copy it though #self.assertRaises(TypeError,TimeSeries,in5, 0.1, copy=False) # Finally, just checking a few things specific to timeseries inbad = numpy.array([],dtype=float64) self.assertRaises(ValueError, TimeSeries, in1, -1) self.assertRaises(ValueError, TimeSeries, inbad, .1) self.assertRaises(TypeError, TimeSeries, in1, .1, epoch=(5,1)) def test_array_init(self): # this array is made outside the context so we can check that an error is raised when copy = false in a GPU scheme cpuarray = Array([1,2,3]) with self.context: in1 = Array([5,3,1],dtype=self.odtype) in2 = Array([5,3,1],dtype=self.other_precision[self.odtype]) self.assertTrue(type(in1._scheme) == type(self.context)) self.assertTrue(type(in1._data) is SchemeArray) self.assertTrue(type(in2._scheme) == type(self.context)) self.assertTrue(type(in2._data) is SchemeArray) # We don't want to cast complex as real if not (self.kind=='real' and self.okind == 'complex'): # First we must check that the dtype is correct when specified out1 = TimeSeries(in1, 0.1, epoch=self.epoch, dtype=self.dtype) out2 = TimeSeries(in2, 0.1, epoch=self.epoch, dtype=self.dtype) # to be sure that it is copied in1 += 1 in2 += 1 self.assertTrue(type(out1._scheme) == type(self.context)) self.assertTrue(type(out1._data) is SchemeArray) self.assertEqual(out1[0],5) self.assertEqual(out1[1],3) self.assertEqual(out1[2],1) self.assertTrue(out1.dtype==self.dtype) self.assertEqual(out1.delta_t, 0.1) self.assertEqual(out1.start_time, self.epoch) if out1.dtype == numpy.float32: self.assertTrue(out1.precision == 'single') #self.assertTrue(out1.kind == 'real') if out1.dtype == numpy.float64: self.assertTrue(out1.precision == 'double') #self.assertTrue(out1.kind == 'real') if out1.dtype == numpy.complex64: self.assertTrue(out1.precision == 'single') #self.assertTrue(out1.kind == 'complex') if out1.dtype == numpy.complex128: self.assertTrue(out1.precision == 'double') #self.assertTrue(out1.kind == 'complex') self.assertTrue(type(out2._scheme) == type(self.context)) self.assertTrue(type(out2._data) is SchemeArray) self.assertEqual(out2[0],5) self.assertEqual(out2[1],3) self.assertEqual(out2[2],1) self.assertTrue(out2.dtype==self.dtype) self.assertEqual(out2.delta_t, 0.1) self.assertEqual(out2.start_time, self.epoch) in1-=1 in2-=1 # Giving complex input and specifying a real dtype should raise an error else: self.assertRaises(TypeError, TimeSeries, in1,0.1, dtype = self.dtype) self.assertRaises(TypeError, TimeSeries, in2,0.1, dtype = self.dtype) # Also, when it is unspecified out3 = TimeSeries(in1,0.1,epoch=self.epoch) in1 += 1 self.assertTrue(type(out3._scheme) == type(self.context)) self.assertTrue(type(out3._data) is SchemeArray) self.assertEqual(out3[0],5) self.assertEqual(out3[1],3) self.assertEqual(out3[2],1) self.assertTrue(out3.dtype==self.odtype) self.assertEqual(out3.delta_t, 0.1) self.assertEqual(out3.start_time, self.epoch) # We should also be able to create from a CPU Array out4 = TimeSeries(cpuarray,0.1, dtype=self.dtype, epoch=self.epoch) self.assertTrue(type(out4._scheme) == type(self.context)) self.assertTrue(type(out4._data) is SchemeArray) self.assertEqual(out4[0],1) self.assertEqual(out4[1],2) self.assertEqual(out4[2],3) self.assertTrue(out4.dtype==self.dtype) self.assertEqual(out4.delta_t, 0.1) self.assertEqual(out4.start_time, self.epoch) # Check for copy=false in3 = 
Array([5,3,1],dtype=self.dtype) out5 = TimeSeries(in3,0.1,copy=False,epoch=self.epoch) in3 += 1 self.assertTrue(type(out5._scheme) == type(self.context)) self.assertTrue(type(out5._data) is SchemeArray) self.assertEqual(out5[0],6) self.assertEqual(out5[1],4) self.assertEqual(out5[2],2) self.assertTrue(out5.dtype==self.dtype) self.assertEqual(out5.delta_t, 0.1) self.assertEqual(out5.start_time, self.epoch) if self.scheme != 'cpu': self.assertRaises(TypeError,TimeSeries,0.1,cpuarray,copy=False) # Things specific to timeseries inbad = Array(numpy.array([],dtype=float64)) self.assertRaises(ValueError, TimeSeries, in1, -1) self.assertRaises(ValueError, TimeSeries, inbad, .1) self.assertRaises(TypeError, TimeSeries, in1, .1, epoch=(5,1)) # Also checking that a cpu array can't be made out of another scheme without copying if self.scheme != 'cpu': self.assertRaises(TypeError, TimeSeries, out4, 0.1, copy=False, epoch=self.epoch) out6 = TimeSeries(out4, 0.1, dtype=self.dtype) self.assertTrue(type(out6._scheme) == CPUScheme) self.assertTrue(type(out6._data) is CPUArray) self.assertEqual(out6[0],1) self.assertEqual(out6[1],2) self.assertEqual(out6[2],3) self.assertTrue(out6.dtype==self.dtype) self.assertEqual(out6.delta_t, 0.1) self.assertEqual(out6.start_time, self.epoch) def test_list_init(self): with self.context: # When specified out1 = TimeSeries([5,3,1],0.1, dtype=self.dtype, epoch=self.epoch) self.assertTrue(type(out1._scheme) == type(self.context)) self.assertTrue(type(out1._data) is SchemeArray) self.assertEqual(out1[0],5) self.assertEqual(out1[1],3) self.assertEqual(out1[2],1) self.assertTrue(out1.dtype==self.dtype) self.assertEqual(out1.delta_t, 0.1) self.assertEqual(out1.start_time, self.epoch) if out1.dtype == numpy.float32: self.assertTrue(out1.precision == 'single') #self.assertTrue(out1.kind == 'real') if out1.dtype == numpy.float64: self.assertTrue(out1.precision == 'double') #self.assertTrue(out1.kind == 'real') if out1.dtype == numpy.complex64: self.assertTrue(out1.precision == 'single') #self.assertTrue(out1.kind == 'complex') if out1.dtype == numpy.complex128: self.assertTrue(out1.precision == 'double') #self.assertTrue(out1.kind == 'complex') if self.kind == 'complex': out2 = TimeSeries([5.0+0j,3+0j,1+0j], 0.1, dtype=self.dtype, epoch=self.epoch) self.assertTrue(type(out2._scheme) == type(self.context)) self.assertTrue(type(out2._data) is SchemeArray) self.assertEqual(out2[0],5) self.assertEqual(out2[1],3) self.assertEqual(out2[2],1) self.assertTrue(out2.dtype==self.dtype) self.assertEqual(out2.delta_t, 0.1) self.assertEqual(out2.start_time, self.epoch) else: self.assertRaises(TypeError, TimeSeries,[5+0j, 3+0j, 1+0j], 0.1, dtype=self.dtype) #Also, when it is unspecified out3 = TimeSeries([5.0,3,1],0.1,epoch=self.epoch) self.assertTrue(type(out3._scheme) == type(self.context)) self.assertTrue(type(out3._data) is SchemeArray) self.assertEqual(out3[0],5) self.assertEqual(out3[1],3) self.assertEqual(out3[2],1) self.assertTrue(out3.dtype==numpy.float64) self.assertEqual(out3.delta_t, 0.1) self.assertEqual(out3.start_time, self.epoch) out4 = TimeSeries([5+0j,3+0j,1+0j],0.1,epoch = self.epoch) self.assertTrue(type(out4._scheme) == type(self.context)) self.assertTrue(type(out4._data) is SchemeArray) self.assertEqual(out4[0],5) self.assertEqual(out4[1],3) self.assertEqual(out4[2],1) self.assertTrue(out4.dtype==numpy.complex128) self.assertEqual(out4.delta_t, 0.1) self.assertEqual(out4.start_time, self.epoch) self.assertRaises(TypeError,TimeSeries,[1,2,3],copy=False) # Things specific to 
timeseries self.assertRaises(ValueError, TimeSeries, [1,2,3], -1) self.assertRaises(ValueError, TimeSeries, [], .1) self.assertRaises(TypeError, TimeSeries, [1,2,3], .1, epoch=(5,1)) def test_mul(self): super(TestTimeSeriesBase,self).test_mul() self.assertRaises(ValueError, self.a.__mul__,self.bad3) self.assertRaises(ValueError, self.a.__mul__,self.bad4) def test_rmul(self): super(TestTimeSeriesBase,self).test_rmul() self.assertRaises(ValueError, self.a.__rmul__,self.bad3) self.assertRaises(ValueError, self.a.__rmul__,self.bad4) def test_imul(self): super(TestTimeSeriesBase,self).test_imul() self.assertRaises(ValueError, self.a.__imul__,self.bad3) self.assertRaises(ValueError, self.a.__imul__,self.bad4) def test_add(self): super(TestTimeSeriesBase,self).test_add() self.assertRaises(ValueError, self.a.__add__, self.bad3) self.assertRaises(ValueError, self.a.__add__, self.bad4) def test_radd(self): super(TestTimeSeriesBase,self).test_radd() self.assertRaises(ValueError, self.a.__radd__,self.bad3) self.assertRaises(ValueError, self.a.__radd__,self.bad4) def test_iadd(self): super(TestTimeSeriesBase,self).test_iadd() self.assertRaises(ValueError, self.a.__iadd__,self.bad3) self.assertRaises(ValueError, self.a.__iadd__,self.bad4) def test_sub(self): super(TestTimeSeriesBase,self).test_sub() self.assertRaises(ValueError, self.a.__sub__,self.bad3) self.assertRaises(ValueError, self.a.__sub__,self.bad4) def test_rsub(self): super(TestTimeSeriesBase,self).test_rsub() self.assertRaises(ValueError, self.a.__rsub__,self.bad3) self.assertRaises(ValueError, self.a.__rsub__,self.bad4) def test_isub(self): super(TestTimeSeriesBase,self).test_isub() self.assertRaises(ValueError, self.a.__isub__,self.bad3) self.assertRaises(ValueError, self.a.__isub__,self.bad4) def test_div(self): super(TestTimeSeriesBase,self).test_div() self.assertRaises(ValueError, self.a.__div__,self.bad3) self.assertRaises(ValueError, self.a.__div__,self.bad4) def test_rdiv(self): super(TestTimeSeriesBase,self).test_rdiv() self.assertRaises(ValueError, self.a.__rdiv__,self.bad3) self.assertRaises(ValueError, self.a.__rdiv__,self.bad4) def test_idiv(self): super(TestTimeSeriesBase,self).test_idiv() self.assertRaises(ValueError, self.a.__idiv__,self.bad3) self.assertRaises(ValueError, self.a.__idiv__,self.bad4) def test_dot(self): super(TestTimeSeriesBase,self).test_dot() self.assertRaises(ValueError, self.a.dot,self.bad3) self.assertRaises(ValueError, self.a.dot,self.bad4) def test_duration(self): with self.context: # Moving these to the current scheme self.a*=1 self.b*=1 self.bad3*=1 self.assertAlmostEqual(self.a.duration, 0.3) self.assertAlmostEqual(self.b.duration, 0.3) self.assertAlmostEqual(self.bad3.duration, 0.6) def test_at_time(self): a = TimeSeries([0, 1, 2, 3, 4, 5, 6, 7], delta_t=1.0) self.assertAlmostEqual(a.at_time(0.5), 0.0) self.assertAlmostEqual(a.at_time(0.6, nearest_sample=True), 1.0) self.assertAlmostEqual(a.at_time(0.5, interpolate='linear'), 0.5) self.assertAlmostEqual(a.at_time([2.5], interpolate='quadratic'), 2.5) i = numpy.array([-0.2, 0.5, 1.5, 7.0]) x = a.at_time(i, extrapolate=0) n = numpy.array([0, 0.0, 1.0, 7.0]) self.assertAlmostEqual((x-n).sum(), 0) x = a.at_time(i, extrapolate=0, nearest_sample=True) n = numpy.array([0, 1.0, 2.0, 7.0]) self.assertAlmostEqual((x-n).sum(), 0) x = a.at_time(i, extrapolate=0, interpolate='linear') n = numpy.array([0, 0.5, 1.5, 0.0]) self.assertAlmostEqual((x-n).sum(), 0) x = a.at_time(i, extrapolate=0, interpolate='quadratic') n = numpy.array([0, 0.0, 1.5, 0.0]) 
        self.assertAlmostEqual((x-n).sum(), 0)

    def test_inject(self):
        a = TimeSeries(numpy.zeros(2**20, dtype=numpy.float32), delta_t=1.0)
        a[2**19] = 1
        # Check that the obvious case reduces to an add operation
        r = a.inject(a)
        self.assertAlmostEqual(r.max(), 2.0, places=7)
        # Check adding an offset vector
        b = a.cyclic_time_shift(-0.21)
        r = a.inject(b)
        self.assertAlmostEqual(r.max(), 2.0, places=5)
        # Check adding a shorter offset vector
        c = a.time_slice(2**19-5000, 2**19+5000).cyclic_time_shift(32.12)
        r = a.inject(c)
        self.assertAlmostEqual(r.max(), 2.0, places=4)

    def test_sample_times(self):
        with self.context:
            # Moving these to the current scheme
            self.a *= 1
            self.b *= 1
            self.bad3 *= 1
            self.assertEqual(len(self.a.sample_times), 3)
            self.assertAlmostEqual(self.a.sample_times[-1] - self.a.sample_times[0], 0.2)
            self.assertEqual(len(self.b.sample_times), 3)
            self.assertAlmostEqual(self.b.sample_times[-1] - self.b.sample_times[0], 0.2)
            self.assertEqual(len(self.bad3.sample_times), 3)
            self.assertAlmostEqual(self.bad3.sample_times[-1] - self.bad3.sample_times[0], 0.4)

    def test_save(self):
        with self.context:
            # make temporary file paths
            temp_file = tempfile.NamedTemporaryFile()
            temp_path_npy = temp_file.name + '.npy'
            temp_path_txt = temp_file.name + '.txt'
            # make a test time series
            a_numpy = numpy.arange(100, dtype=self.dtype)
            a = TimeSeries(a_numpy, delta_t=0.1)
            # test saving to Numpy array
            a.save(temp_path_npy)
            b = numpy.load(temp_path_npy)
            self.assertEqual(b.shape, (a_numpy.shape[0], 2))
            self.assertEqual(numpy.abs(b[:,0] - a.sample_times.numpy()).max(), 0)
            self.assertEqual(numpy.abs(b[:,1] - a_numpy).max(), 0)
            os.remove(temp_path_npy)
            # test saving to text file
            a.save(temp_path_txt)
            b = numpy.loadtxt(temp_path_txt)
            if a.kind == 'complex':
                self.assertEqual(b.shape, (a_numpy.shape[0], 3))
                b = numpy.vstack((b[:,0], b[:,1] + 1j * b[:,2])).T
            elif a.kind == 'real':
                self.assertEqual(b.shape, (a_numpy.shape[0], 2))
            self.assertEqual(numpy.abs(b[:,0] - a.sample_times.numpy()).max(), 0)
            self.assertEqual(numpy.abs(b[:,1] - a_numpy).max(), 0)
            os.remove(temp_path_txt)


def ts_test_maker(dtype, odtype, epoch):
    class TestTimeSeries(TestTimeSeriesBase):
        __test__ = True
        def __init__(self, *args):
            self.dtype = dtype
            self.odtype = odtype
            self.epoch = epoch if epoch is not None else lal.LIGOTimeGPS(0, 0)
            unittest.TestCase.__init__(self, *args)
    TestTimeSeries.__name__ = _scheme + " " + dtype.__name__ + " with " + odtype.__name__
    return TestTimeSeries

types = [ (float32, [float32, complex64]),
          (float64, [float64, complex128]),
          (complex64, [complex64, float32]),
          (complex128, [float64, complex128]) ]

suite = unittest.TestSuite()

# Unlike the regular array tests, we will need to test with an epoch, and with none
epochs = [lal.LIGOTimeGPS(1000, 1000), None]

i = 0
for t, otypes in types:
    for ot in otypes:
        for epoch in epochs:
            na = 'test' + str(i)
            vars()[na] = ts_test_maker(t, ot, epoch)
            suite.addTest(unittest.TestLoader().loadTestsFromTestCase(vars()[na]))
            i += 1

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
26,161
42.749164
122
py
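The tests above pin down the basic TimeSeries contract: delta_t, start_time/epoch, duration, sample_times, and interpolated access via at_time. A minimal standalone sketch of that behaviour (illustrative only; the numeric values are arbitrary and assume a standard pycbc installation):

import numpy
from pycbc.types import TimeSeries

ts = TimeSeries(numpy.zeros(8), delta_t=0.5, epoch=0)
ts[4] = 1.0
# duration is len(ts) * delta_t; sample_times spans (len(ts) - 1) * delta_t
print(ts.duration)                                # 4.0
print(ts.sample_times[-1] - ts.sample_times[0])   # 3.5
# linear interpolation halfway between the samples at t=2.0 and t=2.5
print(ts.at_time(2.25, interpolate='linear'))     # 0.5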
pycbc
pycbc-master/test/test_waveform_utils.py
import unittest
import numpy
from utils import simple_exit
from pycbc.waveform.utils import apply_fd_time_shift
from pycbc.types import TimeSeries


class TestFDTimeShift(unittest.TestCase):
    """Tests ``apply_fd_time_shift``."""

    def setUp(self):
        # we'll use a sine wave time series to do the testing, with the
        # segment length such that an integer number of cycles fits, so
        # we don't have to worry about boundary effects
        self.freq = 128
        self.sample_rate = 4096
        self.seglen = 1
        ncycles = self.freq * self.seglen
        t = numpy.linspace(0, ncycles*2*numpy.pi,
                           num=self.sample_rate*self.seglen, endpoint=False)
        self.time_series = TimeSeries(t, delta_t=1./self.sample_rate, epoch=0)
        self.tdsinx = numpy.sin(self.time_series)
        self.fdsinx = self.tdsinx.to_frequencyseries()

    def _shift_and_ifft(self, fdsinx, tshift, fseries=None):
        """Calls apply_fd_time_shift, and iFFTs to the time domain."""
        start_time = self.time_series.start_time
        tdshift = apply_fd_time_shift(fdsinx, start_time+tshift,
                                      fseries=fseries)
        return tdshift.to_timeseries()

    def _test_apply_fd_time_shift(self, fdsinx, fseries=None, atol=1e-8):
        """Tests ``apply_fd_time_shift`` with the given fdseries.

        If ``fdsinx`` is a FrequencySeries, this will test the shift code
        written in C. Otherwise, this will test the numpy version.

        Parameters
        ----------
        fdsinx : FrequencySeries
            The frequency series to shift and test.
        fseries : array, optional
            Array of the sample frequencies of ``fdsinx``. This is only
            needed for the numpy version.
        atol : float, optional
            The absolute tolerance for the comparison test. See
            ``numpy.isclose`` for details.
        """
        # shift by -pi/2: should be the same as the cosine
        tshift = 1./(4*self.freq)
        tdshift = self._shift_and_ifft(fdsinx, -tshift, fseries=fseries)
        # check
        comp = numpy.cos(self.time_series)
        if tdshift.precision == 'single':
            # cast to single
            comp = comp.astype(numpy.float32)
        self.assertTrue(numpy.isclose(tdshift, comp, atol=atol).all())
        # shift by +pi/2: should be the same as the -cosine
        tdshift = self._shift_and_ifft(fdsinx, tshift, fseries=fseries)
        self.assertTrue(numpy.isclose(tdshift, -1*comp, atol=atol).all())
        # shift by a non-integer fraction of the period; we'll do this by
        # shifting by a prime number times dt / 3
        # forward:
        tshift = 193 * self.time_series.delta_t / 3.
        tdshift = self._shift_and_ifft(fdsinx, tshift, fseries=fseries)
        comp = numpy.sin(self.time_series - 2*numpy.pi*self.freq*tshift)
        if tdshift.precision == 'single':
            # cast to single
            comp = comp.astype(numpy.float32)
        self.assertTrue(numpy.isclose(tdshift, comp, atol=atol).all())
        # backward:
        tdshift = self._shift_and_ifft(fdsinx, -tshift, fseries=fseries)
        comp = numpy.sin(self.time_series + 2*numpy.pi*self.freq*tshift)
        if tdshift.precision == 'single':
            # cast to single
            comp = comp.astype(numpy.float32)
        self.assertTrue(numpy.isclose(tdshift, comp, atol=atol).all())

    def test_fd_time_shift(self):
        """Applies shifts to fdsinx using cython code, and compares the
        result to applying the shift directly in the time domain.
        """
        self._test_apply_fd_time_shift(self.fdsinx)

    def test_fd_time_shift32(self):
        """Tests the cython code using single precision."""
        # we need to increase the tolerance on isclose
        self._test_apply_fd_time_shift(self.fdsinx.astype(numpy.complex64),
                                       atol=1e-4)

    def test_fseries_time_shift(self):
        """Applies shifts to fdsinx using numpy code, and compares the result
        to applying the shift directly in the time domain.
        """
        fdsinx = self.fdsinx.copy()
        fseries = self.fdsinx.sample_frequencies.numpy()
        self._test_apply_fd_time_shift(fdsinx, fseries)


suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestFDTimeShift))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
4,561
40.853211
78
py
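The quarter-period check in the tests above has a compact standalone analogue: shifting a pure sine's start time back by a quarter period in the frequency domain should reproduce a cosine. A sketch (an integer number of cycles fits the segment, so there are no boundary effects; the parameter values are arbitrary):

import numpy
from pycbc.types import TimeSeries
from pycbc.waveform.utils import apply_fd_time_shift

f0, rate = 16, 512
t = numpy.arange(rate) / float(rate)   # 1 s containing exactly 16 cycles
sine = TimeSeries(numpy.sin(2 * numpy.pi * f0 * t), delta_t=1. / rate, epoch=0)
fd = sine.to_frequencyseries()
# moving the start time back by a quarter period turns sine into cosine
shifted = apply_fd_time_shift(fd, sine.start_time - 1. / (4 * f0)).to_timeseries()
print(numpy.isclose(shifted.numpy(),
                    numpy.cos(2 * numpy.pi * f0 * t), atol=1e-6).all())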
pycbc
pycbc-master/test/test_fftw_openmp.py
# Copyright (C) 2012 Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are the unit tests for the pycbc.fft subpackage, exercising the FFTW
backend with the 'openmp' threads layer at several thread counts.
"""
import pycbc.fft
from pycbc.scheme import CPUScheme
import unittest
from sys import exit as _exit
from utils import parse_args_cpu_only, simple_exit
from fft_base import _BaseTestFFTClass

parse_args_cpu_only("FFTW openmp backend")

# See if we can set the FFTW threads backend to 'openmp'; if not, say so
# and exit.
if 'fftw' in pycbc.fft.get_backend_names():
    import pycbc.fft.fftw
    try:
        pycbc.fft.fftw.set_threads_backend('openmp')
    except Exception:
        print("Unable to import openmp threads backend to FFTW; "
              "skipping openmp thread tests")
        _exit(0)
else:
    print("FFTW does not seem to be an available CPU backend; "
          "skipping openmp thread tests")
    _exit(0)

# Now set the number of threads to something nontrivial.
# Most of the work is done in fft_base.
FFTTestClasses = []
for num_threads in [2, 4, 6, 8]:
    kdict = {'backends': ['fftw'],
             'scheme': 'cpu',
             'context': CPUScheme(num_threads=num_threads)}
    klass = type('FFTW_OpenMP_test', (_BaseTestFFTClass,), kdict)
    klass.__test__ = True
    FFTTestClasses.append(klass)

# Finally, we create suites and run them
if __name__ == '__main__':
    suite = unittest.TestSuite()
    for klass in FFTTestClasses:
        suite.addTest(unittest.TestLoader().loadTestsFromTestCase(klass))
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
2,496
31.855263
94
py
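Outside the test harness, the same backend selection can be exercised directly. A rough sketch, assuming an FFTW build with the OpenMP threads library present (otherwise set_threads_backend raises):

import numpy
import pycbc.fft
from pycbc.scheme import CPUScheme
from pycbc.types import TimeSeries

if 'fftw' in pycbc.fft.get_backend_names():
    import pycbc.fft.fftw
    pycbc.fft.fftw.set_threads_backend('openmp')
    data = TimeSeries(numpy.random.normal(size=4096), delta_t=1. / 4096)
    with CPUScheme(num_threads=4):
        # this transform runs under the threaded FFTW backend
        freq = data.to_frequencyseries()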
pycbc
pycbc-master/test/test_noise.py
# Copyright (C) 2019 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are the unittests for noise generation
"""
import unittest
import pycbc.psd
from utils import simple_exit
from pycbc.noise.reproduceable import noise_from_string, normal
from pycbc.fft.fftw import set_measure_level

set_measure_level(0)

class TestNoise(unittest.TestCase):
    def setUp(self, *args):
        self.ts = noise_from_string('aLIGOZeroDetHighPower', 100, 200,
                                    sample_rate=1024, seed=0,
                                    low_frequency_cutoff=1.0,
                                    filter_duration=64)

    def test_consistent_result(self):
        # This just checks that the result hasn't changed. If it has
        # you should find out why
        summ = self.ts.sum()
        comp = 4.265258573533567e-18
        diff = abs(summ - comp)
        self.assertTrue(diff < 1e-30)

    def test_noise_psd(self):
        p = self.ts.psd(4)
        p2 = pycbc.psd.from_string('aLIGOZeroDetHighPower', len(p),
                                   p.delta_f, 1.0)
        kmin = int(1.0 / p.delta_f)
        kmax = int(500 / p.delta_f)
        ratio = p[kmin:kmax] / p2[kmin:kmax]
        ave = ratio.numpy().mean()
        self.assertAlmostEqual(ave, 1, 1)

    def test_noise_reproducible(self):
        ts1 = normal(20, 30, sample_rate=16384, seed=87693)
        ts2 = normal(25, 35, sample_rate=16384, seed=87693)
        self.assertEqual(ts1.time_slice(25, 30), ts2.time_slice(25, 30))

suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestNoise))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
2,721
35.783784
79
py
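test_noise_reproducible captures the module's central guarantee: for a fixed seed the noise stream is a deterministic function of GPS time, so requests with different boundaries agree on their overlap. The same check stand-alone (seed and times are arbitrary):

from pycbc.noise.reproduceable import normal

a = normal(20, 30, sample_rate=2048, seed=1234)
b = normal(25, 35, sample_rate=2048, seed=1234)
# the overlapping span [25, 30) is identical despite the different boundaries
print(a.time_slice(25, 30) == b.time_slice(25, 30))   # True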
pycbc
pycbc-master/test/test_fft_mkl_threaded.py
# Copyright (C) 2012 Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are the unit tests for the pycbc.fft subpackage, exercising the MKL
backend at several thread counts.
"""
import platform
import unittest
import pycbc.fft
from pycbc.scheme import CPUScheme
from sys import exit as _exit
from utils import parse_args_cpu_only, simple_exit
from fft_base import _BaseTestFFTClass

parse_args_cpu_only("MKL threaded backend")

# See if the MKL backend is available; if not, say so and skip these tests.
FFTTestClasses = []
if 'arm64' in platform.machine():
    print("MKL not supported on arm64, skipping")
elif 'mkl' not in pycbc.fft.get_backend_names():
    print("MKL does not seem to be an available CPU backend; "
          "skipping MKL thread tests")
    _exit(0)
else:
    # Now set the number of threads to something nontrivial.
    # Most of the work is done in fft_base.
    for num_threads in [2, 4, 6, 8]:
        kdict = {'backends': ['mkl'],
                 'scheme': 'cpu',
                 'context': CPUScheme(num_threads=num_threads)}
        klass = type('MKL_threaded_test', (_BaseTestFFTClass,), kdict)
        klass.__test__ = True
        FFTTestClasses.append(klass)

# Finally, we create suites and run them
if __name__ == '__main__':
    suite = unittest.TestSuite()
    for klass in FFTTestClasses:
        suite.addTest(unittest.TestLoader().loadTestsFromTestCase(klass))
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
2,447
33
79
py
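The guards above generalize to any code that wants to fall back gracefully when MKL is missing; a small sketch using only the calls already shown in the file:

import platform
import pycbc.fft

names = pycbc.fft.get_backend_names()
mkl_usable = 'mkl' in names and 'arm64' not in platform.machine()
print(names, '-> MKL usable:', mkl_usable)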
pycbc
pycbc-master/test/test_calibration.py
import unittest
from pycbc import strain
from pycbc.types import FrequencySeries
import numpy as np
from pycbc.workflow.configuration import WorkflowConfigParser
from pycbc.strain.recalibrate import Recalibrate
from utils import simple_exit


def strain_array():
    frequency_array = np.linspace(0, 2048, 100)
    delta_f = frequency_array[1] - frequency_array[0]
    strain_array = frequency_array**2
    return FrequencySeries(strain_array, delta_f)


class CalibrateTestBase(unittest.TestCase):

    def test_instantiate(self):
        self.assertRaises(TypeError, Recalibrate, 'test')

    def test_instantiation(self):
        params = dict(ifo_name='test', minimum_frequency=10,
                      maximum_frequency=1024, n_points=5)
        model = strain.models['cubic_spline'](**params)
        self.assertTrue(model.name == 'cubic_spline')

    def test_instantiation_from_config(self):
        parameters = dict(minimum_frequency='10', maximum_frequency='1024',
                          n_points='5')
        cp = WorkflowConfigParser()
        cp.add_section('test')
        ifo_name = 'ifo'
        cp.set('test', '{}_model'.format(ifo_name), 'cubic_spline')
        for key in parameters:
            cp.set('test', '{}_{}'.format(ifo_name, key), parameters[key])
        from_config = strain.read_model_from_config(cp, ifo_name, 'test')
        self.assertTrue(all([from_config.name == 'cubic_spline',
                             from_config.ifo_name == ifo_name]))

    def test_too_few_spline_points_fails(self):
        self.assertRaises(ValueError, strain.CubicSpline, ifo_name='test',
                          minimum_frequency=10, maximum_frequency=1024,
                          n_points=3)

    def test_update_parameters(self):
        init_params = dict(ifo_name='test', minimum_frequency=10,
                           maximum_frequency=1024, n_points=5)
        dict_params = dict(recalib_amplitude_test_0=0.1,
                           recalib_amplitude_test_1=0.1,
                           recalib_amplitude_test_2=0.1,
                           recalib_amplitude_test_3=0.1,
                           recalib_amplitude_test_4=0.1,
                           recalib_phase_test_0=0.1,
                           recalib_phase_test_1=0.1,
                           recalib_phase_test_2=0.1,
                           recalib_phase_test_3=0.1,
                           recalib_phase_test_4=0.1,
                           )
        model = strain.models['cubic_spline'](**init_params)
        dict_params.update(dict(foo='bar'))
        prefix = 'recalib_'
        model.map_to_adjust(strain_array(), prefix=prefix, **dict_params)
        model_keys = [key[len(prefix):] for key in dict_params
                      if prefix in key]
        self.assertTrue(all([model.params[key] == dict_params[prefix+key]
                             for key in model_keys]))


if __name__ == '__main__':
    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(CalibrateTestBase))
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
2,989
38.342105
81
py
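test_update_parameters encodes the expected parameter naming scheme, recalib_{amplitude,phase}_{ifo}_{node}. A sketch of driving the model directly with that scheme (the H1 values are hypothetical; the strain array mirrors the helper above):

import numpy as np
from pycbc import strain
from pycbc.types import FrequencySeries

model = strain.models['cubic_spline'](ifo_name='H1', minimum_frequency=10,
                                      maximum_frequency=1024, n_points=5)
freqs = np.linspace(0, 2048, 100)
fs = FrequencySeries(freqs**2, delta_f=freqs[1] - freqs[0])
params = {'recalib_%s_H1_%d' % (kind, i): 0.0
          for kind in ('amplitude', 'phase') for i in range(5)}
adjusted = model.map_to_adjust(fs, prefix='recalib_', **params)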
pycbc
pycbc-master/test/utils.py
# Copyright (C) 2012--2013 Alex Nitz, Josh Willis, Andrew Miller # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ============================================================================= # # Preamble # # ============================================================================= # """ This module contains a few helper functions designed to make writing PyCBC unit tests easier, while still allowing the tests to be run on CPU and CUDA All tests starting with 'test_' in the test subdirectory of pycbc are run whenever the command 'python setup.py test' is given. That command will attempt to call each test, passing it the argument '-s <scheme>' where scheme is each of 'cpu', 'cuda' in turn. Unit tests designed to validate code that should run under multiple schemes should accept each of these options, rerunning the same tests under each successive scheme. This will usually be done by putting something like: _scheme, _context = parse_args_all_schemes('MyFeature') before the definition of the unit test class. In the definition of 'setUp' (which is a mandatory function that must be defined in the class, per the Python unittest module design) one would then usually have: self.scheme = _scheme self.context = _context and those properties of any instance can then be used by later tests defined in the class; for example, beginning a block with 'with self.context:' to ensure the appropriate context manager is used for that scheme. Some other unit tests may be for features or sub-packages that are not GPU capable, and cannot be meaningfully tested on the GPU. Those tests, after importing pycbc.test.utils, should instead just call: parse_args_cpu_only('MyFeature') This call is needed because the tests must still be able to accept the arguments specifying a GPU environment (since setup.py does not know which tests are GPU capable and which are not) but when called with a GPU scheme will exit immediately. Both functions take a single string as an argument. That string is used to customize the heading of all of the tests (according to feature and scheme) to make the output of running all of the unit tests somewhat easier to parse when they are all run at once. 
""" import pycbc import optparse from sys import exit as _exit from optparse import OptionParser from pycbc.scheme import CPUScheme, CUDAScheme from numpy import float32, float64, complex64, complex128 from pycbc.types import Array def _check_scheme_all(option, opt_str, scheme, parser): if scheme=='cuda' and not pycbc.HAVE_CUDA: raise optparse.OptionValueError("CUDA not found") setattr (parser.values, option.dest, scheme) def parse_args_all_schemes(feature_str): _parser = OptionParser() _parser.add_option('--scheme','-s', action='callback', type = 'choice', choices = ('cpu','cuda'), default = 'cpu', dest = 'scheme', callback = _check_scheme_all, help = 'specifies processing scheme, can be cpu [default], cuda') _parser.add_option('--device-num','-d', action='store', type = 'int', dest = 'devicenum', default=0, help = 'specifies a GPU device to use for CUDA, 0 by default') (_opt_list, _args) = _parser.parse_args() # Changing the optvalues to a dict makes them easier to read _options = vars(_opt_list) _scheme = _options['scheme'] if _scheme == 'cpu': _context = CPUScheme() if _scheme == 'cuda': _context = CUDAScheme(device_num=_options['devicenum']) _scheme_dict = { 'cpu': 'CPU', 'cuda': 'CUDA'} print(72*'=') print("Running {0} unit tests for {1}:".format(_scheme_dict[_scheme],feature_str)) return [_scheme,_context] def _check_scheme_cpu(option, opt_str, scheme, parser): if scheme=='cuda': exit(0) setattr (parser.values, option.dest, scheme) def parse_args_cpu_only(feature_str): _parser = OptionParser() _parser.add_option('--scheme','-s', action='callback', type = 'choice', choices = ('cpu','cuda'), default = 'cpu', dest = 'scheme', callback = _check_scheme_cpu, help = 'specifies processing scheme, can be cpu [default], cuda') _parser.add_option('--device-num','-d', action='store', type = 'int', dest = 'devicenum', default=0, help = 'specifies a GPU device to use for CUDA, 0 by default') (_opt_list, _args) = _parser.parse_args() # In this case, the only reason we parsed the arguments was to exit if we were given # a GPU scheme. So if we get here we're on the CPU, and should print out our message # and return. print(72*'=') print("Running {0} unit tests for {1}:".format('CPU', feature_str)) return def simple_exit(results): """ A simpler version of exit_based_on_results(); this function causes the script to exit normally with return value of zero if and only if all tests within the script passed and had no errors. Otherwise it returns the number of failures plus the number of errors Parameters ---------- results: an instance of unittest.TestResult, returned (for instance) from a call such as results = unittest.TextTestRunner(verbosity=2).run(suite) """ if results.wasSuccessful(): _exit(0) else: nfail = len(results.errors)+len(results.failures) _exit(nfail) def exit_based_on_results(results): """ A probably-obsolete function to exit from a unit test-script with a status that depends on whether or not the only errors or failures were NotImplemented errors. Specifically, if the unit-test suite execution encoded in results was: All tests successful: Exit 0 All tests successful or only NotImplemented errors: Exit 1 Some tests either failed or had other errors: Exit 2 The intent was that failures due to missing features be treated differently (especially when that happens on one of the GPU schemes) and that these exit statuses could then be interpreted by NMI or some other automatic build/test system accordingly. 
Parameters ---------- results: an instance of unittest.TestResult, returned (for instance) from a call such as results = unittest.TextTestRunner(verbosity=2).run(suite) """ NotImpErrors = 0 for error in results.errors: for errormsg in error: if type(errormsg) is str: if 'NotImplemented' in errormsg: NotImpErrors +=1 break if results.wasSuccessful(): _exit(0) elif len(results.failures)==0 and len(results.errors)==NotImpErrors: _exit(1) else: _exit(2) # Copied over from base_array.py so we can refactor it to remove # the scheme shuffling and take advantage of the new equality/almost # equal methods of the types. # The following dictionary converts dtypes into the corresponding real # dtype; it is needed for functions that return arrays of the same # precision but which are always real. _real_dtype_dict = { float32: float32, complex64: float32, float64: float64, complex128: float64 } class array_base(object): def setNumbers(self): # We create instances of our types, and need to know the generic answer # type so that we can convert the many basic lists into the appropriate # precision and kind. This logic essentially implements (for our limited # use cases) what is in the function numpy.result_type, but that function # doesn't become available until Numpy 1.6.0 if self.kind == 'real': if self.okind == 'real': self.result_dtype = self.dtype else: self.result_dtype = self.odtype else: self.result_dtype = self.dtype self.rdtype = _real_dtype_dict[self.dtype] # The child class (testing one of Array, TimeSeries, or FrequencySeries) # should set the following in its setUp method before this method (setNumbers) # is called: # self.type = one of [Array,TimeSeries,FrequencySeries] # self.kwds = dict for other kwd args beyond 'dtype'; normally an # epoch and one of delta_t or delta_f # These are the values that should be used to initialize the test arrays. if self.kind == 'real': self.a = self.type([5,3,1],dtype=self.dtype,**self.kwds) self.alist = [5,3,1] else: self.a = self.type([5+1j,3+3j,1+5j],dtype=self.dtype,**self.kwds) self.alist = [5+1j,3+3j,1+5j] if self.okind == 'real': self.b = self.type([10,8,6],dtype=self.odtype,**self.kwds) self.blist = [10,8,6] else: self.b = self.type([10+6j,8+4j,6+2j],dtype=self.odtype,**self.kwds) self.blist = [10+6j,8+4j,6+2j] # And the scalar to test on if self.okind == 'real': self.scalar = 5 else: self.scalar = 5+2j # The weights used in the weighted inner product test are always an Array, # regardless of the types whose inner product is being tested. self.w = Array([1, 2, 1],dtype=self.dtype) # All the answers are stored here to make it easier to read in the actual tests. # Again, it makes a difference whether they are complex or real valued, so there # are four sets of possible answers, depending on the dtypes. if self.kind == 'real' and self.okind == 'real': self.cumsum=self.type([5,8,9],dtype=self.dtype,**self.kwds) self.mul = self.type([50, 24, 6],dtype=self.result_dtype,**self.kwds) self.mul_s = self.type([25, 15, 5],dtype=self.result_dtype,**self.kwds) self.add = self.type([15, 11, 7],dtype=self.result_dtype,**self.kwds) self.add_s = self.type([10, 8, 6],dtype=self.result_dtype,**self.kwds) #self.div = [.5, 3./8., 1./6.] self.div = self.type([.5, 0.375, .16666666666666666667],dtype=self.result_dtype,**self.kwds) #self.div_s = [1., 3./5., 1./5.] self.div_s = self.type([1., 0.6, 0.2],dtype=self.result_dtype,**self.kwds) #self.rdiv = [2., 8./3., 6.] 
self.rdiv = self.type([2., 2.66666666666666666667, 6.],dtype=self.result_dtype,**self.kwds) #self.rdiv_s = [1., 5./3., 5.] self.rdiv_s = self.type([1., 1.66666666666666666667, 5.],dtype=self.result_dtype,**self.kwds) self.sub = self.type([-5, -5, -5],dtype=self.result_dtype,**self.kwds) self.sub_s = self.type([0, -2, -4],dtype=self.result_dtype,**self.kwds) self.rsub = self.type([5, 5, 5],dtype=self.result_dtype,**self.kwds) self.rsub_s = self.type([0, 2, 4],dtype=self.result_dtype,**self.kwds) self.pow1 = self.type([25., 9., 1.],dtype=self.dtype,**self.kwds) #self.pow2 = [pow(5,-1.5), pow(3,-1.5), pow(1,-1.5)] self.pow2 = self.type([0.08944271909999158786, 0.19245008972987525484, 1.],dtype=self.dtype,**self.kwds) self.abs = self.type([5, 3, 1],dtype=self.rdtype,**self.kwds) self.real = self.type([5,3,1],dtype=self.rdtype,**self.kwds) self.imag = self.type([0, 0, 0],dtype=self.rdtype,**self.kwds) self.conj = self.type([5, 3, 1],dtype=self.dtype,**self.kwds) self.sum = 9 self.dot = 80 self.inner = self.dot self.weighted_inner = 68 if self.kind =='real' and self.okind == 'complex': self.cumsum= self.type([5,8,9],dtype=self.dtype,**self.kwds) self.mul = self.type([50+30j, 24+12j, 6+2j],dtype=self.result_dtype,**self.kwds) self.mul_s = self.type([25+10j, 15+6j, 5+2j],dtype=self.result_dtype,**self.kwds) self.add = self.type([15+6j, 11+4j, 7+2j],dtype=self.result_dtype,**self.kwds) self.add_s = self.type([10+2j, 8+2j, 6+2j],dtype=self.result_dtype,**self.kwds) #self.div = [25./68.-15.j/68., 3./10.-3.j/20., 3./20.-1.j/20.] self.div = self.type([0.36764705882352941176-0.22058823529411764706j, 0.3-0.15j, 0.15-0.05j],dtype=self.result_dtype,**self.kwds) #self.div_s = [25./29.-10.j/29., 15./29.-6.j/29., 5./29.-2.j/29.] self.div_s = self.type([0.86206896551724137931-0.34482758620689655172j, 0.51724137931034482759-0.20689655172413793103j, 0.17241379310344827586-0.06896551724137931034j], dtype=self.result_dtype,**self.kwds) #self.rdiv = [2.+6.j/5., 8./3.+4.j/3, 6.+2.j] self.rdiv = self.type([2.+1.2j, 2.66666666666666666667+1.33333333333333333333j, 6.+2.j],dtype=self.result_dtype,**self.kwds) #self.rdiv_s = [1.+2.j/5., 5./3.+2.j/3., 5.+2.j] self.rdiv_s = self.type([1.+0.4j, 1.66666666666666666667+0.666666666666666666667j, 5.+2.j],dtype=self.result_dtype,**self.kwds) self.sub = self.type([-5-6j, -5-4j, -5-2j],dtype=self.result_dtype,**self.kwds) self.sub_s = self.type([0-2j, -2-2j, -4-2j],dtype=self.result_dtype,**self.kwds) self.rsub = self.type([5+6j, 5+4j, 5+2j],dtype=self.result_dtype,**self.kwds) self.rsub_s = self.type([0+2j, 2+2j, 4+2j],dtype=self.result_dtype,**self.kwds) self.pow1 = self.type([25., 9., 1.],dtype=self.dtype,**self.kwds) #self.pow2 = [pow(5,-1.5), pow(3,-1.5), pow(1,-1.5)] self.pow2 = self.type([0.08944271909999158786, 0.19245008972987525484, 1.],dtype=self.dtype,**self.kwds) self.abs = self.type([5, 3, 1],dtype=self.rdtype,**self.kwds) self.real = self.type([5,3,1],dtype=self.rdtype,**self.kwds) self.imag = self.type([0, 0, 0],dtype=self.rdtype,**self.kwds) self.conj = self.type([5, 3, 1],dtype=self.dtype,**self.kwds) self.sum = 9 self.dot = 80+44j self.inner = self.dot self.weighted_inner = 68 + 38j if self.kind == 'complex' and self.okind == 'real': self.cumsum = self.type([5+1j,8+4j,9+9j],dtype=self.dtype,**self.kwds) self.mul = self.type([50+10j, 24+24j, 6+30j],dtype=self.result_dtype,**self.kwds) self.mul_s = self.type([25+5j, 15+15j, 5+25j],dtype=self.result_dtype,**self.kwds) self.add = self.type([15+1j, 11+3j, 7+5j],dtype=self.result_dtype,**self.kwds) self.add_s = 
self.type([10+1j, 8+3j, 6+5j],dtype=self.result_dtype,**self.kwds) #self.div = [1./2.+1.j/10., 3./8.+3.j/8., 1./6.+5.j/6.] self.div = self.type([0.5+0.1j, 0.375+0.375j,0.16666666666666666667+0.83333333333333333333j], dtype=self.result_dtype,**self.kwds) #self.div_s = [1.+1.j/5., 3./5.+3.j/5., 1./5.+1.j] self.div_s = self.type([1.+0.2j, 0.6+0.6j, 0.2+1.j],dtype=self.result_dtype,**self.kwds) #self.rdiv = [25./13.-5.j/13., 4./3.-4.j/3., 3./13.-15.j/13.] self.rdiv = self.type([1.92307692307692307692-0.38461538461538461538j, 1.33333333333333333333-1.33333333333333333333j, 0.23076923076923076923-1.15384615384615384615j], dtype=self.result_dtype,**self.kwds) #self.rdiv_s = [25./26.-5.j/26., 5./6.-5.j/6., 5./26.-25.j/26.] self.rdiv_s = self.type([0.96153846153846153846-0.19230769230769230769j, 0.83333333333333333333-0.83333333333333333333j, 0.19230769230769230769-0.96153846153846153846j], dtype=self.result_dtype,**self.kwds) self.sub = self.type([-5+1j, -5+3j, -5+5j],dtype=self.result_dtype,**self.kwds) self.sub_s = self.type([0+1j, -2+3j, -4+5j],dtype=self.result_dtype,**self.kwds) self.rsub = self.type([5-1j, 5-3j, 5-5j],dtype=self.result_dtype,**self.kwds) self.rsub_s = self.type([0-1j, 2-3j, 4-5j],dtype=self.result_dtype,**self.kwds) self.pow1 = self.type([24.+10.j, 0.+18.j, -24.+10.j],dtype=self.dtype,**self.kwds) #self.pow2 = [pow(5+1j,-1.5), pow(3+3j,-1.5), pow(1+5j,-1.5)] self.pow2 = self.type([0.08307064054041229214-0.0253416052125975132j, 0.04379104225017853491-0.1057209281108342370j, -0.04082059235165559671-0.0766590341356157206j], dtype=self.dtype,**self.kwds) #self.abs = [pow(26,.5), 3*pow(2,.5), pow(26,.5)] self.abs = self.type([5.09901951359278483003, 4.24264068711928514641, 5.09901951359278483003],dtype=self.rdtype,**self.kwds) self.real = self.type([5,3,1],dtype=self.rdtype,**self.kwds) self.imag = self.type([1, 3, 5],dtype=self.rdtype,**self.kwds) self.conj = self.type([5-1j, 3-3j, 1-5j],dtype=self.dtype,**self.kwds) self.sum = 9+9j self.dot = 80+64j self.inner = 80-64j self.weighted_inner = 68- 52j if self.kind =='complex' and self.okind =='complex': self.cumsum = self.type([5+1j,8+4j,9+9j],dtype=self.dtype,**self.kwds) self.mul = self.type([44+40j, 12+36j, -4+32j],dtype=self.result_dtype,**self.kwds) self.mul_s = self.type([23+15j, 9+21j, -5+27j],dtype=self.result_dtype,**self.kwds) self.add = self.type([15+7j, 11+7j, 7+7j],dtype=self.result_dtype,**self.kwds) self.add_s = self.type([10+3j, 8+5j, 6+7j],dtype=self.result_dtype,**self.kwds) #self.div = [7./17.-5.j/34., 9./20.+3.j/20., 2./5.+7.j/10.] self.div = self.type([0.41176470588235294118-0.14705882352941176471j, 0.45+0.15j, 0.4+0.7j],dtype=self.result_dtype,**self.kwds) #self.div_s = [27./29.-5.j/29., 21./29.+9.j/29., 15./29.+23.j/29.] self.div_s = self.type([0.93103448275862068966-0.17241379310344827586j, 0.72413793103448275862+0.31034482758620689655j, 0.51724137931034482759+0.79310344827586206897j], dtype=self.result_dtype,**self.kwds) #self.rdiv = [28./13.+10.j/13., 2.-2.j/3., 8./13.-14.j/13.] self.rdiv = self.type([2.15384615384615384615+0.76923076923076923077j, 2. 
-0.66666666666666666667j, 0.61538461538461538462-1.07692307692307692308j], dtype=self.result_dtype,**self.kwds) #self.rdiv_s = [27./26.+5.j/26., 7./6.-1.j/2., 15./26.-23.j/26] self.rdiv_s = self.type([1.03846153846153846154+0.19230769230769230769j, 1.16666666666666666667-0.5j, 0.57692307692307692308-0.88461538461538461538j], dtype=self.result_dtype,**self.kwds) self.sub = self.type([-5-5j, -5-1j, -5+3j],dtype=self.result_dtype,**self.kwds) self.sub_s = self.type([0-1j, -2+1j, -4+3j],dtype=self.result_dtype,**self.kwds) self.rsub = self.type([5+5j, 5+1j, 5-3j],dtype=self.result_dtype,**self.kwds) self.rsub_s = self.type([0+1j, 2-1j, 4-3j],dtype=self.result_dtype,**self.kwds) self.pow1 = self.type([24.+10.j, 0.+18.j, -24.+10.j],dtype=self.dtype,**self.kwds) #self.pow2 = [pow(5+1j,-1.5), pow(3+3j,-1.5), pow(1+5j,-1.5)] self.pow2 = self.type([0.08307064054041229214-0.0253416052125975132j, 0.04379104225017853491-0.1057209281108342370j, -0.04082059235165559671-0.0766590341356157206j], dtype=self.dtype,**self.kwds) #self.abs = [pow(26,.5), 3*pow(2,.5), pow(26,.5)] self.abs = self.type([5.09901951359278483003, 4.24264068711928514641, 5.09901951359278483003],dtype=self.rdtype,**self.kwds) self.real = self.type([5,3,1],dtype=self.rdtype,**self.kwds) self.imag = self.type([1, 3, 5],dtype=self.rdtype,**self.kwds) self.conj = self.type([5-1j, 3-3j, 1-5j],dtype=self.dtype,**self.kwds) self.sum = 9+9j self.dot = 52+108j self.inner = 108-20j self.weighted_inner= 90 -14j self.min = 1 self.max = 5 def test_mul(self): # Make copies to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) with self.context: # Two of whichever type c = self.a * self.b self.assertEqual(self.a,acopy) self.assertEqual(self.b,bcopy) self.assertTrue(self.mul.almost_equal_elem(c,tol=self.tol)) # Type with scalar c = self.a * self.s self.assertEqual(self.a,acopy) self.assertEqual(self.scalar,self.s) self.assertTrue(self.mul_s.almost_equal_elem(c,tol=self.tol)) # Input that should raise an error self.assertRaises(TypeError, self.a.__mul__, self.bad) self.assertRaises(ValueError, self.a.__mul__, self.bad2) def test_rmul(self): # Make copies to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) with self.context: # Two of whichever type c = self.a.__rmul__(self.b) self.assertEqual(self.a,acopy) self.assertEqual(self.b,bcopy) self.assertTrue(self.mul.almost_equal_elem(c,tol=self.tol)) # Type with scalar c = self.s * self.a self.assertEqual(self.a,acopy) self.assertEqual(self.b,bcopy) self.assertTrue(self.mul_s.almost_equal_elem(c,tol=self.tol)) # Input that should raise an error self.assertRaises(TypeError, self.a.__rmul__, self.bad) self.assertRaises(ValueError, self.a.__rmul__, self.bad2) def test_imul(self): if not (self.kind == 'real' and self.okind == 'complex'): # Make copy to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) with self.context: # Type with itself self.a *= self.b self.assertEqual(bcopy,self.b) self.assertTrue(self.mul.almost_equal_elem(self.a,tol=self.tol)) # Reset for next test self.a = type(self.a)(acopy) # Type with scalar self.a *= self.s self.assertEqual(self.scalar,self.s) self.assertTrue(self.mul_s.almost_equal_elem(self.a,tol=self.tol)) # Input that should raise an error self.assertRaises(TypeError, self.a.__imul__, self.bad) self.assertRaises(ValueError, self.a.__imul__, self.bad2) else: with self.context: self.assertRaises(TypeError, self.a.__imul__,self.s) self.assertRaises(TypeError, self.a.__imul__,self.b) def 
test_add(self): # Make copies to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) with self.context: # Type with itself c = self.a + self.b self.assertEqual(self.a,acopy) self.assertEqual(self.b,bcopy) self.assertTrue(self.add.almost_equal_elem(c,tol=self.tol)) # Type with scalar c = self.a + self.s self.assertEqual(self.a,acopy) self.assertEqual(self.scalar,self.s) self.assertTrue(self.add_s.almost_equal_elem(c,tol=self.tol)) # Input that should raise an error self.assertRaises(TypeError, self.a.__add__, self.bad) self.assertRaises(ValueError, self.a.__add__, self.bad2) def test_radd(self): # Make copies to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) with self.context: # Type with itself c = self.a.__radd__(self.b) self.assertEqual(self.a,acopy) self.assertEqual(self.b,bcopy) self.assertTrue(self.add.almost_equal_elem(c,tol=self.tol)) # Type with scalar c = self.s + self.a self.assertEqual(self.a,acopy) self.assertEqual(self.scalar,self.s) self.assertTrue(self.add_s.almost_equal_elem(c,tol=self.tol)) # Input that should raise an error self.assertRaises(TypeError, self.a.__radd__, self.bad) self.assertRaises(ValueError, self.a.__radd__, self.bad2) def test_iadd(self): if not (self.kind == 'real' and self.okind == 'complex'): # Make copy to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) with self.context: # Type with itself self.a += self.b self.assertEqual(bcopy,self.b) self.assertTrue(self.add.almost_equal_elem(self.a,tol=self.tol)) # Reset for next test self.a = type(self.a)(acopy) # Type with scalar self.a += self.s self.assertEqual(self.scalar,self.s) self.assertTrue(self.add_s.almost_equal_elem(self.a,tol=self.tol)) # Input that should raise an error self.assertRaises(TypeError, self.a.__iadd__, self.bad) self.assertRaises(ValueError, self.a.__iadd__, self.bad2) else: with self.context: self.assertRaises(TypeError, self.a.__iadd__,self.s) self.assertRaises(TypeError, self.a.__iadd__,self.b) def test_div(self): # Make copies to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) with self.context: # Type with itself c = self.a / self.b self.assertEqual(self.a,acopy) self.assertEqual(self.b,bcopy) self.assertTrue(self.div.almost_equal_elem(c,tol=self.tol)) # Type with scalar c = self.a / self.s self.assertEqual(self.a,acopy) self.assertEqual(self.scalar,self.s) self.assertTrue(self.div_s.almost_equal_elem(c,tol=self.tol)) # Input that should raise an error self.assertRaises(TypeError, self.a.__div__, self.bad) self.assertRaises(ValueError, self.a.__div__, self.bad2) def test_rdiv(self): # Make copies to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) with self.context: # Type with scalar c = self.s / self.a self.assertEqual(self.a,acopy) self.assertEqual(self.scalar,self.s) self.assertTrue(self.rdiv_s.almost_equal_elem(c,tol=self.tol)) # Input that should raise an error self.assertRaises(TypeError, self.a.__rdiv__, self.bad) self.assertRaises(ValueError, self.a.__rdiv__, self.bad2) def test_idiv(self): if not (self.kind == 'real' and self.okind == 'complex'): # Make copy to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) with self.context: # Type with itself self.a /= self.b self.assertEqual(bcopy,self.b) self.assertTrue(self.div.almost_equal_elem(self.a,tol=self.tol)) # Reset for next test self.a = type(self.a)(acopy) # Type with scalar self.a /= self.s self.assertEqual(self.scalar,self.s) 
self.assertTrue(self.div_s.almost_equal_elem(self.a,tol=self.tol)) # Input that should raise an error self.assertRaises(TypeError, self.a.__idiv__, self.bad) self.assertRaises(ValueError, self.a.__idiv__, self.bad2) else: with self.context: self.assertRaises(TypeError, self.a.__idiv__,self.s) self.assertRaises(TypeError, self.a.__idiv__,self.b) def test_sub(self): # Make copies to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) with self.context: # Type with itself c = self.a - self.b self.assertEqual(self.a,acopy) self.assertEqual(self.b,bcopy) self.assertTrue(self.sub.almost_equal_elem(c,tol=self.tol)) # Type with scalar c = self.a - self.s self.assertEqual(self.a,acopy) self.assertEqual(self.scalar,self.s) self.assertTrue(self.sub_s.almost_equal_elem(c,tol=self.tol)) # Input that should raise an error self.assertRaises(TypeError, self.a.__sub__, self.bad) self.assertRaises(ValueError, self.a.__sub__, self.bad2) def test_rsub(self): # Make copies to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) with self.context: # Type with scalar c = self.s - self.a self.assertEqual(self.a,acopy) self.assertEqual(self.scalar,self.s) self.assertTrue(self.rsub_s.almost_equal_elem(c,tol=self.tol)) # Input that should raise an error self.assertRaises(TypeError, self.a.__rsub__, self.bad) self.assertRaises(ValueError, self.a.__rsub__, self.bad2) def test_isub(self): if not (self.kind == 'real' and self.okind == 'complex'): # Make copy to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) with self.context: # Type with itself self.a -= self.b self.assertEqual(bcopy,self.b) self.assertTrue(self.sub.almost_equal_elem(self.a,tol=self.tol)) # Reset for next test self.a = type(self.a)(acopy) # Type with scalar self.a -= self.s self.assertEqual(self.scalar,self.s) self.assertTrue(self.sub_s.almost_equal_elem(self.a,tol=self.tol)) # Input that should raise an error self.assertRaises(TypeError, self.a.__isub__, self.bad) self.assertRaises(ValueError, self.a.__isub__, self.bad2) else: with self.context: self.assertRaises(TypeError, self.a.__isub__,self.s) self.assertRaises(TypeError, self.a.__isub__,self.b) def test_pow(self): # Make copy to see we don't overwrite acopy = type(self.a)(self.a) with self.context: # From CPU c1 = self.a ** 2 c2 = self.a ** -1.5 self.assertEqual(acopy,self.a) self.assertTrue(self.pow1.almost_equal_elem(c1,tol=self.tol)) self.assertTrue(self.pow2.almost_equal_elem(c2,tol=self.tol)) def test_abs(self): # Make copy to see we don't overwrite acopy = type(self.a)(self.a) # We want to check that absolute value behaves correctly no matter # what quadrant it's in. t1 = self.a * 1 t2 = self.a * -1 t3 = self.a * 1j t4 = self.a * -1j with self.context: c1 = abs(t1) c2 = abs(t2) c3 = abs(t3) c4 = abs(t4) self.assertEqual(self.a,acopy) # Because complex arrays can involve floating-point math, we # must use almost-equal comparisons, esp. 
on the GPU self.assertTrue(self.abs.almost_equal_norm(c1,tol=self.tol)) self.assertTrue(self.abs.almost_equal_norm(c2,tol=self.tol)) self.assertTrue(self.abs.almost_equal_norm(c3,tol=self.tol)) self.assertTrue(self.abs.almost_equal_norm(c4,tol=self.tol)) def test_real(self): # Make copy to see we don't overwrite acopy = type(self.a)(self.a) with self.context: c = self.a.real() self.assertEqual(self.a,acopy) self.assertEqual(self.real,c) def test_imag(self): # Make copy to see we don't overwrite acopy = type(self.a)(self.a) with self.context: c = self.a.imag() self.assertEqual(self.a,acopy) self.assertEqual(self.imag,c) def test_conj(self): # Make copy to see we don't overwrite acopy = type(self.a)(self.a) with self.context: c = self.a.conj() self.assertEqual(self.a,acopy) self.assertEqual(self.conj,c) def test_cumsum(self): # Make copy to see we don't overwrite acopy = type(self.a)(self.a) with self.context: c = self.a.cumsum() self.assertEqual(self.a,acopy) self.assertTrue(self.cumsum.almost_equal_elem(c,tol=self.tol)) def test_sum(self): # Make copy to see we don't overwrite acopy = type(self.a)(self.a) with self.context: # From CPU c = self.a.sum() self.assertEqual(self.a,acopy) # Hand calculate the relative tolerance for a scalar answer self.assertTrue(abs(c-self.sum)<=self.tol*abs(self.sum)) def test_dot(self): # Make copies to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) with self.context: c = self.a.dot(self.b) self.assertEqual(self.a,acopy) self.assertEqual(self.b,bcopy) # Hand calculate the relative tolerance for a scalar answer self.assertTrue(abs(c-self.dot)<=self.tol*abs(self.dot)) def test_inner(self): # Make copies to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) with self.context: # CPU with CPU c = self.a.inner(self.b) self.assertEqual(self.a,acopy) self.assertEqual(self.b,bcopy) # Hand calculate the relative tolerance for a scalar answer self.assertTrue(abs(c-self.inner)<=self.tol*abs(self.inner)) # Input that should raise an error self.assertRaises(TypeError, self.a.inner, self.bad) self.assertRaises(ValueError, self.a.inner, self.bad2) def test_weighted_inner(self): # Make copies to see we don't overwrite acopy = type(self.a)(self.a) bcopy = type(self.b)(self.b) wcopy = type(self.w)(self.w) with self.context: # CPU with CPU c = self.a.weighted_inner(self.b, self.w) self.assertEqual(self.a,acopy) self.assertEqual(self.b,bcopy) self.assertEqual(self.w,wcopy) # Hand calculate the relative tolerance for a scalar answer self.assertTrue(abs(c-self.weighted_inner)<=self.tol*abs(self.weighted_inner)) # Input that should raise an error self.assertRaises(TypeError, self.a.weighted_inner, self.bad, self.w) self.assertRaises(ValueError, self.a.weighted_inner, self.bad2, self.w) def test_max(self): if self.kind == 'real': # Make a copy to see we don't overwrite acopy = type(self.a)(self.a) with self.context: c = self.a.max() self.assertEqual(self.a,acopy) self.assertEqual(self.max,c) def test_min(self): if self.kind == 'real': # Make a copy to see we don't overwrite acopy = type(self.a)(self.a) with self.context: c = self.a.min() self.assertEqual(self.a,acopy) self.assertEqual(self.min,c) def test_view(self): rtypes = { complex64: float32, complex128: float64} if self.kind == 'complex': rtype = rtypes[self.dtype] # Create an array that is the complex array # reinterpreted as real c_cmp = self.type([5,1,3,3,1,5],dtype=rtype,**self.kwds) d_cmp = self.type([5+2j,3+3j,1+5j],dtype=self.dtype,**self.kwds) with 
self.context: c = self.a.view(rtype) # Check that we correctly created the view self.assertEqual(c,c_cmp) # That the memory locations are the same self.assertEqual(self.a.ptr,c.ptr) # And that changing the view changes the original c[1] = 2.0 self.assertEqual(self.a,d_cmp)
38,174
43.235226
105
py
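The module docstring above prescribes the pattern for scheme-aware tests; a bare-bones skeleton of that pattern (MyFeature and test_something are placeholders):

import unittest
from utils import parse_args_all_schemes, simple_exit

_scheme, _context = parse_args_all_schemes('MyFeature')

class TestMyFeature(unittest.TestCase):
    def setUp(self):
        self.scheme = _scheme
        self.context = _context

    def test_something(self):
        with self.context:
            pass  # scheme-dependent assertions go here

suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestMyFeature))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)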
pycbc
pycbc-master/test/test_pnutils.py
# Copyright (C) 2012 Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are the unit tests for the pycbc.pnutils module.
"""
import unittest
import numpy
from pycbc.pnutils import *
from pycbc.scheme import *
from utils import parse_args_cpu_only, simple_exit

# We only need CPU tests
parse_args_cpu_only("PN Utilities")

class TestUtils(unittest.TestCase):
    def test_mass1_mass2_to_tau0_tau3(self):
        result = mass1_mass2_to_tau0_tau3(3.0, 5.0, 15.0)
        answer = (63.039052988077955, 2.353532999897545)
        self.assertAlmostEqual(result[0]/answer[0], 1, places=6)
        self.assertAlmostEqual(result[1]/answer[1], 1, places=6)

    def test_tau0_tau3_to_mtotal_eta(self):
        result = tau0_tau3_to_mtotal_eta(93.84928959285253,
                                         2.9198487498891126, 20.0)
        answer = [5., 4.*1./5./5.]
        self.assertAlmostEqual(result[0]/answer[0], 1, places=6)
        self.assertAlmostEqual(result[1]/answer[1], 1, places=6)

    def test_tau0_tau3_to_mass1_mass2(self):
        result = tau0_tau3_to_mass1_mass2(12.410035910174642,
                                          0.9266455525603574, 30.0)
        answer = [6., 2.]
        self.assertAlmostEqual(result[0]/answer[0], 1, places=6)
        self.assertAlmostEqual(result[1]/answer[1], 1, places=6)

    def test_mass1_mass2_to_mtotal_eta(self):
        result = mass1_mass2_to_mtotal_eta(5, 10)
        answer = [15.0, 0.22222222222222221]
        self.assertAlmostEqual(result[0]/answer[0], 1, places=6)
        self.assertAlmostEqual(result[1]/answer[1], 1, places=6)

    def test_mass1_mass2_to_mchirp_eta(self):
        result = mass1_mass2_to_mchirp_eta(5, 10)
        answer = [6.0836434189320574, 0.22222222222222224]
        self.assertAlmostEqual(result[0]/answer[0], 1, places=6)
        self.assertAlmostEqual(result[1]/answer[1], 1, places=6)

    def test_mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(self):
        # with no spin
        result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(1.4, 1.4, 0., 0.)
        for i in range(3):
            self.assertAlmostEqual(result[i], 0, places=6)
        # with spin
        result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(10., 1.4, 0.9, 0.1)
        answer = [7.208723197, 3.251802285, 243.2697314]
        for r, a in zip(result, answer):
            self.assertAlmostEqual(r / a, 1, places=6)
        result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(5., 5., 0.5, -0.7)
        answer = [-0.7833333333, 0.07250000000, -24.59479718]
        for r, a in zip(result, answer):
            self.assertAlmostEqual(r / a, 1, places=6)
        # using array arguments
        mass1 = numpy.array([1.4, 10., 5., 5.])
        mass2 = numpy.array([1.4, 1.4, 5., 5.])
        spin1 = numpy.array([0., 0.9, 0.5, -0.7])
        spin2 = numpy.array([0., 0.1, -0.7, 0.5])
        answer = numpy.array([
            [0., 0., 0.],
            [7.208723197, 3.251802285, 243.2697314],
            [-0.7833333333, 0.07250000000, -24.59479718],
            [-0.7833333333, 0.07250000000, -24.59479718]
        ]).T
        result = mass1_mass2_spin1z_spin2z_to_beta_sigma_gamma(
            mass1, mass2, spin1, spin2)
        for error in (result - answer).ravel():
            self.assertAlmostEqual(error, 0, places=6)

suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestUtils))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
4,562
42.04717
85
py
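At a fixed lower frequency the tau0/tau3 conversions above are mutual inverses, which gives a quick self-check. A sketch (the mass and frequency values are arbitrary):

from pycbc.pnutils import mass1_mass2_to_tau0_tau3, tau0_tau3_to_mass1_mass2

f_lower = 20.0
tau0, tau3 = mass1_mass2_to_tau0_tau3(6.0, 2.0, f_lower)
m1, m2 = tau0_tau3_to_mass1_mass2(tau0, tau3, f_lower)
print(m1, m2)   # ~6.0, ~2.0, up to numerical inversion error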
pycbc
pycbc-master/test/test_psd.py
# Copyright (C) 2012 Tito Dal Canton, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
'''
These are the unittests for the pycbc PSD module.
'''
import os
import tempfile
import pycbc
import pycbc.psd
from pycbc.types import TimeSeries, FrequencySeries
from pycbc.fft import ifft
from pycbc.fft.fftw import set_measure_level
import unittest
import numpy
from utils import parse_args_all_schemes, simple_exit

set_measure_level(0)

_scheme, _context = parse_args_all_schemes("PSD")


class TestPSD(unittest.TestCase):
    def setUp(self):
        self.scheme = _scheme
        self.context = _context
        self.psd_len = 1024
        self.psd_delta_f = 0.1
        self.psd_low_freq_cutoff = 10.
        # generate 1/f noise for testing PSD estimation
        noise_size = 524288
        sample_freq = 4096.
        delta_f = sample_freq / noise_size
        numpy.random.seed(132435)
        fd_size = noise_size // 2 + 1
        noise = numpy.random.normal(loc=0, scale=1, size=fd_size) + \
            1j * numpy.random.normal(loc=0, scale=1, size=fd_size)
        noise_model = 1. / numpy.linspace(1., 100., fd_size)
        noise *= noise_model / numpy.sqrt(delta_f) / 2
        noise[0] = noise[0].real
        noise_fs = FrequencySeries(noise, delta_f=delta_f)
        self.noise = TimeSeries(numpy.zeros(noise_size),
                                delta_t=1. / sample_freq)
        ifft(noise_fs, self.noise)

    def test_analytical(self):
        """Basic test of lalsimulation's analytical noise PSDs"""
        with self.context:
            psd_list = pycbc.psd.analytical.get_lalsim_psd_list()
            self.assertTrue(psd_list)
            for psd_name in psd_list:
                psd = pycbc.psd.analytical.from_string(
                    psd_name, self.psd_len, self.psd_delta_f,
                    self.psd_low_freq_cutoff)
                psd_min = psd.min()
                self.assertTrue(psd_min >= 0,
                                msg=(psd_name + ': negative values'))
                self.assertTrue(psd.min() < 1e-40,
                                msg=(psd_name + ': unreasonably high minimum'))

    def test_read(self):
        """Test reading PSDs from text files"""
        test_data = numpy.zeros((self.psd_len, 2))
        test_data[:, 0] = numpy.linspace(
            0., (self.psd_len - 1) * self.psd_delta_f, self.psd_len)
        test_data[:, 1] = numpy.sqrt(test_data[:, 0])
        file_desc, file_name = tempfile.mkstemp()
        os.close(file_desc)
        numpy.savetxt(file_name, test_data)
        test_data[test_data[:, 0] < self.psd_low_freq_cutoff, 1] = 0.
        with self.context:
            psd = pycbc.psd.read.from_txt(file_name, self.psd_len,
                                          self.psd_delta_f,
                                          self.psd_low_freq_cutoff,
                                          is_asd_file=True)
            self.assertAlmostEqual(abs(psd - test_data[:, 1] ** 2).max(), 0)
        os.unlink(file_name)

    def test_estimate_welch(self):
        """Test estimating PSDs from data using Welch's method"""
        for seg_len in (2048, 4096, 8192):
            noise_model = (numpy.linspace(1., 100., seg_len//2 + 1)) ** (-2)
            for seg_stride in (seg_len, seg_len//2):
                for method in ('mean', 'median', 'median-mean'):
                    with self.context:
                        psd = pycbc.psd.welch(self.noise, seg_len=seg_len,
                                              seg_stride=seg_stride,
                                              avg_method=method)
                        error = (psd.numpy() - noise_model) / noise_model
                        err_rms = numpy.sqrt(numpy.mean(error ** 2))
                        self.assertTrue(
                            err_rms < 0.2,
                            msg='seg_len=%d seg_stride=%d method=%s -> rms=%.3f'
                            % (seg_len, seg_stride, method, err_rms))

    def test_truncation(self):
        """Test inverse PSD truncation"""
        for seg_len in (2048, 4096, 8192):
            noise_model = (numpy.linspace(1., 100., seg_len//2 + 1)) ** (-2)
            for max_len in (1024, 512, 256):
                with self.context:
                    psd = pycbc.psd.welch(self.noise, seg_len=seg_len,
                                          seg_stride=seg_len//2,
                                          avg_method='mean')
                    psd_trunc = pycbc.psd.inverse_spectrum_truncation(
                        psd, max_len,
                        low_frequency_cutoff=self.psd_low_freq_cutoff)
                    freq = psd.sample_frequencies.numpy()
                    error = (psd.numpy() - noise_model) / noise_model
                    error = error[freq > self.psd_low_freq_cutoff]
                    err_rms = numpy.sqrt(numpy.mean(error ** 2))
                    self.assertTrue(
                        err_rms < 0.1,
                        msg='seg_len=%d max_len=%d -> rms=%.3f'
                        % (seg_len, max_len, err_rms))


suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestPSD))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
5,913
43.134328
97
py
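test_estimate_welch above drives pycbc.psd.welch over several segment lengths, strides and averaging methods. A minimal standalone sketch of the same call on white noise:

# Minimal sketch of the Welch PSD estimate exercised by test_estimate_welch.
import numpy
from pycbc.types import TimeSeries
import pycbc.psd

data = TimeSeries(numpy.random.normal(size=65536), delta_t=1.0 / 4096)
psd = pycbc.psd.welch(data, seg_len=2048, seg_stride=1024,
                      avg_method='median')
print(len(psd), psd.delta_f)  # frequency resolution is 4096/2048 = 2 Hz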
pycbc
pycbc-master/test/test_io_live.py
# Copyright (C) 2018 Tito Dal Canton
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

import unittest
import os
import shutil
import random
import tempfile
import itertools
import numpy as np
from utils import parse_args_cpu_only, simple_exit
from pycbc.types import TimeSeries, FrequencySeries
from pycbc.io.live import CandidateForGraceDB
from pycbc.io.ligolw import LIGOLWContentHandler
from ligo.lw import lsctables
from ligo.lw import utils as ligolw_utils
from lal import series as lalseries

# if we have the GraceDb module then we can do deeper tests,
# otherwise just fall back to quicker ones
try:
    from ligo.gracedb.rest import GraceDb
except ImportError:
    GraceDb = None

parse_args_cpu_only("io.live")


class TestIOLive(unittest.TestCase):
    def setUp(self):
        self.template = {'template_id': 0, 'mass1': 10, 'mass2': 11,
                         'spin1x': 0, 'spin1y': 0, 'spin1z': 0,
                         'spin2x': 0, 'spin2y': 0, 'spin2z': 0}
        self.possible_ifos = 'H1 L1 V1 K1 I1'.split()

    def do_test(self, n_ifos, n_ifos_extra):
        # choose a random selection of interferometers
        # n_ifos will be used to generate the simulated trigger including
        # significance followup
        # n_ifos_extra will be used for sky loc only
        all_ifos = random.sample(self.possible_ifos, n_ifos + n_ifos_extra)
        trig_ifos = all_ifos[0:n_ifos]
        # take 2 ifos to represent the initial coinc trigger
        coinc_ifos = all_ifos[0:2]

        results = {'foreground/stat': np.random.uniform(4, 20),
                   'foreground/ifar': np.random.uniform(0.01, 1000)}
        skyloc_data = {}

        for ifo in all_ifos:
            offset = 10000 + np.random.uniform(-0.02, 0.02)
            amplitude = np.random.uniform(4, 20)

            # generate a mock SNR time series with a peak
            n = 201
            dt = 1. / 2048.
            t = np.arange(n) * dt
            t_peak = dt * n / 2
            snr = np.exp(-(t - t_peak) ** 2 * 3e-3 ** -2) * amplitude
            snr_series = TimeSeries((snr + 1j * 0).astype(np.complex64),
                                    delta_t=dt, epoch=offset)

            # generate a mock PSD
            psd_samples = np.random.exponential(size=1024)
            psd = FrequencySeries(psd_samples, delta_f=1.)

            # fill in the various fields
            if ifo in trig_ifos:
                base = 'foreground/' + ifo + '/'
                results[base + 'end_time'] = t_peak + offset
                results[base + 'snr'] = amplitude
                results[base + 'sigmasq'] = np.random.uniform(1e6, 2e6)
            skyloc_data[ifo] = {'snr_series': snr_series, 'psd': psd}

        for ifo, k in itertools.product(trig_ifos, self.template):
            results['foreground/' + ifo + '/' + k] = self.template[k]

        channel_names = {ifo: 'TEST' for ifo in all_ifos}
        kwargs = {'psds': {ifo: skyloc_data[ifo]['psd'] for ifo in all_ifos},
                  'low_frequency_cutoff': 20.,
                  'skyloc_data': skyloc_data,
                  'channel_names': channel_names}
        coinc = CandidateForGraceDB(coinc_ifos, trig_ifos, results, **kwargs)

        tempdir = tempfile.mkdtemp()
        coinc_file_name = os.path.join(tempdir, 'coinc.xml.gz')

        if GraceDb is not None:
            # pretend to upload the event to GraceDB.
            # The upload will fail, but it should not raise an exception
            # and it should still leave the event file around
            coinc.upload(coinc_file_name, gracedb_server='localhost',
                         testing=True)
        else:
            # no GraceDb module, so just save the coinc file
            coinc.save(coinc_file_name)

        # read back and check the coinc document
        read_coinc = ligolw_utils.load_filename(
            coinc_file_name, verbose=False,
            contenthandler=LIGOLWContentHandler)
        single_table = lsctables.SnglInspiralTable.get_table(read_coinc)
        self.assertEqual(len(single_table), len(all_ifos))
        coinc_table = lsctables.CoincInspiralTable.get_table(read_coinc)
        self.assertEqual(len(coinc_table), 1)

        # make sure lalseries can read the PSDs
        psd_doc = ligolw_utils.load_filename(
            coinc_file_name, verbose=False,
            contenthandler=lalseries.PSDContentHandler)
        psd_dict = lalseries.read_psd_xmldoc(psd_doc)
        self.assertEqual(set(psd_dict.keys()), set(all_ifos))

        shutil.rmtree(tempdir)

    def test_2_ifos_no_followup(self):
        self.do_test(2, 0)

    def test_3_ifos_no_followup(self):
        self.do_test(3, 0)

    def test_4_ifos_no_followup(self):
        self.do_test(4, 0)

    def test_5_ifos_no_followup(self):
        self.do_test(5, 0)

    def test_2_ifos_1_followup(self):
        self.do_test(2, 1)

    def test_2_ifos_2_followup(self):
        self.do_test(2, 2)

    def test_2_ifos_3_followup(self):
        self.do_test(2, 3)

    def test_3_ifos_1_followup(self):
        self.do_test(3, 1)

    def test_3_ifos_2_followup(self):
        self.do_test(3, 2)

    def test_4_ifos_1_followup(self):
        self.do_test(4, 1)


suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestIOLive))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
6,282
34.902857
77
py
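The mock SNR peak that do_test builds for each detector is just a Gaussian bump stored as a complex TimeSeries. For reference, a standalone sketch of that construction with a fixed amplitude:

# Minimal sketch of the mock complex SNR series built inside do_test above.
import numpy as np
from pycbc.types import TimeSeries

n, dt, amplitude, offset = 201, 1.0 / 2048, 8.0, 10000.0
t = np.arange(n) * dt
snr = np.exp(-(t - dt * n / 2) ** 2 * 3e-3 ** -2) * amplitude
snr_series = TimeSeries((snr + 1j * 0).astype(np.complex64),
                        delta_t=dt, epoch=offset)
print(float(snr_series.end_time - snr_series.start_time))  # ~0.1 s of data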
pycbc
pycbc-master/test/test_dq.py
# Copyright (C) 2019 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are the unittests for the data quality query methods in pycbc
"""
import unittest
from utils import simple_exit
from pycbc.dq import query_flag, query_cumulative_flags, query_str


class TestDataQualityFlags(unittest.TestCase):
    def setUp(self, *args):
        pass

    def test_direct_empty_return(self):
        segs = query_flag('H1', 'DATA', 1126051217, 1126051217 + 1000,
                          cache=True)
        self.assertTrue(len(segs) == 0)

    def test_direct_query(self):
        segs = query_flag('H1', 'DATA', 1126051217, 1126051217 + 100000,
                          cache=True)
        self.assertTrue(len(segs) > 0)

    def test_veto_flag(self):
        d = query_flag('L1', 'data', 1126051217, 1126051217 + 100000,
                       cache=True)
        v1n = query_flag('L1', 'CBC_HW_INJ', 1126051217,
                         1126051217 + 100000, cache=True)
        v1 = query_flag('L1', 'CBC_HW_INJ', 1126051217,
                        1126051217 + 100000, cache=True)
        self.assertTrue(abs((v1 + v1n).coalesce() - d) == 0)

        v2n = query_flag('L1', 'CBC_CAT2', 1126051217,
                         1126051217 + 100000, cache=True)
        v2 = query_flag('L1', 'CBC_CAT2_VETO', 1126051217,
                        1126051217 + 100000, cache=True)
        self.assertTrue(abs((v2 + v2n).coalesce() - d) == 0)

    def test_cumulative_query(self):
        segs1 = query_flag('H1', 'CBC_HW_INJ', 1126051217,
                           1126051217 + 100000, cache=True)
        segs2 = query_flag('H1', 'BURST_HW_INJ', 1126051217,
                           1126051217 + 100000, cache=True)
        segs = query_cumulative_flags('H1', ['CBC_HW_INJ', 'BURST_HW_INJ'],
                                      1126051217, 1126051217 + 100000,
                                      cache=True)
        csegs = (segs1 + segs2).coalesce()
        self.assertTrue(abs(csegs - segs) == 0)

    def test_bounds(self):
        segs = query_cumulative_flags('H1', ['DATA'], 1126051217,
                                      1126051217 + 100000, cache=True)
        segs_all = query_cumulative_flags(
            'H1', ['DATA'], 1126051217, 1126051217 + 100000,
            bounds={'DATA': (1126051217, 1126051217 + 100000)}, cache=True)
        segs_none = query_cumulative_flags(
            'H1', ['DATA'], 1126051217, 1126051217 + 100000,
            bounds={'DATA': (0, 10)}, cache=True)
        self.assertTrue(abs(segs) == abs(segs_all))
        self.assertTrue(abs(segs) > abs(segs_none))

    def test_padding(self):
        segs = query_cumulative_flags('H1', ['DATA'], 1126051217,
                                      1126051217 + 100000, cache=True)
        segs2 = query_cumulative_flags('H1', ['DATA'], 1126051217,
                                       1126051217 + 100000,
                                       padding={'DATA': (8, -8)}, cache=True)
        self.assertTrue(abs(segs) > abs(segs2))

    def test_query_str(self):
        d = query_flag('H1', 'data', 1126051217, 1126051217 + 100000)
        d1 = query_str('H1', '+data', 1126051217, 1126051217 + 100000)
        d2 = query_str('H1', '+H1:data', 1126051217, 1126051217 + 100000)
        d3 = query_str('H1', '+data[1126051217:1127051217]',
                       1126051217, 1126051217 + 100000)
        d4 = query_str('H1', '+data<0:0>[1126051217:1127051217]',
                       1126051217, 1126051217 + 100000)
        self.assertTrue(abs(d - d1) == 0)
        self.assertTrue(abs(d - d2) == 0)
        self.assertTrue(abs(d - d3) == 0)
        self.assertTrue(abs(d - d4) == 0)

        d5 = query_str('L1', '+data', 1126051217, 1126051217 + 100000)
        d6 = query_str('H1', '+L1:data', 1126051217, 1126051217 + 100000)
        self.assertTrue(abs(d5 - d6) == 0)

        d7 = query_flag('H1', 'CBC_HW_INJ', 1126051217,
                        1126051217 + 100000, cache=True)
        d8 = query_str('H1', '+data,-CBC_HW_INJ',
                       1126051217, 1126051217 + 100000)
        self.assertTrue(abs((d - d7) - d8) == 0)


suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestDataQualityFlags))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
5,132
42.5
100
py
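test_direct_query above asks the segment database for the H1 'DATA' flag over a fixed GPS window. The same call works standalone; note it needs network access to the segment server:

# Minimal sketch of a direct flag query like test_direct_query above.
from pycbc.dq import query_flag

segs = query_flag('H1', 'DATA', 1126051217, 1126051217 + 100000)
print(len(segs), 'segments,', abs(segs), 'seconds of livetime')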
pycbc
pycbc-master/test/test_array_lal.py
# Copyright (C) 2012 Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are the unittests for the lal() conversion methods of the pycbc
array and series types
"""
import unittest
from pycbc.types import *
from pycbc.scheme import *
from lal import LIGOTimeGPS as LTG
from utils import parse_args_all_schemes, simple_exit

_scheme, _context = parse_args_all_schemes("lal() method")


class TestUtils(unittest.TestCase):
    def setUp(self, *args):
        self.context = _context
        self.delta_t = 1.0 / 4096
        self.epoch = LTG(0, 0)
        self.at = TimeSeries([1], delta_t=self.delta_t, dtype=float32,
                             epoch=self.epoch)
        self.bt = TimeSeries([1], delta_t=self.delta_t, dtype=float64,
                             epoch=self.epoch)
        self.ct = TimeSeries([1], delta_t=self.delta_t, dtype=complex64,
                             epoch=self.epoch)
        self.dt = TimeSeries([1], delta_t=self.delta_t, dtype=complex128,
                             epoch=self.epoch)
        self.a = Array([1], dtype=float32)
        self.b = Array([1], dtype=float64)
        self.c = Array([1], dtype=complex64)
        self.d = Array([1], dtype=complex128)
        self.af = FrequencySeries([1], delta_f=self.delta_t, dtype=float32,
                                  epoch=self.epoch)
        self.bf = FrequencySeries([1], delta_f=self.delta_t, dtype=float64,
                                  epoch=self.epoch)
        self.cf = FrequencySeries([1], delta_f=self.delta_t, dtype=complex64,
                                  epoch=self.epoch)
        self.df = FrequencySeries([1], delta_f=self.delta_t, dtype=complex128,
                                  epoch=self.epoch)

    if _scheme == 'cpu':
        def test_array_to_lal(self):
            al = self.a.lal()
            self.assertEqual(al.data.dtype, self.a.dtype)
            self.assertEqual(al.data[0], self.a[0])
            al = self.b.lal()
            self.assertEqual(al.data.dtype, self.b.dtype)
            self.assertEqual(al.data[0], self.b[0])
            al = self.c.lal()
            self.assertEqual(al.data.dtype, self.c.dtype)
            self.assertEqual(al.data[0], self.c[0])
            al = self.d.lal()
            self.assertEqual(al.data.dtype, self.d.dtype)
            self.assertEqual(al.data[0], self.d[0])

        def test_timeseries_to_lal(self):
            al = self.at.lal()
            self.assertEqual(al.data.data.dtype, self.at.dtype)
            self.assertEqual(al.data.data[0], self.at[0])
            self.assertEqual(al.deltaT, self.at.delta_t)
            al = self.bt.lal()
            self.assertEqual(al.data.data.dtype, self.bt.dtype)
            self.assertEqual(al.data.data[0], self.bt[0])
            self.assertEqual(al.deltaT, self.bt.delta_t)
            al = self.ct.lal()
            self.assertEqual(al.data.data.dtype, self.ct.dtype)
            self.assertEqual(al.data.data[0], self.ct[0])
            self.assertEqual(al.deltaT, self.ct.delta_t)
            al = self.dt.lal()
            self.assertEqual(al.data.data.dtype, self.dt.dtype)
            self.assertEqual(al.data.data[0], self.dt[0])
            self.assertEqual(al.deltaT, self.dt.delta_t)

        def test_frequencyseries_to_lal(self):
            al = self.af.lal()
            self.assertEqual(al.data.data.dtype, self.af.dtype)
            self.assertEqual(al.data.data[0], self.af[0])
            self.assertEqual(al.deltaF, self.af.delta_f)
            al = self.bf.lal()
            self.assertEqual(al.data.data.dtype, self.bf.dtype)
            self.assertEqual(al.data.data[0], self.bf[0])
            self.assertEqual(al.deltaF, self.bf.delta_f)
            al = self.cf.lal()
            self.assertEqual(al.data.data.dtype, self.cf.dtype)
            self.assertEqual(al.data.data[0], self.cf[0])
            self.assertEqual(al.deltaF, self.cf.delta_f)
            al = self.df.lal()
            self.assertEqual(al.data.data.dtype, self.df.dtype)
            self.assertEqual(al.data.data[0], self.df[0])
            self.assertEqual(al.deltaF, self.df.delta_f)
    else:
        def test_array_lal_errors(self):
            with self.context:
                self.assertRaises(TypeError, self.a.lal)


suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestUtils))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
5,083
41.722689
95
py
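The conversions tested above are available directly on any pycbc series object when running on the CPU. A minimal sketch:

# Minimal sketch of the lal() conversion checked above (CPU scheme only).
import numpy
from pycbc.types import TimeSeries

ts = TimeSeries([1.0, 2.0, 3.0], delta_t=1.0 / 4096, dtype=numpy.float64)
lal_ts = ts.lal()  # a LAL REAL8 time series for float64 input
print(lal_ts.deltaT, lal_ts.data.data)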
pycbc
pycbc-master/test/test_fft_unthreaded.py
# Copyright (C) 2012 Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are the unit-tests for the pycbc.fft subpackage, testing only unthreaded
backends for the various schemes.
"""
import logging
import pycbc.fft
import unittest
from utils import parse_args_all_schemes, simple_exit
from fft_base import _BaseTestFFTClass

_scheme, _context = parse_args_all_schemes("FFT")

# Most of the work is now done in fft_base. Below are factories for
# creating a test for each backend of each scheme.

# Get our list of backends:
backends = pycbc.fft.get_backend_names()

# Numpy will warn not to use its class interface, silence it.
logging.disable(logging.WARNING)

FFTTestClasses = []
for backend in backends:
    # This creates, for each backend, a new class derived from
    # both _BaseTestFFTClass and unittest.TestCase, and with
    # the additional property 'self.backend' set to the value
    # of backend. One such class for each backend is appended
    # to the list
    kdict = {'backends': [backend], 'scheme': _scheme, 'context': _context}
    klass = type('{0}_{1}_test'.format(_scheme, backend),
                 (_BaseTestFFTClass,), kdict)
    klass.__test__ = True
    vars()[klass.__name__] = klass
    FFTTestClasses.append(klass)
    del klass

# Finally, we create suites and run them
if __name__ == '__main__':
    suite = unittest.TestSuite()
    for klass in FFTTestClasses:
        suite.addTest(unittest.TestLoader().loadTestsFromTestCase(klass))
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
2,475
33.388889
79
py
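The class factory above stamps out one TestCase per FFT backend. The backend list it iterates over can be inspected directly; the exact contents depend on how PyCBC was installed:

# Minimal sketch: the backend list the factory loop above iterates over.
import pycbc.fft

print(pycbc.fft.get_backend_names())  # e.g. ['fftw', 'mkl', 'numpy'] on CPU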
pycbc
pycbc-master/test/bankvetotest.py
from pycbc.types import *
from pycbc.noise.gaussian import *
from pycbc.filter import *
from pycbc.waveform import *
from pycbc.vetoes import *
import pycbc.psd

sr = 4096.0
dt = 1.0 / sr
bl = 256
df = 1.0 / bl
N = int(sr * bl)
n = int(N / 2 + 1)

psd = pycbc.psd.from_string("aLIGOZeroDetHighPower", n, df, 14)
strain = noise_from_psd(N, dt, psd, seed=0)

htildep, htildec = get_fd_waveform(approximant="TaylorF2", mass1=10, mass2=10,
                                   f_lower=15, delta_f=df)
htildep.resize(n)
snr, corr, norm = matched_filter_core(htildep, strain, psd,
                                      low_frequency_cutoff=15)

# TEST 1
bank_tilde1, _ = get_fd_waveform(approximant="TaylorF2", mass1=1.4, mass2=1.4,
                                 f_lower=15, delta_f=df)
bank_tilde2, _ = get_fd_waveform(approximant="TaylorF2", mass1=2.0, mass2=2.0,
                                 f_lower=15, delta_f=df)
bank_tilde3, _ = get_fd_waveform(approximant="TaylorF2", mass1=4.0, mass2=4.0,
                                 f_lower=15, delta_f=df)
bank_tilde4, _ = get_fd_waveform(approximant="TaylorF2", mass1=9.0, mass2=9.0,
                                 f_lower=15, delta_f=df)
bank_tilde5, _ = get_fd_waveform(approximant="TaylorF2", mass1=6.0, mass2=1.4,
                                 f_lower=15, delta_f=df)
bank_tilde1.resize(n)
bank_tilde2.resize(n)
bank_tilde3.resize(n)
bank_tilde4.resize(n)
bank_tilde5.resize(n)

bank_veto_bank = [bank_tilde1, bank_tilde2, bank_tilde3, bank_tilde4,
                  bank_tilde5]
bank_veto_curr_overlaps = []
bank_snrs = []
bank_norms = []
for bank_template in bank_veto_bank:
    # For every bank veto template compute overlap between template
    # and the data
    curr_bank_snr, _, curr_bank_norm = matched_filter_core(
        bank_template, strain, psd, low_frequency_cutoff=15)
    # SNR time series stored here
    bank_snrs.append(curr_bank_snr)
    # Template normalization factor stored here
    bank_norms.append(curr_bank_norm)
    bank_veto_curr_overlaps.append(
        overlap_cplx(htildep, bank_template, psd=psd,
                     low_frequency_cutoff=15))

bank_veto = bank_chisq_from_filters(snr, norm, bank_snrs, bank_norms,
                                    bank_veto_curr_overlaps)
#numpy.savetxt('BV_TEST1.txt',bank_veto)

# TEST 2
bank_tilde1, _ = get_fd_waveform(approximant="TaylorF2", mass1=9.9, mass2=9.9,
                                 f_lower=15, delta_f=df)
bank_tilde2, _ = get_fd_waveform(approximant="TaylorF2", mass1=9.9, mass2=10.,
                                 f_lower=15, delta_f=df)
bank_tilde3, _ = get_fd_waveform(approximant="TaylorF2", mass1=9.9, mass2=10.1,
                                 f_lower=15, delta_f=df)
bank_tilde4, _ = get_fd_waveform(approximant="TaylorF2", mass1=10., mass2=10.1,
                                 f_lower=15, delta_f=df)
bank_tilde5, _ = get_fd_waveform(approximant="TaylorF2", mass1=10.1, mass2=10.1,
                                 f_lower=15, delta_f=df)
bank_tilde1.resize(n)
bank_tilde2.resize(n)
bank_tilde3.resize(n)
bank_tilde4.resize(n)
bank_tilde5.resize(n)

bank_veto_bank = [bank_tilde1, bank_tilde2, bank_tilde3, bank_tilde4,
                  bank_tilde5]
bank_veto_curr_overlaps = []
bank_snrs = []
bank_norms = []
for bank_template in bank_veto_bank:
    # For every bank veto template compute overlap between template
    # and the data
    curr_bank_snr, _, curr_bank_norm = matched_filter_core(
        bank_template, strain, psd, low_frequency_cutoff=15)
    # SNR time series stored here
    bank_snrs.append(curr_bank_snr)
    # Template normalization factor stored here
    bank_norms.append(curr_bank_norm)
    bank_veto_curr_overlaps.append(
        overlap_cplx(htildep, bank_template, psd=psd,
                     low_frequency_cutoff=15))
    test1, _, test2 = matched_filter_core(bank_template, htildep, psd=psd,
                                          low_frequency_cutoff=15)

sigmasq1 = sigmasq(htildep, psd, 15, None)
sigmasq2 = sigmasq(bank_template, psd, 15, None)

bank_veto = bank_chisq_from_filters(snr, norm, bank_snrs, bank_norms,
                                    bank_veto_curr_overlaps)
#numpy.savetxt('BV_TEST2.txt',bank_veto)
3,650
38.258065
103
py
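The script above accumulates one complex overlap per bank-veto template. A single such overlap, isolated from the loop, looks like this:

# Minimal sketch of one template/template overlap of the kind collected
# into bank_veto_curr_overlaps above.
from pycbc.waveform import get_fd_waveform
from pycbc.filter import overlap_cplx
import pycbc.psd

df = 1.0 / 256
n = int(4096.0 * 256 / 2) + 1
psd = pycbc.psd.from_string("aLIGOZeroDetHighPower", n, df, 14)
a, _ = get_fd_waveform(approximant="TaylorF2", mass1=10, mass2=10,
                       f_lower=15, delta_f=df)
b, _ = get_fd_waveform(approximant="TaylorF2", mass1=1.4, mass2=1.4,
                       f_lower=15, delta_f=df)
a.resize(n)
b.resize(n)
print(overlap_cplx(a, b, psd=psd, low_frequency_cutoff=15))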
pycbc
pycbc-master/test/test_distributions.py
# Copyright (C) 2017 Christopher M. Biwer
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
These are the unittests for distributions in the pycbc.distributions
subpackage.
"""
import itertools
import numpy
import os
import unittest
from pycbc import distributions
from pycbc.inference import entropy
from utils import parse_args_cpu_only
from utils import simple_exit
from pycbc.workflow import WorkflowConfigParser

# distributions to exclude from one-dimensional distribution unit tests
# some of these distributions have their own specific unit test
EXCLUDE_DIST_NAMES = ["fromfile", "arbitrary", "external",
                      "external_func_fromfile", "fisher_sky",
                      "uniform_solidangle", "uniform_sky",
                      "independent_chip_chieff",
                      "uniform_f0_tau", "fixed_samples"]

# tests only need to happen on the CPU
parse_args_cpu_only("Distributions")


def cartesian(arrays):
    """ Returns a cartesian product from a list of iterables.
    """
    return numpy.array([numpy.array(element)
                        for element in itertools.product(*arrays)])


class TestDistributions(unittest.TestCase):
    def setUp(self):

        # set random seed
        numpy.random.seed(1024)

        # path to example configuration file for testing
        config_path = "/".join([os.path.dirname(os.path.realpath(__file__)),
                                "../examples/distributions/example.ini"])

        # get a set of simulated command line options for
        # configuration file reading
        class Arguments(object):
            config_overrides = []
            config_delete = []
            config_files = [config_path]
        self.opts = Arguments()

        # read configuration files
        self.cp = WorkflowConfigParser.from_cli(self.opts)
        self.variable_args, self.static_args = \
            distributions.read_params_from_config(self.cp)
        self.constraints = distributions.read_constraints_from_config(
            self.cp, static_args=self.static_args)

        # read distributions
        self.dists = distributions.read_distributions_from_config(self.cp)

        # check that all distributions will be tested
        for dname in distributions.distribs:
            dclass = distributions.distribs[dname]
            if (not numpy.any([isinstance(dist, dclass)
                               for dist in self.dists])
                    and dname not in EXCLUDE_DIST_NAMES):
                raise ValueError("There is no test for {}".format(dname))

    def test_pdf_rvs(self):
        """ Check the Kullback-Leibler divergence between draws of random
        samples from the distribution and the probability density function
        of the distribution. This implementation only works for
        one-dimensional distributions.
        """

        # set threshold for KL divergence
        threshold = 0.1

        # number of samples in random draw for test
        n_samples = int(1e6)

        # step size to take in PDF evaluation
        step = 0.1

        # loop over distributions
        for dist in self.dists:
            if dist.name in EXCLUDE_DIST_NAMES:
                continue
            for param in dist.params:

                # get min and max
                hist_min = dist.bounds[param][0]
                hist_max = dist.bounds[param][1]

                # generate some random draws
                samples = dist.rvs(n_samples)[param]

                # get the PDF
                x = numpy.arange(hist_min, hist_max, step)
                pdf = numpy.array([dist.pdf(**{param: xx}) for xx in x])

                # compute the KL divergence and check if below threshold
                kl_val = entropy.kl(samples, pdf, bins=pdf.size, pdf2=True,
                                    hist_min=hist_min, hist_max=hist_max)
                if not (kl_val < threshold):
                    raise ValueError(
                        "Class {} KL divergence is {} which is "
                        "greater than the threshold "
                        "of {}".format(dist.name, kl_val, threshold))

    def test_pdf_logpdf(self):
        """ Checks that the probability density function (PDF) is within some
        tolerance of the natural logarithm of the PDF.
        This implementation is for one-dimensional distributions.
        """

        # assign tolerance for element-wise ratio of logarithm of PDF
        tolerance = 0.01

        # step size to take in PDF evaluation
        step = 0.1

        # loop over distributions
        for dist in self.dists:
            if dist.name in EXCLUDE_DIST_NAMES:
                continue
            for param in dist.params:

                # get min and max
                hist_min = dist.bounds[param][0]
                hist_max = dist.bounds[param][1]

                # get the PDF and logarithm of the PDF from the distribution
                x = numpy.arange(hist_min, hist_max, step)
                pdf = numpy.array([dist.pdf(**{param: xx}) for xx in x])
                logpdf = numpy.array([dist.logpdf(**{param: xx}) for xx in x])

                # find the logarithm of the PDF
                pdf_log = numpy.log(pdf)

                # see if each element in the ratio of these two logarithm of
                # PDF values is within the specified tolerance
                if not numpy.all(abs(1.0 - logpdf / pdf_log) < tolerance):
                    raise ValueError("The PDF and logarithm of the PDF "
                                     "functions for distribution {} "
                                     "do not agree".format(dist.name))

    def test_solid_angle(self):
        """ The uniform solid angle and uniform sky position distributions
        are two independent one-dimensional distributions. This test checks
        that the two independent one-dimensional distributions agree by
        comparing the `rvs`, `pdf`, and `logpdf` functions' output.
        """

        # set tolerance for comparing PDF and logPDF functions
        tolerance = 0.01

        # set threshold for KL divergence test
        threshold = 0.1

        # number of random draws for KL divergence test
        n_samples = int(1e6)

        # create generic angular distributions for test
        sin_dist = distributions.SinAngle(theta=(0, numpy.pi))
        cos_dist = distributions.CosAngle(theta=(-numpy.pi/2.0, numpy.pi/2.0))
        ang_dist = distributions.UniformAngle(theta=(0, numpy.pi*2.0))

        # step size for PDF calculation
        step = 0.1

        # valid range of parameters
        polar_sin = numpy.arange(0, numpy.pi, step)
        polar_cos = numpy.arange(-numpy.pi, numpy.pi, step)
        azimuthal = numpy.arange(0, 2 * numpy.pi, step)

        # get Cartesian product to explore the two-dimensional space
        cart_sin = cartesian([polar_sin, azimuthal])

        # loop over distributions
        for dist in self.dists:
            if dist.name == distributions.UniformSolidAngle.name:
                polar_vals = polar_sin
                polar_dist = sin_dist
            elif dist.name == distributions.UniformSky.name:
                polar_vals = polar_cos
                polar_dist = cos_dist
            else:
                continue

            # check PDF equivalent
            pdf_1 = numpy.array([dist.pdf(**{dist.polar_angle: p,
                                             dist.azimuthal_angle: a})
                                 for p, a in cart_sin])
            pdf_2 = numpy.array([polar_dist.pdf(**{"theta": p}) *
                                 ang_dist.pdf(**{"theta": a})
                                 for p, a in cart_sin])
            # Catch and silence warnings here
            with numpy.errstate(invalid="ignore", divide='ignore'):
                if not (numpy.all(numpy.nan_to_num(abs(1.0 - pdf_1 / pdf_2))
                                  < tolerance)):
                    raise ValueError("The {} distribution PDF does not match "
                                     "its component "
                                     "distributions.".format(dist.name))

            # check logarithm of PDF equivalent
            pdf_1 = numpy.array([dist.logpdf(**{dist.polar_angle: p,
                                                dist.azimuthal_angle: a})
                                 for p, a in cart_sin])
            pdf_2 = numpy.array([polar_dist.logpdf(**{"theta": p}) +
                                 ang_dist.logpdf(**{"theta": a})
                                 for p, a in cart_sin])
            if not (numpy.all(numpy.nan_to_num(abs(1.0 - pdf_1 / pdf_2))
                              < tolerance)):
                raise ValueError("The {} distribution PDF does not match "
                                 "its component "
                                 "distributions.".format(dist.name))

            # check random draws from polar angle equivalent
            ang_1 = dist.rvs(n_samples)[dist.polar_angle]
            ang_2 = polar_dist.rvs(n_samples)["theta"]
            kl_val = entropy.kl(ang_1, ang_2, bins=polar_vals.size,
                                hist_min=polar_vals.min(),
                                hist_max=polar_vals.max())
            if not (kl_val < threshold):
                raise ValueError(
                    "Class {} KL divergence is {} which is "
                    "greater than the threshold for the polar angle "
                    "of {}".format(dist.name, kl_val, threshold))

            # check random draws from azimuthal angle equivalent
            ang_1 = dist.rvs(n_samples)[dist.azimuthal_angle]
            ang_2 = ang_dist.rvs(n_samples)["theta"]
            kl_val = entropy.kl(ang_1, ang_2, bins=azimuthal.size,
                                hist_min=azimuthal.min(),
                                hist_max=azimuthal.max())
            if not (kl_val < threshold):
                raise ValueError(
                    "Class {} KL divergence is {} which is "
                    "greater than the threshold for the azimuthal angle "
                    "of {}".format(dist.name, kl_val, threshold))


suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestDistributions))

if __name__ == "__main__":
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
11,170
40.838951
79
py
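The KL-divergence test above leans on the common rvs/pdf interface shared by all pycbc distributions. A minimal sketch of that pattern, using a simple uniform distribution as an illustrative choice (not one of the distributions configured in example.ini):

# Minimal sketch of the rvs/pdf pattern exercised by test_pdf_rvs above.
from pycbc import distributions

dist = distributions.Uniform(mass1=(1.0, 10.0))
samples = dist.rvs(1000)['mass1']
print(samples.min(), samples.max(), dist.pdf(mass1=5.0))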
pycbc
pycbc-master/test/test_autochisq.py
from pycbc.fft.fftw import set_measure_level
set_measure_level(0)

from pycbc.filter import matched_filter_core
from pycbc.types import Array, TimeSeries, FrequencySeries
from pycbc.waveform import *
from pycbc.vetoes import *
import numpy as np
from math import cos, sin, pi, exp
import unittest
from utils import parse_args_all_schemes, simple_exit
import time

_scheme, _context = parse_args_all_schemes("Auto Chi-squared Veto")


class TestAutochisquare(unittest.TestCase):
    def setUp(self):
        self.Msun = 4.92549095e-6
        self.sample_rate = 4096
        self.segment_length = 256
        self.low_frequency_cutoff = 30.0

        # chirp params
        self.m1 = 2.0
        self.m2 = 2.5
        self.del_t = 1.0/self.sample_rate
        self.Dl = 40.0
        self.iota = 1.0
        self.phi_c = 2.0
        self.tc_indx = 86*self.sample_rate  ## offset from the beginning of a segment
        self.fmax = 1.0/(6.**1.5 * pi * (self.m1+self.m2)*self.Msun)

        self.zeta = 1.0
        self.thetaS = 0.5
        self.phiS = 2.781

        self.Fp = 0.5*cos(2.0*self.zeta)*(1.0 + cos(self.thetaS)*cos(self.thetaS))*cos(2.0*self.phiS) - \
            sin(2.*self.zeta)*cos(self.thetaS)*sin(2.*self.phiS)
        self.Fc = 0.5*sin(2.0*self.zeta)*(1.0 + cos(self.thetaS)*cos(self.thetaS))*cos(2.0*self.phiS) + \
            cos(2.*self.zeta)*sin(self.thetaS)*sin(2.*self.phiS)

        # params of sin-gaussian
        self.Q = 1.e-1
        self.om = 200.0*pi*2.0

        # use flat psd
        self.seg_len_idx = self.segment_length * self.sample_rate
        self.psd_len = int(self.seg_len_idx/2 + 1)
        self.Psd = np.ones(self.psd_len)*2.0e-46

        # generate waveform and chirp signal
        hp, hc = get_td_waveform(approximant="SpinTaylorT5", mass1=self.m1,
                                 mass2=self.m2, delta_t=self.del_t,
                                 f_lower=self.low_frequency_cutoff,
                                 distance=self.Dl, inclination=self.iota,
                                 coa_phase=self.phi_c)

        # signal, which is noiseless data
        thp = np.zeros(self.seg_len_idx)
        thp[self.tc_indx:len(hp)+self.tc_indx] = hp
        thc = np.zeros(self.seg_len_idx)
        thc[self.tc_indx:len(hc)+self.tc_indx] = hc

        fct = 10.0/15.21377
        self.sig1 = fct*(self.Fp*thp + self.Fc*thc)

        #### template
        h = np.zeros(self.seg_len_idx)
        h[0:len(hp)] = hp
        hpt = TimeSeries(h, self.del_t)
        self.htilde = make_frequency_series(hpt)

        # generate sin-gaussian signal
        time = np.arange(0, len(hp))*self.del_t
        Nby2 = int(len(hp)/2)
        sngt = np.zeros(len(hp))
        for i in range(len(hp)):
            sngt[i] = 9.0e-21*exp(-(time[i]-time[Nby2])**2/self.Q)*sin(self.om*time[i])

        self.sig2 = np.zeros(self.seg_len_idx)
        self.sig2[self.tc_indx:len(sngt)+self.tc_indx] = sngt

    def test_chirp(self):
        ### use a chirp as a signal
        sigt = TimeSeries(self.sig1, self.del_t)
        sig_tilde = make_frequency_series(sigt)

        del_f = sig_tilde.get_delta_f()
        psd = FrequencySeries(self.Psd, del_f)
        flow = self.low_frequency_cutoff

        with _context:
            hautocor, hacorfr, hnrm = matched_filter_core(
                self.htilde, self.htilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)
            hautocor = hautocor * float(np.real(1./hautocor[0]))

            snr, cor, nrm = matched_filter_core(
                self.htilde, sig_tilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)

        hacor = Array(hautocor, copy=True)
        indx = np.array([352250, 352256, 352260])
        snr = snr*nrm

        with _context:
            dof, achisq, indices = \
                autochisq_from_precomputed(snr, snr, hacor, indx, stride=3,
                                           num_points=20)
        obt_snr = abs(snr[indices[1]])
        obt_ach = achisq[1]
        self.assertTrue(obt_snr > 10.0 and obt_snr < 12.0)
        self.assertTrue(obt_ach < 3.e-3)
        self.assertTrue(achisq[0] > 20.0)
        self.assertTrue(achisq[2] > 20.0)

        #with _context:
        #    dof, achi_list = autochisq(self.htilde, sig_tilde, psd, stride=3, num_points=20, \
        #        low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax, max_snr=True)
        #self.assertTrue(obt_snr == achi_list[0, 1])
        #self.assertTrue(obt_ach == achi_list[0, 2])
        # for i in range(1, len(achi_list)):
        #     self.assertTrue(achi_list[i,2] > 4.0)

    def test_sg(self):
        ### use a sin-gaussian as a signal
        sigt = TimeSeries(self.sig2, self.del_t)
        sig_tilde = make_frequency_series(sigt)

        del_f = sig_tilde.get_delta_f()
        psd = FrequencySeries(self.Psd, del_f)
        flow = self.low_frequency_cutoff

        with _context:
            hautocor, hacorfr, hnrm = matched_filter_core(
                self.htilde, self.htilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)
            hautocor = hautocor * float(np.real(1./hautocor[0]))

            snr, cor, nrm = matched_filter_core(
                self.htilde, sig_tilde, psd=psd,
                low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax)

        hacor = Array(hautocor.real(), copy=True)
        indx = np.array([301440, 301450, 301460])
        snr = snr*nrm

        with _context:
            dof, achisq, indices = \
                autochisq_from_precomputed(snr, snr, hacor, indx, stride=3,
                                           num_points=20)
        obt_snr = abs(snr[indices[1]])
        obt_ach = achisq[1]
        self.assertTrue(obt_snr > 12.0 and obt_snr < 15.0)
        self.assertTrue(obt_ach > 6.8e3)
        self.assertTrue(achisq[0] > 6.8e3)
        self.assertTrue(achisq[2] > 6.8e3)

        # with _context:
        #     dof, achi_list = autochisq(self.htilde, sig_tilde, psd, stride=3, num_points=20, \
        #         low_frequency_cutoff=flow, high_frequency_cutoff=self.fmax, max_snr=True)
        #self.assertTrue(obt_snr == achi_list[0, 1])
        #self.assertTrue(obt_ach == achi_list[0, 2])
        # for i in range(1, len(achi_list)):
        #     self.assertTrue(achi_list[i,2] > 2.e3)


suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestAutochisquare))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
6,519
32.265306
105
py
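Both tests above revolve around matched filtering a template against data via matched_filter_core. A minimal sketch of the same step using the higher-level matched_filter wrapper; the TaylorT4 approximant and the parameter values are illustrative choices, not taken from the test:

# Minimal sketch of the matched-filter step at the core of the tests above.
from pycbc.filter import matched_filter
from pycbc.waveform import get_td_waveform
from pycbc.psd import aLIGOZeroDetHighPower
from pycbc.noise import noise_from_psd

delta_t = 1.0 / 4096
tlen = 256 * 4096
psd = aLIGOZeroDetHighPower(tlen // 2 + 1, 1.0 / 256, 20.0)
data = noise_from_psd(tlen, delta_t, psd, seed=0)
hp, _ = get_td_waveform(approximant="TaylorT4", mass1=2.0, mass2=2.5,
                        delta_t=delta_t, f_lower=30.0)
hp.resize(tlen)
snr = matched_filter(hp, data, psd=psd, low_frequency_cutoff=30.0)
print(abs(snr).max())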
pycbc
pycbc-master/test/test_resample.py
# Copyright (C) 2012 Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are the unittests for the pycbc.filter.resample module
"""
import unittest
from pycbc.types import *
from pycbc.filter import *
from pycbc.scheme import *
from utils import parse_args_all_schemes, simple_exit
from numpy.random import uniform
import scipy.signal
from pycbc.filter.resample import lfilter

_scheme, _context = parse_args_all_schemes("Resampling")


class TestUtils(unittest.TestCase):
    def setUp(self, *args):
        self.scheme = _scheme
        self.context = _context
        self.delta_t = 1.0 / 4096
        self.target_delta_t = 1.0 / 1024
        self.a = TimeSeries([1, 2, 3, 4], delta_t=self.delta_t, dtype=float32)
        self.b = TimeSeries([1, 2, 3, 4], delta_t=self.delta_t, dtype=float64)
        self.c = TimeSeries([1, 2, 3, 4], delta_t=self.delta_t, dtype=complex64)
        self.d = Array([1, 2, 3, 4], dtype=float32)

    if _scheme == 'cpu':
        def test_resample_float32(self):
            ra = resample_to_delta_t(self.a, self.target_delta_t)
            self.assertAlmostEqual(ra[0], 0.00696246)
            ra = resample_to_delta_t(self.a, self.delta_t)
            self.assertAlmostEqual(ra[0], 1)

        def test_resample_float64(self):
            rb = resample_to_delta_t(self.b, self.target_delta_t)
            self.assertAlmostEqual(rb[0], 0.00696246)
            rb = resample_to_delta_t(self.b, self.delta_t)
            self.assertAlmostEqual(rb[0], 1)

    def test_resample_errors(self):
        self.assertRaises(TypeError, resample_to_delta_t, self.c,
                          self.target_delta_t)
        self.assertRaises(TypeError, resample_to_delta_t, self.d,
                          self.target_delta_t)
        if self.scheme != 'cpu':
            with self.context:
                self.assertRaises(TypeError, resample_to_delta_t, self.a,
                                  self.target_delta_t)

    def test_lfilter(self):
        "Check our hand written lfilter"
        for csize, vsize in [(1024, 4302300), (10, 1000)]:
            c = uniform(-10, 10, size=csize)
            ts = TimeSeries(uniform(-1, 1, size=vsize), delta_t=self.delta_t)
            ref = scipy.signal.lfilter(c, 1.0, ts)
            test = lfilter(c, ts)

            # These only agree where there is no fft wraparound,
            # so exclude the corrupted region from the test
            ref = ref[len(c):]
            test = test[len(c):]

            maxreldiff = ((ref - test) / ref).max()
            self.assertTrue(isinstance(test, TimeSeries))
            self.assertTrue(maxreldiff < 1e-7)


suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestUtils))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
3,646
37.797872
94
py
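The resampling tested above reduces a 4096 Hz series to 1024 Hz. A minimal standalone sketch on a longer input, where the 4:1 reduction in sample count is easy to see:

# Minimal sketch of the downsampling checked in test_resample_float64 above.
import numpy
from pycbc.types import TimeSeries
from pycbc.filter import resample_to_delta_t

ts = TimeSeries(numpy.random.normal(size=4096), delta_t=1.0 / 4096,
                dtype=numpy.float64)
ts_low = resample_to_delta_t(ts, 1.0 / 1024)
print(len(ts), '->', len(ts_low))  # 4096 -> 1024 samples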
pycbc
pycbc-master/test/lalsim.py
# Copyright (C) 2013 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are simple unit tests for lalsimulation
"""
import unittest
import copy
import numpy
import lal, lalsimulation
from pycbc.filter import match, overlap, sigma, make_frequency_series
from pycbc.waveform import td_approximants, fd_approximants, \
    get_td_waveform, get_fd_waveform, TimeSeries
import optparse
from utils import simple_exit, _check_scheme_cpu

parser = optparse.OptionParser()

parser.add_option('--scheme', '-s', action='callback', type='choice',
                  choices=('cpu', 'cuda'), default='cpu', dest='scheme',
                  callback=_check_scheme_cpu,
                  help=optparse.SUPPRESS_HELP)
parser.add_option('--device-num', '-d', action='store', type='int',
                  dest='devicenum', default=0,
                  help=optparse.SUPPRESS_HELP)
parser.add_option('--show-plots', action='store_true',
                  help='show the plots generated in this test suite')
parser.add_option('--save-plots', action='store_true',
                  help='save the plots generated in this test suite')
parser.add_option('--approximant', type='choice',
                  choices=td_approximants() + fd_approximants(),
                  help="Choices are %s" % str(td_approximants() +
                                              fd_approximants()))
parser.add_option('--mass1', type=float, default=10,
                  help="[default: %default]")
parser.add_option('--mass2', type=float, default=9,
                  help="[default: %default]")
parser.add_option('--spin1x', type=float, default=0,
                  help="[default: %default]")
parser.add_option('--spin1y', type=float, default=0,
                  help="[default: %default]")
parser.add_option('--spin1z', type=float, default=0,
                  help="[default: %default]")
parser.add_option('--spin2x', type=float, default=0,
                  help="[default: %default]")
parser.add_option('--spin2y', type=float, default=0,
                  help="[default: %default]")
parser.add_option('--spin2z', type=float, default=0,
                  help="[default: %default]")
parser.add_option('--lambda1', type=float, default=0,
                  help="[default: %default]")
parser.add_option('--lambda2', type=float, default=0,
                  help="[default: %default]")
parser.add_option('--coa-phase', type=float, default=0,
                  help="[default: %default]")
parser.add_option('--inclination', type=float, default=0,
                  help="[default: %default]")
parser.add_option('--delta-t', type=float, default=1.0/8192,
                  help="[default: %default]")
parser.add_option('--delta-f', type=float, default=1.0/256,
                  help="[default: %default]")
parser.add_option('--f-lower', type=float, default=30,
                  help="[default: %default]")
parser.add_option('--phase-order', type=int, default=-1,
                  help="[default: %default]")
parser.add_option('--amplitude-order', type=int, default=-1,
                  help="[default: %default]")
parser.add_option('--spin-order', type=int, default=-1,
                  help="[default: %default]")
parser.add_option('--tidal-order', type=int, default=-1,
                  help="[default: %default]")

(opt, args) = parser.parse_args()

print(72*'=')
print("Running {0} unit tests for {1}:".format('CPU',
                                               "Lalsimulation Waveforms"))

import matplotlib
if not opt.show_plots:
    matplotlib.use('Agg')
import pylab


def get_waveform(p, **kwds):
    """ Given the input parameters get me the waveform, whether it is TD or FD
    """
    params = copy.copy(p.__dict__)
    params.update(kwds)
    if params['approximant'] in td_approximants():
        return get_td_waveform(**params)
    else:
        return get_fd_waveform(**params)


class TestLALSimulation(unittest.TestCase):
    def setUp(self, *args):
        self.save_plots = opt.save_plots
        self.show_plots = opt.show_plots
        self.plot_dir = "."

        class params(object):
            pass
        self.p = params()

        # Override my parameters with the program input arguments
        self.p.__dict__.update(vars(opt))
        if 'approximant' in self.kwds:
            self.p.approximant = self.kwds['approximant']

        from pycbc import version
        self.version_txt = \
            "pycbc: %s %s\n" % (version.git_hash, version.date) + \
            "lalsimulation: %s %s" % (
                lalsimulation.SimulationVCSIdentInfo.vcsId,
                lalsimulation.SimulationVCSIdentInfo.vcsDate)

    def test_varying_orbital_phase(self):
        #"""Check that the waveform is consistent under phase changes
        #"""
        if self.p.approximant in td_approximants():
            sample_attr = 'sample_times'
        else:
            sample_attr = 'sample_frequencies'

        f = pylab.figure()
        pylab.axes([.1, .2, 0.8, 0.70])
        hp_ref, hc_ref = get_waveform(self.p, coa_phase=0)
        pylab.plot(getattr(hp_ref, sample_attr), hp_ref.real(), label="phiref")

        hp, hc = get_waveform(self.p, coa_phase=lal.PI/4)
        m, i = match(hp_ref, hp)
        self.assertAlmostEqual(1, m, places=2)
        o = overlap(hp_ref, hp)
        pylab.plot(getattr(hp, sample_attr), hp.real(), label="$phiref \pi/4$")

        hp, hc = get_waveform(self.p, coa_phase=lal.PI/2)
        m, i = match(hp_ref, hp)
        o = overlap(hp_ref, hp)
        self.assertAlmostEqual(1, m, places=7)
        self.assertAlmostEqual(-1, o, places=7)
        pylab.plot(getattr(hp, sample_attr), hp.real(), label="$phiref \pi/2$")

        hp, hc = get_waveform(self.p, coa_phase=lal.PI)
        m, i = match(hp_ref, hp)
        o = overlap(hp_ref, hp)
        self.assertAlmostEqual(1, m, places=7)
        self.assertAlmostEqual(1, o, places=7)
        pylab.plot(getattr(hp, sample_attr), hp.real(), label="$phiref \pi$")

        pylab.xlim(min(getattr(hp, sample_attr)),
                   max(getattr(hp, sample_attr)))
        pylab.title("Vary %s orbital phiref, h+" % self.p.approximant)
        if self.p.approximant in td_approximants():
            pylab.xlabel("Time to coalescence (s)")
        else:
            pylab.xlabel("GW Frequency (Hz)")
        pylab.ylabel("GW Strain (real part)")
        pylab.legend(loc="upper left")

        info = self.version_txt
        pylab.figtext(0.05, 0.05, info)

        if self.save_plots:
            pname = self.plot_dir + "/%s-vary-phase.png" % self.p.approximant
            pylab.savefig(pname)

        if self.show_plots:
            pylab.show()
        else:
            pylab.close(f)

    def test_distance_scaling(self):
        #""" Check that the waveform is consistent under distance changes
        #"""
        distance = 1e6
        tolerance = 1e-5
        fac = 10
        hpc, hcc = get_waveform(self.p, distance=distance)
        hpm, hcm = get_waveform(self.p, distance=distance*fac)
        hpf, hcf = get_waveform(self.p, distance=distance*fac*fac)
        hpn, hcn = get_waveform(self.p, distance=distance/fac)

        f = pylab.figure()
        pylab.axes([.1, .2, 0.8, 0.70])
        htilde = make_frequency_series(hpc)
        pylab.loglog(htilde.sample_frequencies, abs(htilde), label="D")
        htilde = make_frequency_series(hpm)
        pylab.loglog(htilde.sample_frequencies, abs(htilde),
                     label="D * %s" % fac)
        htilde = make_frequency_series(hpf)
        pylab.loglog(htilde.sample_frequencies, abs(htilde),
                     label="D * %s" % (fac*fac))
        htilde = make_frequency_series(hpn)
        pylab.loglog(htilde.sample_frequencies, abs(htilde),
                     label="D / %s" % fac)
        pylab.title("Vary %s distance, $\\tilde{h}$+" % self.p.approximant)
        pylab.xlabel("GW Frequency (Hz)")
        pylab.ylabel("GW Strain")
        pylab.legend()
        pylab.xlim(xmin=self.p.f_lower)

        info = self.version_txt
        pylab.figtext(0.05, .05, info)

        if self.save_plots:
            pname = self.plot_dir + \
                "/%s-distance-scaling.png" % self.p.approximant
            pylab.savefig(pname)

        if self.show_plots:
            pylab.show()
        else:
            pylab.close(f)

        self.assertTrue(hpc.almost_equal_elem(hpm * fac, tolerance,
                                              relative=True))
        self.assertTrue(hpc.almost_equal_elem(hpf * fac * fac, tolerance,
                                              relative=True))
        self.assertTrue(hpc.almost_equal_elem(hpn / fac, tolerance,
                                              relative=True))

    def test_nearby_waveform_agreement(self):
        #""" Check that the overlaps are consistent for nearby waveforms
        #"""
        def nearby(params):
            tol = 1e-7
            from numpy.random import uniform
            nearby_params = copy.copy(params)
            nearby_params.mass1 *= uniform(low=1-tol, high=1+tol)
            nearby_params.mass2 *= uniform(low=1-tol, high=1+tol)
            nearby_params.spin1x *= uniform(low=1-tol, high=1+tol)
            nearby_params.spin1y *= uniform(low=1-tol, high=1+tol)
            nearby_params.spin1z *= uniform(low=1-tol, high=1+tol)
            nearby_params.spin2x *= uniform(low=1-tol, high=1+tol)
            nearby_params.spin2y *= uniform(low=1-tol, high=1+tol)
            nearby_params.spin2z *= uniform(low=1-tol, high=1+tol)
            nearby_params.inclination *= uniform(low=1-tol, high=1+tol)
            nearby_params.coa_phase *= uniform(low=1-tol, high=1+tol)
            return nearby_params

        hp, hc = get_waveform(self.p)
        for i in range(10):
            p_near = nearby(self.p)
            hpn, hcn = get_waveform(p_near)
            maxlen = max(len(hpn), len(hp))
            hp.resize(maxlen)
            hpn.resize(maxlen)
            o = overlap(hp, hpn)
            self.assertAlmostEqual(1, o, places=5)

    def test_almost_equal_mass_waveform(self):
        #""" Check that the overlaps are consistent for nearby waveforms
        #"""
        def nearby(params):
            tol = 1e-7
            from numpy.random import uniform
            nearby_params = copy.copy(params)
            nearby_params.mass2 = nearby_params.mass1 * \
                uniform(low=1-tol, high=1+tol)
            nearby_params.mass1 *= uniform(low=1-tol, high=1+tol)
            nearby_params.spin1x *= uniform(low=1-tol, high=1+tol)
            nearby_params.spin1y *= uniform(low=1-tol, high=1+tol)
            nearby_params.spin1z *= uniform(low=1-tol, high=1+tol)
            nearby_params.spin2x *= uniform(low=1-tol, high=1+tol)
            nearby_params.spin2y *= uniform(low=1-tol, high=1+tol)
            nearby_params.spin2z *= uniform(low=1-tol, high=1+tol)
            nearby_params.inclination *= uniform(low=1-tol, high=1+tol)
            nearby_params.coa_phase *= uniform(low=1-tol, high=1+tol)
            return nearby_params

        for i in range(10):
            p_near = nearby(self.p)
            hpn, hcn = get_waveform(p_near)

    def test_varying_inclination(self):
        #""" Test that the waveform is consistent for changes in inclination
        #"""
        sigmas = []
        incs = numpy.arange(0, 21, 1.0) * lal.PI / 10.0

        for inc in incs:
            # WARNING: This does not properly handle the case of SpinTaylor*
            # where the spin orientation is not relative to the inclination
            hp, hc = get_waveform(self.p, inclination=inc)
            s = sigma(hp, low_frequency_cutoff=self.p.f_lower)
            sigmas.append(s)

        f = pylab.figure()
        pylab.axes([.1, .2, 0.8, 0.70])
        pylab.plot(incs, sigmas)
        pylab.title("Vary %s inclination, $\\tilde{h}$+" % self.p.approximant)
        pylab.xlabel("Inclination (radians)")
        pylab.ylabel("sigma (flat PSD)")

        info = self.version_txt
        pylab.figtext(0.05, 0.05, info)

        if self.save_plots:
            pname = self.plot_dir + \
                "/%s-vary-inclination.png" % self.p.approximant
            pylab.savefig(pname)

        if self.show_plots:
            pylab.show()
        else:
            pylab.close(f)

        self.assertAlmostEqual(sigmas[-1], sigmas[0], places=7)
        self.assertAlmostEqual(max(sigmas), sigmas[0], places=7)
        self.assertTrue(sigmas[0] > sigmas[5])

    def test_swapping_constituents(self):
        #""" Test that waveform remains unchanged under swapping both objects
        #"""
        hp, hc = get_waveform(self.p)
        hpswap, hcswap = get_waveform(self.p, mass1=self.p.mass2,
                                      mass2=self.p.mass1,
                                      spin1x=self.p.spin2x,
                                      spin1y=self.p.spin2y,
                                      spin1z=self.p.spin2z,
                                      spin2x=self.p.spin1x,
                                      spin2y=self.p.spin1y,
                                      spin2z=self.p.spin1z,
                                      lambda1=self.p.lambda2,
                                      lambda2=self.p.lambda1)
        op = overlap(hp, hpswap)
        self.assertAlmostEqual(1, op, places=7)
        oc = overlap(hc, hcswap)
        self.assertAlmostEqual(1, oc, places=7)

    def test_change_rate(self):
        #""" Test that waveform remains unchanged under changing rate
        #"""
        hp, hc = get_waveform(self.p)
        hp2dec, hc2dec = get_waveform(self.p, delta_t=self.p.delta_t*2.)
        hpdec = numpy.zeros(len(hp2dec.data))
        hcdec = numpy.zeros(len(hp2dec.data))
        for idx in range(min(len(hp2dec.data), int(len(hp.data)/2))):
            hpdec[idx] = hp.data[2*idx]
            hcdec[idx] = hc.data[2*idx]
        hpTS = TimeSeries(hpdec, delta_t=self.p.delta_t*2.,
                          epoch=hp.start_time)
        hcTS = TimeSeries(hcdec, delta_t=self.p.delta_t*2.,
                          epoch=hc.start_time)

        f = pylab.figure()
        pylab.plot(hp.sample_times, hp.data,
                   label="rate %s Hz" % "{:.0f}".format(1./self.p.delta_t))
        pylab.plot(hp2dec.sample_times, hp2dec.data,
                   label="rate %s Hz" % "{:.0f}".format(1./(self.p.delta_t*2.)))
        pylab.title("Halving %s rate, $\\tilde{h}$+" % self.p.approximant)
        pylab.xlabel("time (sec)")
        pylab.ylabel("amplitude")
        pylab.legend()

        info = self.version_txt
        pylab.figtext(0.05, 0.05, info)

        if self.save_plots:
            pname = self.plot_dir + "/%s-vary-rate.png" % self.p.approximant
            pylab.savefig(pname)

        if self.show_plots:
            pylab.show()
        else:
            pylab.close(f)

        op = overlap(hpTS, hp2dec)
        self.assertAlmostEqual(1., op, places=2)
        oc = overlap(hcTS, hc2dec)
        self.assertAlmostEqual(1., oc, places=2)


def test_maker(class_name, name, **kwds):
    class Test(class_name):
        def __init__(self, *args):
            self.kwds = kwds
            class_name.__init__(self, *args)
    Test.__name__ = "Test %s" % name
    return Test

suite = unittest.TestSuite()

if opt.approximant:
    apxs = [opt.approximant]
else:
    apxs = td_approximants() + fd_approximants()

# These waveforms fail the current sanity checks, and are not used in current
# analyses. Tracking down reasons for each of these failures is a lot of work,
# so for now I just exclude these from tests.
fail_list = ['EOBNRv2', 'HGimri', 'SEOBNRv1', 'SpinDominatedWf',
             'PhenSpinTaylor', 'PhenSpinTaylorRD', 'EccentricTD',
             'EccentricFD', 'Lackey_Tidal_2013_SEOBNRv2_ROM']

for apx in apxs:
    # The inspiral wrapper is only single precision, so we won't bother
    # checking it here. It may need different tolerances and some special
    # care.
    if apx.startswith("Inspiral-"):
        continue
    # The INTERP waveforms are designed only for filters
    if apx.endswith('_INTERP') and not opt.approximant:
        continue
    if apx in fail_list and not opt.approximant:
        # These waveforms segfault and print debugging to the screen.
        # Only test them if specifically told to do so.
        continue
    if apx in ['NR_hdf5']:
        # We'll need an example file for this. Also it will need a special
        # set of tests.
        continue
    vars()[apx] = test_maker(TestLALSimulation, apx, approximant=apx)
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(vars()[apx]))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
16,620
38.016432
143
py
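test_distance_scaling above asserts that strain scales as 1/D. The same property can be seen directly from two waveform calls; TaylorT4 and the parameter values here are illustrative choices:

# Minimal sketch of the 1/distance amplitude scaling verified above.
from pycbc.waveform import get_td_waveform

kwargs = dict(approximant="TaylorT4", mass1=10, mass2=9,
              f_lower=30, delta_t=1.0 / 8192)
hp_near, _ = get_td_waveform(distance=100, **kwargs)
hp_far, _ = get_td_waveform(distance=200, **kwargs)
print(abs(hp_near).max() / abs(hp_far).max())  # ~2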
pycbc
pycbc-master/test/test_injection.py
# Copyright (C) 2013 Tito Dal Canton, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

"""
Unit test for PyCBC's injection module.
"""

import tempfile
import lal
from pycbc.types import TimeSeries
from pycbc.detector import Detector, get_available_detectors
from pycbc.inject import InjectionSet
import unittest
import numpy
import itertools
from ligo.lw import ligolw
from ligo.lw import lsctables
from ligo.lw import utils as ligolw_utils
from utils import parse_args_cpu_only, simple_exit

# Injection tests only need to happen on the CPU
parse_args_cpu_only("Injections")


class MyInjection(object):
    def fill_sim_inspiral_row(self, row):
        # using dummy values for many fields, should work for our purposes
        row.waveform = 'TaylorT4threePointFivePN'
        row.distance = self.distance
        total_mass = self.mass1 + self.mass2
        row.mass1 = self.mass1
        row.mass2 = self.mass2
        row.eta = self.mass1 * self.mass2 / total_mass ** 2
        row.mchirp = total_mass * row.eta ** (3. / 5.)
        row.latitude = self.latitude
        row.longitude = self.longitude
        row.inclination = self.inclination
        row.polarization = self.polarization
        row.phi0 = 0
        row.f_lower = 20
        row.f_final = lal.C_SI ** 3 / \
            (6. ** (3. / 2.) * lal.PI * lal.G_SI * total_mass)
        row.spin1x = row.spin1y = row.spin1z = 0
        row.spin2x = row.spin2y = row.spin2z = 0
        row.alpha1 = 0
        row.alpha2 = 0
        row.alpha3 = 0
        row.alpha4 = 0
        row.alpha5 = 0
        row.alpha6 = 0
        row.alpha = 0
        row.beta = 0
        row.theta0 = 0
        row.psi0 = 0
        row.psi3 = 0
        row.geocent_end_time = int(self.end_time)
        row.geocent_end_time_ns = int(1e9 * (self.end_time -
                                             row.geocent_end_time))
        row.end_time_gmst = lal.GreenwichMeanSiderealTime(
            lal.LIGOTimeGPS(self.end_time))
        for d in 'lhvgt':
            row.__setattr__('eff_dist_' + d, row.distance)
            row.__setattr__(d + '_end_time', row.geocent_end_time)
            row.__setattr__(d + '_end_time_ns', row.geocent_end_time_ns)
        row.amp_order = 0
        row.coa_phase = 0
        row.bandpass = 0
        row.taper = self.taper
        row.numrel_mode_min = 0
        row.numrel_mode_max = 0
        row.numrel_data = None
        row.source = 'ANTANI'


class TestInjection(unittest.TestCase):
    def setUp(self):
        available_detectors = get_available_detectors()
        self.assertTrue('H1' in available_detectors)
        self.assertTrue('L1' in available_detectors)
        self.assertTrue('V1' in available_detectors)
        self.detectors = [Detector(d) for d in ['H1', 'L1', 'V1']]
        self.sample_rate = 4096.
        self.earth_time = lal.REARTH_SI / lal.C_SI

        # create a few random injections
        self.injections = []
        start_time = float(lal.GPSTimeNow())
        taper_choices = ('TAPER_NONE', 'TAPER_START', 'TAPER_END',
                         'TAPER_STARTEND')
        for i, taper in zip(range(20), itertools.cycle(taper_choices)):
            inj = MyInjection()
            inj.end_time = start_time + 40000 * i + \
                numpy.random.normal(scale=3600)
            random = numpy.random.uniform
            inj.mass1 = random(low=1., high=20.)
            inj.mass2 = random(low=1., high=20.)
            inj.distance = random(low=0.9, high=1.1) * 1e6 * lal.PC_SI
            inj.latitude = numpy.arccos(random(low=-1, high=1))
            inj.longitude = random(low=0, high=2 * lal.PI)
            inj.inclination = numpy.arccos(random(low=-1, high=1))
            inj.polarization = random(low=0, high=2 * lal.PI)
            inj.taper = taper
            self.injections.append(inj)

        # create LIGOLW document
        xmldoc = ligolw.Document()
        xmldoc.appendChild(ligolw.LIGO_LW())

        # create sim inspiral table, link it to document and fill it
        sim_table = lsctables.New(lsctables.SimInspiralTable)
        xmldoc.childNodes[-1].appendChild(sim_table)
        for i in range(len(self.injections)):
            row = sim_table.RowType()
            self.injections[i].fill_sim_inspiral_row(row)
            row.process_id = 0
            row.simulation_id = i
            sim_table.append(row)

        # write document to temp file
        self.inj_file = tempfile.NamedTemporaryFile(suffix='.xml')
        ligolw_utils.write_fileobj(xmldoc, self.inj_file)

    def test_injection_presence(self):
        """Verify presence of signals at expected times"""
        injections = InjectionSet(self.inj_file.name)
        for det in self.detectors:
            for inj in self.injections:
                ts = TimeSeries(numpy.zeros(int(10 * self.sample_rate)),
                                delta_t=1/self.sample_rate,
                                epoch=lal.LIGOTimeGPS(inj.end_time - 5),
                                dtype=numpy.float64)
                injections.apply(ts, det.name)
                max_amp, max_loc = ts.abs_max_loc()
                # FIXME could test amplitude and time more precisely
                self.assertTrue(max_amp > 0 and max_amp < 1e-10)
                time_error = ts.sample_times.numpy()[max_loc] - inj.end_time
                self.assertTrue(abs(time_error) < 2 * self.earth_time)

    def test_injection_absence(self):
        """Verify absence of signals outside known injection times"""
        clear_times = [
            self.injections[0].end_time - 86400,
            self.injections[-1].end_time + 86400
        ]
        injections = InjectionSet(self.inj_file.name)
        for det in self.detectors:
            for epoch in clear_times:
                ts = TimeSeries(numpy.zeros(int(10 * self.sample_rate)),
                                delta_t=1/self.sample_rate,
                                epoch=lal.LIGOTimeGPS(epoch),
                                dtype=numpy.float64)
                injections.apply(ts, det.name)
                max_amp, max_loc = ts.abs_max_loc()
                self.assertEqual(max_amp, 0)


suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestInjection))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
6986
39.859649
84
py
pycbc
pycbc-master/test/test_array.py
# Copyright (C) 2012 Alex Nitz, Andrew Miller, Tito Dal Canton, Josh Willis # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ============================================================================= # # Preamble # # ============================================================================= # ''' These are the unittests for the pycbc array type ''' import pycbc import unittest import itertools from pycbc.types import * from pycbc.scheme import * import numpy from utils import array_base, parse_args_all_schemes, simple_exit import sys import os import tempfile _scheme, _context = parse_args_all_schemes("Array") # By importing the current scheme's array type, it will make it # easier to check the array types later if _scheme == 'cuda': import pycuda import pycuda.gpuarray from pycuda.gpuarray import GPUArray as SchemeArray elif _scheme == 'cpu': from numpy import ndarray as SchemeArray from numpy import ndarray as CPUArray # ********************GENERIC ARRAY TESTS *********************** class ArrayTestBase(array_base,unittest.TestCase): __test__ = False def setUp(self): self.scheme = _scheme self.context = _context # We need to check for correct creation from all dtypes, # and errors from incorrect operations so the other precision of # odtype needs to be available as well self.other_precision = {numpy.complex64 : numpy.complex128, numpy.complex128 : numpy.complex64, numpy.float32 : numpy.float64, numpy.float64 : numpy.float32} # Number of decimal places to compare for single precision if self.dtype == numpy.float32 or self.dtype == numpy.complex64: self.places = 5 self.tol = 1e-5 # Number of decimal places to compare for double precision else: self.places = 13 self.tol = 1e-13 # We will also need to check whether dtype and odtype are real or complex, # so that we can test non-zero imaginary parts. if self.dtype == numpy.float32 or self.dtype == numpy.float64: self.kind = 'real' else: self.kind = 'complex' if self.odtype == numpy.float32 or self.odtype == numpy.float64: self.okind = 'real' else: self.okind = 'complex' # We need to tell the arithmetic test functions what our type is: self.type = Array # and for Array, there are no additional keyword arguments needed: self.kwds = {} # Now that the kinds are set, we need to call our parent method to set up all the # inputs and answers for our functions self.setNumbers() # The above call created instances for all of our inputs and various correct # outputs. But we make a copy of the scalar to check later. self.s = self.scalar # Finally, we want to have an array that we shouldn't be able to operate on, # first because the precision is wrong, and second because the length is wrong.
self.bad = Array([1,1,1],dtype = self.other_precision[self.odtype]) self.bad2 = Array([1,1,1,1], dtype = self.dtype) def test_set(self): c = self.a * 1 with self.context: # First we will check that get works properly for all # the different python syntaxes self.assertTrue(self.a[:][0] == self.alist[0:3][0]) self.assertTrue(self.a[:][1] == self.alist[0:3][1]) self.assertTrue(self.a[:][2] == self.alist[0:3][2]) self.assertRaises(IndexError,self.a[:].__getitem__,3) self.assertTrue(self.a[-1] ==self.alist[2]) self.assertTrue(self.a[-2] == self.alist[1]) self.assertTrue(self.a[1:2][0] == self.alist[1]) self.assertRaises(IndexError,self.a[1:2].__getitem__,1) self.assertTrue(self.a[:-1][0] == self.alist[0:2][0]) self.assertTrue(self.a[:-1][1] == self.alist[0:2][1]) self.assertTrue(self.a[-1:][0] == self.alist[2]) self.assertRaises(IndexError, self.a.__getitem__, 3) self.assertRaises(IndexError, self.a.__getitem__, -4) def test_numpy_init(self): with self.context: in1 = numpy.array([5,3,1],dtype=self.odtype) in2 = numpy.array([5,3,1],dtype=self.other_precision[self.odtype]) #We don't want to cast complex as real if not (self.kind == 'real' and self.okind == 'complex'): #First we must check that the dtype is correct when specified out1 = Array(in1, dtype=self.dtype) out2 = Array(in2, dtype=self.dtype) #to be sure that it is copied in1 += 1 in2 += 1 self.assertTrue(type(out1._scheme) == type(self.context)) self.assertTrue(type(out1._data) is SchemeArray) self.assertEqual(out1[0],5) self.assertEqual(out1[1],3) self.assertEqual(out1[2],1) self.assertTrue(out1.dtype==self.dtype) self.assertTrue(type(out2._scheme) == type(self.context)) self.assertTrue(type(out2._data) is SchemeArray) self.assertEqual(out2[0],5) self.assertEqual(out2[1],3) self.assertEqual(out2[2],1) self.assertTrue(out2.dtype==self.dtype) in1-=1 in2-=1 # Also, when it is unspecified out3 = Array(in1) in1 += 1 self.assertTrue(type(out3._scheme) == type(self.context)) self.assertTrue(type(out3._data) is SchemeArray) self.assertEqual(out3[0],5) self.assertEqual(out3[1],3) self.assertEqual(out3[2],1) self.assertTrue(out3.dtype==self.odtype) # Check for copy=false # On the CPU, this should be possible in3 = numpy.array([5,3,1],dtype=self.dtype) if self.scheme == 'cpu': out4 = Array(in3,copy=False) in3 += 1 self.assertTrue(out4.dtype==self.dtype) self.assertTrue(type(out4._scheme) == type(self.context)) self.assertEqual(out4[0],6) self.assertEqual(out4[1],4) self.assertEqual(out4[2],2) # If we're in different scheme, this should raise an error else: self.assertRaises(TypeError, Array, in3, copy=False) # We also need to check initialization using GPU arrays if self.scheme == 'cuda': in4 = pycuda.gpuarray.zeros(3,self.dtype) if self.scheme != 'cpu': out4 = Array(in4, copy=False) in4 += 1 self.assertTrue(type(out4._scheme) == type(self.context)) self.assertTrue(type(out4._data) is SchemeArray) self.assertEqual(out4[0],1) self.assertEqual(out4[1],1) self.assertEqual(out4[2],1) self.assertTrue(out4.dtype==self.dtype) # We should be able to create an array from the wrong dtype, and # it should be cast as float64 in5 = numpy.array([1,2,3],dtype=numpy.int32) out5 = Array(in5) in5 += 1 self.assertTrue(type(out5._scheme) == type(self.context)) self.assertTrue(type(out5._data) is SchemeArray) self.assertEqual(out5[0],1) self.assertEqual(out5[1],2) self.assertEqual(out5[2],3) # Just checking that we can make an empty array correctly empty = numpy.array([]) out6 = Array(empty) self.assertTrue(out6.dtype==numpy.float64) self.assertRaises(IndexError, 
out6.__getitem__,0) def test_array_init(self): # this array is made outside the context so we can check that an error is raised when copy = false in a GPU scheme cpuarray = Array([1,2,3]) with self.context: in1 = Array([5,3,1],dtype=self.odtype) in2 = Array([5,3,1],dtype=self.other_precision[self.odtype]) self.assertTrue(type(in1._scheme) == type(self.context)) self.assertTrue(type(in1._data) is SchemeArray) self.assertTrue(type(in2._scheme) == type(self.context)) self.assertTrue(type(in2._data) is SchemeArray) # We don't want to cast complex as real if not (self.kind=='real' and self.okind == 'complex'): # First we must check that the dtype is correct when specified out1 = Array(in1, dtype=self.dtype) out2 = Array(in2, dtype=self.dtype) # to be sure that it is copied in1 += 1 in2 += 1 self.assertTrue(type(out1._scheme) == type(self.context)) self.assertTrue(type(out1._data) is SchemeArray) self.assertEqual(out1[0],5) self.assertEqual(out1[1],3) self.assertEqual(out1[2],1) self.assertTrue(out1.dtype==self.dtype) if out1.dtype == numpy.float32: self.assertTrue(out1.precision == 'single') #self.assertTrue(out1.kind == 'real') if out1.dtype == numpy.float64: self.assertTrue(out1.precision == 'double') #self.assertTrue(out1.kind == 'real') if out1.dtype == numpy.complex64: self.assertTrue(out1.precision == 'single') #self.assertTrue(out1.kind == 'complex') if out1.dtype == numpy.complex128: self.assertTrue(out1.precision == 'double') #self.assertTrue(out1.kind == 'complex') self.assertTrue(type(out2._scheme) == type(self.context)) self.assertTrue(type(out2._data) is SchemeArray) self.assertEqual(out2[0],5) self.assertEqual(out2[1],3) self.assertEqual(out2[2],1) self.assertTrue(out2.dtype==self.dtype) in1-=1 in2-=1 # Giving complex input and specifying a real dtype should raise an error else: self.assertRaises(TypeError, Array, in1, dtype = self.dtype) self.assertRaises(TypeError, Array, in2, dtype = self.dtype) # Also, when it is unspecified out3 = Array(in1) in1 += 1 self.assertTrue(type(out3._scheme) == type(self.context)) self.assertTrue(type(out3._data) is SchemeArray) self.assertEqual(out3[0],5) self.assertEqual(out3[1],3) self.assertEqual(out3[2],1) self.assertTrue(out3.dtype==self.odtype) # We should also be able to create from a CPU Array out4 = Array(cpuarray, dtype=self.dtype) self.assertTrue(type(out4._scheme) == type(self.context)) self.assertTrue(type(out4._data) is SchemeArray) self.assertEqual(out4[0],1) self.assertEqual(out4[1],2) self.assertEqual(out4[2],3) self.assertTrue(out4.dtype==self.dtype) # Check for copy=false in3 = Array([5,3,1],dtype=self.dtype) out5 = Array(in3,copy=False) in3 += 1 self.assertTrue(type(out5._scheme) == type(self.context)) self.assertTrue(type(out5._data) is SchemeArray) self.assertEqual(out5[0],6) self.assertEqual(out5[1],4) self.assertEqual(out5[2],2) self.assertTrue(out5.dtype==self.dtype) if self.scheme != 'cpu': self.assertRaises(TypeError,Array,cpuarray,copy=False) # Just checking that we can make an empty array correctly empty = Array(numpy.array([])) out7 = Array(empty) self.assertTrue(out7.dtype==numpy.float64) self.assertRaises(IndexError, out7.__getitem__,0) # Also checking that a cpu array can't be made out of another scheme without copying if self.scheme != 'cpu': self.assertRaises(TypeError, Array, out4, copy=False) out6 = Array(out4, dtype=self.dtype) self.assertTrue(type(out6._scheme) == CPUScheme) self.assertTrue(type(out6._data) is CPUArray) self.assertEqual(out6[0],1) self.assertEqual(out6[1],2) self.assertEqual(out6[2],3) 
self.assertTrue(out6.dtype==self.dtype) def test_take(self): with self.context: if self.kind == 'real': a = Array([1,2,3,4,5,6], dtype=self.dtype) if self.kind == 'complex': a = Array([1+2j, 2+3j, 3+4j, 4+5j, 5+6j, 6+7j], dtype=self.dtype) i = numpy.array([0,4,2], dtype=numpy.int64) b = a.take(i) self.assertEqual(b[0], a[0]) self.assertEqual(b[1], a[4]) self.assertEqual(b[2], a[2]) def test_abs_max_loc(self): with self.context: if self.kind == 'real': a = Array([-1,2,3,4,5,-6], dtype=self.dtype) v = abs(-6) if self.kind == 'complex': a = Array([1+2j, 2+3j, -3+4j, 4+5j, 5+6j, -6+7j], dtype=self.dtype) v = abs(-6+7j) m, l = a.abs_max_loc() self.assertAlmostEqual(m, v, places=5) self.assertEqual(l, 5) def test_clear(self): with self.context: if self.kind == 'real': a = Array([1,2,3,4,5,6], dtype=self.dtype) if self.kind == 'complex': a = Array([1+2j, 2+3j, 3+4j, 4+5j, 5+6j, 6+7j], dtype=self.dtype) a.clear() for i in range(len(a)): self.assertEqual(a[i], 0) def test_max_loc(self): with self.context: if self.kind == 'real': a = Array([1,2,3,4,5,6], dtype=self.dtype) m, l = a.max_loc() self.assertEqual(m, 6) self.assertEqual(l, 5) def test_list_init(self): with self.context: # When specified out1 = Array([5,3,1], dtype=self.dtype) self.assertTrue(type(out1._scheme) == type(self.context)) self.assertTrue(type(out1._data) is SchemeArray) self.assertEqual(out1[0],5) self.assertEqual(out1[1],3) self.assertEqual(out1[2],1) self.assertTrue(out1.dtype==self.dtype) if out1.dtype == numpy.float32: self.assertTrue(out1.precision == 'single') #self.assertTrue(out1.kind == 'real') if out1.dtype == numpy.float64: self.assertTrue(out1.precision == 'double') #self.assertTrue(out1.kind == 'real') if out1.dtype == numpy.complex64: self.assertTrue(out1.precision == 'single') #self.assertTrue(out1.kind == 'complex') if out1.dtype == numpy.complex128: self.assertTrue(out1.precision == 'double') #self.assertTrue(out1.kind == 'complex') if self.kind == 'complex': out2 = Array([5+0j,3+0j,1+0j], dtype=self.dtype) self.assertTrue(type(out2._scheme) == type(self.context)) self.assertTrue(type(out2._data) is SchemeArray) self.assertEqual(out2[0],5) self.assertEqual(out2[1],3) self.assertEqual(out2[2],1) self.assertTrue(out2.dtype==self.dtype) else: self.assertRaises(TypeError, Array,[5+0j, 3+0j, 1+0j],dtype=self.dtype) #Also, when it is unspecified out3 = Array([5.0,3,1]) self.assertTrue(type(out3._scheme) == type(self.context)) self.assertTrue(type(out3._data) is SchemeArray) self.assertEqual(out3[0],5) self.assertEqual(out3[1],3) self.assertEqual(out3[2],1) self.assertTrue(out3.dtype==numpy.float64) out4 = Array([5+0j,3+0j,1+0j]) self.assertTrue(type(out4._scheme) == type(self.context)) self.assertTrue(type(out4._data) is SchemeArray) self.assertEqual(out4[0],5) self.assertEqual(out4[1],3) self.assertEqual(out4[2],1) self.assertTrue(out4.dtype==numpy.complex128) # Just checking that we can make an empty array correctly out7 = Array([]) self.assertTrue(out7.dtype==numpy.float64) self.assertRaises(IndexError, out7.__getitem__,0) #We also need to check the zero function out5 = zeros(3,dtype=self.dtype) out6 = zeros(3) self.assertTrue(type(out5._scheme) == type(self.context)) self.assertTrue(type(out5._data) is SchemeArray) self.assertEqual(out5[0],0) self.assertEqual(out5[1],0) self.assertEqual(out5[2],0) self.assertTrue(out5.dtype == self.dtype) self.assertTrue(type(out6._scheme) == type(self.context)) self.assertTrue(type(out6._data) is SchemeArray) self.assertEqual(out6[0],0) self.assertEqual(out6[1],0) 
self.assertEqual(out6[2],0) self.assertTrue(out6.dtype == numpy.float64) self.assertRaises(TypeError,Array,[1,2,3],copy=False) def test_save(self): with self.context: # make temporary file paths temp_file = tempfile.NamedTemporaryFile() temp_path_npy = temp_file.name + '.npy' temp_path_txt = temp_file.name + '.txt' # make a test array a_numpy = numpy.arange(100, dtype=self.dtype) a = Array(a_numpy) # test saving to Numpy array a.save(temp_path_npy) b = numpy.load(temp_path_npy) self.assertEqual(b.shape, a_numpy.shape) self.assertEqual(numpy.abs(b - a_numpy).max(), 0) os.remove(temp_path_npy) # test saving to text file a.save(temp_path_txt) b = numpy.loadtxt(temp_path_txt) if a.kind == 'complex': self.assertEqual(b.shape, (a_numpy.shape[0], 2)) b = b[:,0] + 1j * b[:,1] elif a.kind == 'real': self.assertEqual(b.shape, a_numpy.shape) self.assertEqual(numpy.abs(b - a_numpy).max(), 0) os.remove(temp_path_txt) def test_multiply_and_add(self): with self.context: if not self.kind == self.okind: # Currently this is not supported return if self.kind == 'complex': inp = Array([5+2j,3+1j,1+2j], dtype=self.dtype) out = Array([5+2j,3+1j,1+2j], dtype=self.odtype) mult_fac = 12+17j else: inp = Array([5,3,1], dtype=self.dtype) out = Array([5,3,1], dtype=self.odtype) mult_fac=12 out_check = mult_fac * inp + out out.multiply_and_add(inp, mult_fac) self.assertEqual(out[0], out_check[0]) self.assertEqual(out[1], out_check[1]) self.assertEqual(out[2], out_check[2]) def array_test_maker(dtype,odtype): class tests(ArrayTestBase): __test__ = True def __init__(self,*args): self.dtype = dtype self.odtype = odtype unittest.TestCase.__init__(self,*args) tests.__name__ = _scheme + " " + dtype.__name__ + " with " + odtype.__name__ return tests types = [ (float32,[float32,complex64]), (float64, [float64,complex128]), (complex64,[complex64,float32]), (complex128, [float64,complex128]) ] suite = unittest.TestSuite() ind = 0 for ty, oktype in types: for ot in oktype: na = 'test' + str(ind) vars()[na] = array_test_maker(ty, ot) suite.addTest(unittest.TestLoader().loadTestsFromTestCase(vars()[na])) ind += 1 if __name__ == '__main__': results = unittest.TextTestRunner(verbosity=2).run(suite) simple_exit(results)
21211
39.636015
122
py
pycbc
pycbc-master/test/test_cuts.py
""" Unit tests for cuts being applied to trigger and templates """ import unittest import argparse import copy import numpy as np from utils import simple_exit from pycbc.events import cuts, ranking from pycbc.tmpltbank import bank_conversions def parse_args(args): # Helper function to convert a list of flags/options into # an arguments structure parser = argparse.ArgumentParser() cuts.insert_cuts_option_group(parser) return parser.parse_args(args) class CutsErrorsTest(unittest.TestCase): def setUp(self): # Set up some things we will want to use in the tests: self.maxDiff = None # Tuples of inputs for tests which should fail test_parser_raises_error = [] test_parser_raises_error.append((['--template-cuts', 'this_is_not_right'], ValueError, 'incorrect_format_template_cut')) test_parser_raises_error.append((['--trigger-cuts', 'this_is_not_right'], ValueError, 'incorrect_format_trigger_cut')) test_parser_raises_error.append((['--trigger-cuts', 'chi_eff:0.9:upper'], NotImplementedError, 'template_param_given_as_trigger_cut')) test_parser_raises_error.append((['--template-cuts', 'newsnr:5.5:lower'], NotImplementedError, 'trigger_param_given_as_template_cut')) test_parser_raises_error.append((['--trigger-cuts', 'newsnr:5.5:nonsense'], NotImplementedError, 'nonsense_limit')) test_parser_raises_error.append((['--trigger-cuts', 'newsnr:notafloat:upper'], ValueError, 'threshold_not_a_float')) # Dynamically add error tests into the class for test_parser_error in test_parser_raises_error: args = parse_args(test_parser_error[0]) def check_sysexit_test(self, a=args, te=test_parser_error[1]): with self.assertRaises(te): cuts.ingest_cuts_option_group(a) setattr(CutsErrorsTest, 'test_parser_error_' + test_parser_error[2], check_sysexit_test) class CutsParserTest(unittest.TestCase): def setUp(self): # Set up some things we will want to use in the tests: self.maxDiff = None # Tuples of inputs for tests where we are checking the output tests_parser_output = [] # No cuts: empty dicts as output tests_parser_output.append(([], ({}, {}), "no_cuts_given")) # Multiple cuts on the same parameter / cut type, # should only use the strictest cut tests_parser_output.append((['--trigger-cuts', 'snr:4:lower', 'snr:5:lower'], ({('snr', np.greater): 5}, {}), "multiple_similar_cuts")) tests_parser_output.append((['--trigger-cuts', 'snr:5:lower', 'snr:4:lower'], ({('snr', np.greater): 5}, {}), "multiple_similar_cuts_2")) # Multiple cuts exactly the same, should give the warning but still complete tests_parser_output.append((['--trigger-cuts', 'snr:5:lower', 'snr:5:lower'], ({('snr', np.greater): 5}, {}), "multiple_same_cuts")) # Dynamically add value tests for the parser for test_values in tests_parser_output: args = parse_args(test_values[0]) def ingest_values_test(self, a=args, tv=test_values[1]): cuts_dicts = cuts.ingest_cuts_option_group(a) self.assertEqual(cuts_dicts, tv) setattr(CutsParserTest, 'test_parser_values_' + test_values[2], ingest_values_test) # Set up some random datasets to test the cuts: # Make this reproducible np.random.seed(1865) # Set values for triggers: n_triggers = 10000 trigger_dset = {} trigger_dset['snr'] = np.random.random(n_triggers) * 10 trigger_dset['chisq'] = np.random.random(n_triggers) * 100 trigger_dset['chisq_dof'] = np.ceil(np.random.random(n_triggers) * 100) + 2 trigger_dset['sg_chisq'] = np.random.random(n_triggers) * 2 # Set values for templates: template_dset = {} n_templates = 10000 # For one of the tests, we want to use an exact value of mass1, # so set it manually mass1 = 
list(np.random.random(n_templates - 1) * 100) + [59] mass2 = list(np.random.random(n_templates - 1) * 100) + [10] # Assert mass1 > mass2 template_dset['mass1'] = np.maximum(mass1, mass2) template_dset['mass2'] = np.minimum(mass1, mass2) # Spins must be in the range -1, 1, (but not exactly 1): def make_random_spins(n_templates): spins = np.random.normal(size=n_templates, scale=0.4) spins = np.maximum(spins, -0.998) spins = np.minimum(spins, 0.998) return spins template_dset['spin1z'] = make_random_spins(n_templates) template_dset['spin2z'] = make_random_spins(n_templates) template_dset['template_duration'] = np.random.random(n_templates) * 100 class CutsTest(unittest.TestCase): def setUp(self): # Set up some things we will want to use in the tests: self.maxDiff = None # Set up values to be tested: test_cut_output = [] # Lower limits on a trigger parameter we can read directly def snr_lower_test(trigs, trig_idx, **kwargs): return all(trigs['snr'][trig_idx] > 4) test_cut_output.append((['--trigger-cuts', 'snr:4:lower'], snr_lower_test, 'snr_cut_lower')) # Upper limits on a derived trigger parameter def newsnr_sgveto_upper_test(trigs, trig_idx, **kwargs): rwsnr = ranking.get_newsnr_sgveto(trigs) return all(rwsnr[trig_idx] < 10) test_cut_output.append((['--trigger-cuts', 'newsnr_sgveto:10:upper'], newsnr_sgveto_upper_test, 'nsnr_sgveto_cut_upper')) # Lower limit on directly-read template bank parameter def template_duration_test(temps, temp_idx, **kwargs): return all(temps['template_duration'][temp_idx] > 10) test_cut_output.append((['--template-cuts', 'template_duration:10:lower'], template_duration_test, 'template_duration_lower')) # make sure the "or equal to" is working properly def mass1_ge_test(temps, temp_idx, **kwargs): return any(temps['mass1'][temp_idx] == 59) test_cut_output.append((['--template-cuts', 'mass1:59:lower_inc'], mass1_ge_test, 'mass1_orequal')) # Upper and lower limits on a derived parameter from the bank def chi_eff_upper_lower_test(temps, temp_idx, **kwargs): chi_eff = bank_conversions.get_bank_property('chi_eff', temps, temp_idx) return all(np.logical_and(chi_eff > -0.5, chi_eff <= 0.8)) test_cut_output.append((['--template-cuts', 'chi_eff:0.8:upper', 'chi_eff:-0.5:lower_inc'], chi_eff_upper_lower_test, 'chi_eff_cut_upper_lower')) # FIXME: Once we can fake the statistic files, create a test # for template fits cuts # Dynamically add value tests for the parser for test_values in test_cut_output: args = parse_args(test_values[0]) def cut_values_test(self, a=args, test_func=test_values[1]): # Copy the global variables, as we need to use local in the tests triggers = copy.deepcopy(trigger_dset) bank = copy.deepcopy(template_dset) # Take in the arguments which define the cuts trigger_cut_dict, template_cut_dict = cuts.ingest_cuts_option_group(a) # Get out the indices which meet the cut criteria templates_idx = cuts.apply_template_cuts(bank, template_cut_dict) triggers_idx = cuts.apply_trigger_cuts(triggers, trigger_cut_dict) # Using the checks from the definition tuple, is the cut working? 
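# (each check function inspects only the indices that survived the cut)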
self.assertTrue(test_func(temps=bank, temp_idx=templates_idx, trigs=trigger_dset, trig_idx=triggers_idx)) setattr(CutsTest, 'test_cuts_correct_' + test_values[2], cut_values_test) # create and populate unittest's test suite suite = unittest.TestSuite() test_loader = unittest.TestLoader() suite.addTest(test_loader.loadTestsFromTestCase(CutsErrorsTest)) suite.addTest(test_loader.loadTestsFromTestCase(CutsParserTest)) suite.addTest(test_loader.loadTestsFromTestCase(CutsTest)) if __name__ == '__main__': results = unittest.TextTestRunner(verbosity=2).run(suite) simple_exit(results)
8904
34.907258
78
py
pycbc
pycbc-master/test/test_schemes.py
# Copyright (C) 2012 Alex Nitz, Andrew Miller, Josh Willis # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ============================================================================= # # Preamble # # ============================================================================= # ''' The tests in this file are designed to ensure that the schemes behave as intended, and that data are moved to and from the GPU as they should be, or appropriate exceptions are raised. We only attempt this for two representative functions: one basic arithmetic operation, and one that should *not* move its data (regardless of scheme). We do not specifically test that the lalwrapped functions raise exceptions from the GPU, because that test is done in the test_lalwrap unit tests. ''' import pycbc import unittest from pycbc.types import * from pycbc.scheme import * import numpy from numpy import dtype, float32, float64, complex64, complex128 import lal from utils import parse_args_all_schemes, simple_exit _scheme, _context = parse_args_all_schemes("Scheme") # By importing the current scheme's array type, it will make it # easier to check the array types later if isinstance(_context,CUDAScheme): import pycuda import pycuda.gpuarray from pycuda.gpuarray import GPUArray as SchemeArray elif isinstance(_context,CPUScheme): from numpy import ndarray as SchemeArray from numpy import ndarray as CPUArray class SchemeTestBase(unittest.TestCase): __test__ = False def setUp(self): self.context = _context self.scheme = _scheme # Determine kind (real or complex) from dtype: if self.dtype == float32 or self.dtype == float64: self.kind = 'real' else: self.kind = 'complex' if self.odtype == float32 or self.odtype == float64: self.okind = 'real' else: self.okind = 'complex' # Now set up the arrays we'll need. We run this from a factory # constructor that creates many different instances for the # various kind/precision combinations. if self.kind == 'real': self.a = Array([5,3,1],dtype=self.dtype) if self.okind == 'real': self.b = Array([10,8,6],dtype=self.odtype) self.answer = Array([50,24,6],dtype=self.dtype) else: self.b = Array([10+6j,8+4j,6+2j],dtype=self.odtype) self.answer = Array([50+30j,24+12j,6+2j],dtype=self.odtype) else: self.a = Array([5+1j,3+3j,1+5j],dtype=self.dtype) if self.okind == 'real': self.b = Array([10,8,6],dtype=self.odtype) self.answer = Array([50+10j,24+24j,6+30j],dtype=self.dtype) else: self.b = Array([10+6j,8+4j,6+2j],dtype=self.odtype) self.answer = Array([44+40j,12+36j,-4+32j],dtype=self.dtype) def test_move(self): ''' This test uses the __mul__ special method to verify that arrays are moved on and off of the GPU automatically when they should be, and that the _scheme property and array types are correct for the executing architecture.
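An array that takes no part in the arithmetic is checked as well, to
confirm that uninvolved data are not moved as a side effect.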
''' # Make some copies a1 = type(self.a)(self.a) a2 = type(self.a)(self.a) b1 = type(self.b)(self.b) with self.context: # The following should move both of a1 and b1 onto the GPU (if self.context # isn't CPU) c = a1 * b1 # Check that the data types are correct self.assertTrue(isinstance(a1._data, SchemeArray)) self.assertTrue(isinstance(b1._data, SchemeArray)) self.assertTrue(isinstance(c._data, SchemeArray)) # Check that schemes are correct self.assertTrue(isinstance(a1._scheme, type(self.context))) self.assertTrue(isinstance(b1._scheme, type(self.context))) self.assertTrue(isinstance(c._scheme, type(self.context))) # And finally check that the values are correct self.assertEqual(a1,self.a) self.assertEqual(b1,self.b) self.assertEqual(c,self.answer) # Now check that nothing about a2 has changed, since it wasn't involved # in the computation self.assertTrue(isinstance(a2._data, CPUArray)) self.assertTrue(isinstance(a2._scheme, DefaultScheme)) self.assertEqual(a2,self.a) # Now move back to the CPU, and check that everything is correctly # transferred: c = a1 * b1 # Check that schemes are correct self.assertTrue(isinstance(a1._scheme, DefaultScheme)) self.assertTrue(isinstance(b1._scheme, DefaultScheme)) self.assertTrue(isinstance(c._scheme, DefaultScheme)) # Check that the data types are correct self.assertTrue(isinstance(a1._data, CPUArray)) self.assertTrue(isinstance(b1._data, CPUArray)) self.assertTrue(isinstance(c.data, CPUArray)) # And finally check that the values are correct self.assertEqual(a1,self.a) self.assertEqual(b1,self.b) self.assertEqual(c,self.answer) def test_do_not_move(self): ''' This test checks that the __eq__ special method (invoked via the '==' operator) does *not* change the scheme or type, since it does its comparisons by copying from the CPU to GPU, but should leave the original arrays in place, with their data properties and schemes unchanged. ''' acopy = type(self.a)(self.a) with self.context: # Force a move to the GPU by trivially multiplying by one: a1 = acopy*1 a2 = acopy*1 truth = (a1 == a2) # Now verify that nothing moved self.assertTrue(isinstance(a1._scheme, type(self.context))) self.assertTrue(isinstance(a2._scheme, type(self.context))) self.assertTrue(isinstance(a1.data, SchemeArray)) self.assertTrue(isinstance(a2.data, SchemeArray)) # Now the function that creates our various classes def scheme_test_maker(dtype,odtype): class tests(SchemeTestBase): __test__ = True def __init__(self,*args): self.dtype = dtype self.odtype = odtype unittest.TestCase.__init__(self,*args) tests.__name__ = _scheme + " " + dtype.__name__ + " with " + odtype.__name__ return tests types = [ (float32,[float32,complex64]), (float64,[float64,complex128]), (complex64,[complex64,float32]), (complex128,[float64,complex128]) ] suite = unittest.TestSuite() ind = 0 for ty,oktype in types: for ot in oktype: na = 'test' + str(ind) vars()[na] = scheme_test_maker(ty,ot) suite.addTest(unittest.TestLoader().loadTestsFromTestCase(vars()[na])) ind += 1 if __name__ == '__main__': results = unittest.TextTestRunner(verbosity=2).run(suite) simple_exit(results)
7707
40.219251
87
py
pycbc
pycbc-master/test/test_frequencyseries.py
# Copyright (C) 2012 Alex Nitz, Andrew Miller, Tito Dal Canton, Josh Willis # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ============================================================================= # # Preamble # # ============================================================================= # ''' These are the unittests for the pycbc frequencyseries type ''' import pycbc import unittest from pycbc.types import * from pycbc.scheme import * import numpy import lal import sys import os import tempfile from utils import array_base, parse_args_all_schemes, simple_exit _scheme, _context = parse_args_all_schemes("FrequencySeries") # By importing the current schemes array type, it will make it # easier to check the array types later if _scheme == 'cuda': import pycuda import pycuda.gpuarray from pycuda.gpuarray import GPUArray as SchemeArray elif _scheme == 'cpu': from numpy import ndarray as SchemeArray from numpy import ndarray as CPUArray class TestFrequencySeriesBase(array_base,unittest.TestCase): __test__ = False def setUp(self): self.scheme = _scheme self.context = _context # We need to check for correct creation from all dtypes, # and errors from incorrect operations so the other precision of # odtype needs to be available as well self.other_precision = {numpy.complex64 : numpy.complex128, numpy.complex128 : numpy.complex64, numpy.float32 : numpy.float64, numpy.float64 : numpy.float32} # Number of decimal places to compare for single precision if self.dtype == numpy.float32 or self.dtype == numpy.complex64: self.places = 5 self.tol = 1e-5 # Number of decimal places to compare for double precision else: self.places = 13 self.tol = 1e-13 # We will also need to check whether dtype and odtype are real or complex, # so that we can test non-zero imaginary parts. if self.dtype == numpy.float32 or self.dtype == numpy.float64: self.kind = 'real' else: self.kind = 'complex' if self.odtype == numpy.float32 or self.odtype == numpy.float64: self.okind = 'real' else: self.okind = 'complex' # Note that self.epoch is set in the factory class constructor at the end; # we need only set self.delta_f here. self.delta_f = 0.1 # We need to tell the arithmetic test functions what our type is: self.type = FrequencySeries # and the extra keyword arguments the constructors will need: self.kwds = {'epoch': self.epoch, 'delta_f': self.delta_f} # Now that the kinds are set, we need to call our parent method to set up all the # inputs and answers for our functions self.setNumbers() # The above call created instances for all of our inputs and various correct # outputs. But we make a copy of the scalar to check later. self.s = self.scalar # Finally, we want to have an array that we shouldn't be able to operate on, # because the precision is wrong, and one where the length is wrong. 
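# bad3 and bad4 below add mismatches specific to FrequencySeries:
# a different delta_f, and a different (or missing) epoch.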
self.bad = FrequencySeries([1,1,1], 0.1, epoch=self.epoch, dtype = self.other_precision[self.odtype]) self.bad2 = FrequencySeries([1,1,1,1], 0.1, epoch=self.epoch, dtype = self.dtype) # These are FrequencySeries that have problems specific to FrequencySeries self.bad3 = FrequencySeries([1,1,1], 0.2, epoch=self.epoch, dtype = self.dtype) # This next one is actually okay for frequencyseries if self.epoch is None: self.bad4 = FrequencySeries([1,1,1], 0.1, epoch = lal.LIGOTimeGPS(1000, 1000), dtype = self.dtype) else: self.bad4 = FrequencySeries([1,1,1], 0.1, epoch=None, dtype = self.dtype) def test_numpy_init(self): with self.context: in1 = numpy.array([5,3,1],dtype=self.odtype) in2 = numpy.array([5,3,1],dtype=self.other_precision[self.odtype]) #We don't want to cast complex as real if not (self.kind == 'real' and self.okind == 'complex'): #First we must check that the dtype is correct when specified out1 = FrequencySeries(in1,0.1, dtype=self.dtype, epoch=self.epoch) out2 = FrequencySeries(in2,0.1, dtype=self.dtype, epoch=self.epoch) #to be sure that it is copied in1 += 1 in2 += 1 self.assertTrue(type(out1._scheme) == type(self.context)) self.assertTrue(type(out1._data) is SchemeArray) self.assertEqual(out1[0],5) self.assertEqual(out1[1],3) self.assertEqual(out1[2],1) self.assertTrue(out1.dtype==self.dtype) self.assertEqual(out1.delta_f, 0.1) self.assertEqual(out1._epoch, self.epoch) self.assertTrue(type(out2._scheme) == type(self.context)) self.assertTrue(type(out2._data) is SchemeArray) self.assertEqual(out2[0],5) self.assertEqual(out2[1],3) self.assertEqual(out2[2],1) self.assertTrue(out2.dtype==self.dtype) self.assertEqual(out2.delta_f,0.1) self.assertEqual(out2._epoch, self.epoch) in1-=1 in2-=1 # Also, when it is unspecified out3 = FrequencySeries(in1,0.1,epoch=self.epoch) in1 += 1 self.assertTrue(type(out3._scheme) == type(self.context)) self.assertTrue(type(out3._data) is SchemeArray) self.assertEqual(out3[0],5) self.assertEqual(out3[1],3) self.assertEqual(out3[2],1) self.assertTrue(out3.dtype==self.odtype) self.assertEqual(out3.delta_f,0.1) self.assertEqual(out3._epoch, self.epoch) # Check for copy=false # On the CPU, this should be possible in3 = numpy.array([5,3,1],dtype=self.dtype) if self.scheme == 'cpu': out4 = FrequencySeries(in3,0.1,copy=False, epoch=self.epoch) in3 += 1 self.assertTrue(out4.dtype==self.dtype) self.assertTrue(type(out4._scheme) == type(self.context)) self.assertEqual(out4[0],6) self.assertEqual(out4[1],4) self.assertEqual(out4[2],2) self.assertEqual(out4.delta_f,0.1) self.assertEqual(out4._epoch, self.epoch) # If we're in different scheme, this should raise an error else: self.assertRaises(TypeError, FrequencySeries, in3, 0.1, copy=False) # We also need to check initialization using GPU arrays if self.scheme == 'cuda': in4 = pycuda.gpuarray.zeros(3,self.dtype) if self.scheme != 'cpu': out4 = FrequencySeries(in4,0.1, copy=False, epoch=self.epoch) in4 += 1 self.assertTrue(type(out4._scheme) == type(self.context)) self.assertTrue(type(out4._data) is SchemeArray) self.assertEqual(out4[0],1) self.assertEqual(out4[1],1) self.assertEqual(out4[2],1) self.assertTrue(out4.dtype==self.dtype) self.assertEqual(out4.delta_f,0.1) self.assertEqual(out4._epoch, self.epoch) # We should be able to create an array from the wrong dtype, and # it should be cast as float64 in5 = numpy.array([1,2,3],dtype=numpy.int32) out5 = FrequencySeries(in5,0.1, epoch=self.epoch) in5 += 1 self.assertTrue(type(out5._scheme) == type(self.context)) self.assertTrue(type(out5._data) is SchemeArray) 
self.assertEqual(out5[0],1) self.assertEqual(out5[1],2) self.assertEqual(out5[2],3) self.assertEqual(out5.delta_f,0.1) self.assertEqual(out5._epoch, self.epoch) # Finally, just checking a few things specific to FrequencySeries inbad = numpy.array([],dtype=float64) self.assertRaises(ValueError, FrequencySeries, in1, -1) self.assertRaises(ValueError, FrequencySeries, inbad, .1) self.assertRaises(TypeError, FrequencySeries, in1, .1, epoch=(5,1)) def test_array_init(self): # this array is made outside the context so we can check that an error is raised when copy = false in a GPU scheme cpuarray = Array([1,2,3]) with self.context: in1 = Array([5,3,1],dtype=self.odtype) in2 = Array([5,3,1],dtype=self.other_precision[self.odtype]) self.assertTrue(type(in1._scheme) == type(self.context)) self.assertTrue(type(in1._data) is SchemeArray) self.assertTrue(type(in2._scheme) == type(self.context)) self.assertTrue(type(in2._data) is SchemeArray) # We don't want to cast complex as real if not (self.kind=='real' and self.okind == 'complex'): # First we must check that the dtype is correct when specified out1 = FrequencySeries(in1, 0.1, dtype=self.dtype, epoch=self.epoch) out2 = FrequencySeries(in2, 0.1, dtype=self.dtype, epoch=self.epoch) # to be sure that it is copied in1 += 1 in2 += 1 self.assertTrue(type(out1._scheme) == type(self.context)) self.assertTrue(type(out1._data) is SchemeArray) self.assertEqual(out1[0],5) self.assertEqual(out1[1],3) self.assertEqual(out1[2],1) self.assertTrue(out1.dtype==self.dtype) self.assertEqual(out1.delta_f, 0.1) self.assertEqual(out1._epoch, self.epoch) if out1.dtype == numpy.float32: self.assertTrue(out1.precision == 'single') #self.assertTrue(out1.kind == 'real') if out1.dtype == numpy.float64: self.assertTrue(out1.precision == 'double') #self.assertTrue(out1.kind == 'real') if out1.dtype == numpy.complex64: self.assertTrue(out1.precision == 'single') #self.assertTrue(out1.kind == 'complex') if out1.dtype == numpy.complex128: self.assertTrue(out1.precision == 'double') #self.assertTrue(out1.kind == 'complex') self.assertTrue(type(out2._scheme) == type(self.context)) self.assertTrue(type(out2._data) is SchemeArray) self.assertEqual(out2[0],5) self.assertEqual(out2[1],3) self.assertEqual(out2[2],1) self.assertTrue(out2.dtype==self.dtype) self.assertEqual(out2.delta_f, 0.1) self.assertEqual(out2._epoch, self.epoch) in1-=1 in2-=1 # Giving complex input and specifying a real dtype should raise an error else: self.assertRaises(TypeError, FrequencySeries, in1,0.1, dtype = self.dtype) self.assertRaises(TypeError, FrequencySeries, in2,0.1, dtype = self.dtype) # Also, when it is unspecified out3 = FrequencySeries(in1,0.1,epoch=self.epoch) in1 += 1 self.assertTrue(type(out3._scheme) == type(self.context)) self.assertTrue(type(out3._data) is SchemeArray) self.assertEqual(out3[0],5) self.assertEqual(out3[1],3) self.assertEqual(out3[2],1) self.assertTrue(out3.dtype==self.odtype) self.assertEqual(out3.delta_f, 0.1) self.assertEqual(out3._epoch, self.epoch) # We should also be able to create from a CPU Array out4 = FrequencySeries(cpuarray,0.1, dtype=self.dtype, epoch=self.epoch) self.assertTrue(type(out4._scheme) == type(self.context)) self.assertTrue(type(out4._data) is SchemeArray) self.assertEqual(out4[0],1) self.assertEqual(out4[1],2) self.assertEqual(out4[2],3) self.assertTrue(out4.dtype==self.dtype) self.assertEqual(out4.delta_f, 0.1) self.assertEqual(out4._epoch, self.epoch) # Check for copy=false in3 = Array([5,3,1],dtype=self.dtype) out5 = FrequencySeries(in3,0.1,copy=False, 
epoch=self.epoch) in3 += 1 self.assertTrue(type(out5._scheme) == type(self.context)) self.assertTrue(type(out5._data) is SchemeArray) self.assertEqual(out5[0],6) self.assertEqual(out5[1],4) self.assertEqual(out5[2],2) self.assertTrue(out5.dtype==self.dtype) self.assertEqual(out5.delta_f, 0.1) self.assertEqual(out5._epoch, self.epoch) if self.scheme != 'cpu': self.assertRaises(TypeError,FrequencySeries,0.1,cpuarray,copy=False) # Things specific to FrequencySeries inbad = Array(numpy.array([],dtype=float64)) self.assertRaises(ValueError, FrequencySeries, in1, -1) self.assertRaises(ValueError, FrequencySeries, inbad, .1) self.assertRaises(TypeError, FrequencySeries, in1, .1, epoch=(5,2)) # Also checking that a cpu array can't be made out of another scheme without copying if self.scheme != 'cpu': self.assertRaises(TypeError, FrequencySeries, out4, 0.1, copy=False) out6 = FrequencySeries(out4, 0.1, dtype=self.dtype, epoch=self.epoch) self.assertTrue(type(out6._scheme) == CPUScheme) self.assertTrue(type(out6._data) is CPUArray) self.assertEqual(out6[0],1) self.assertEqual(out6[1],2) self.assertEqual(out6[2],3) self.assertTrue(out6.dtype==self.dtype) self.assertEqual(out6.delta_f, 0.1) self.assertEqual(out6._epoch, self.epoch) def test_list_init(self): with self.context: # When specified out1 = FrequencySeries([5,3,1],0.1, dtype=self.dtype, epoch=self.epoch) self.assertTrue(type(out1._scheme) == type(self.context)) self.assertTrue(type(out1._data) is SchemeArray) self.assertEqual(out1[0],5) self.assertEqual(out1[1],3) self.assertEqual(out1[2],1) self.assertTrue(out1.dtype==self.dtype) self.assertEqual(out1.delta_f, 0.1) self.assertEqual(out1._epoch, self.epoch) if out1.dtype == numpy.float32: self.assertTrue(out1.precision == 'single') #self.assertTrue(out1.kind == 'real') if out1.dtype == numpy.float64: self.assertTrue(out1.precision == 'double') #self.assertTrue(out1.kind == 'real') if out1.dtype == numpy.complex64: self.assertTrue(out1.precision == 'single') #self.assertTrue(out1.kind == 'complex') if out1.dtype == numpy.complex128: self.assertTrue(out1.precision == 'double') #self.assertTrue(out1.kind == 'complex') if self.kind == 'complex': out2 = FrequencySeries([5+0j,3+0j,1+0j], 0.1, dtype=self.dtype, epoch=self.epoch) self.assertTrue(type(out2._scheme) == type(self.context)) self.assertTrue(type(out2._data) is SchemeArray) self.assertEqual(out2[0],5) self.assertEqual(out2[1],3) self.assertEqual(out2[2],1) self.assertTrue(out2.dtype==self.dtype) self.assertEqual(out2.delta_f, 0.1) self.assertEqual(out2._epoch, self.epoch) else: self.assertRaises(TypeError, FrequencySeries,[5+0j, 3+0j, 1+0j], 0.1, dtype=self.dtype) #Also, when it is unspecified out3 = FrequencySeries([5.0,3,1],0.1,epoch=self.epoch) self.assertTrue(type(out3._scheme) == type(self.context)) self.assertTrue(type(out3._data) is SchemeArray) self.assertEqual(out3[0],5) self.assertEqual(out3[1],3) self.assertEqual(out3[2],1) self.assertTrue(out3.dtype==numpy.float64) self.assertEqual(out3.delta_f, 0.1) self.assertEqual(out3._epoch, self.epoch) out4 = FrequencySeries([5.0+0j,3+0j,1+0j],0.1,epoch = self.epoch) self.assertTrue(type(out4._scheme) == type(self.context)) self.assertTrue(type(out4._data) is SchemeArray) self.assertEqual(out4[0],5) self.assertEqual(out4[1],3) self.assertEqual(out4[2],1) self.assertTrue(out4.dtype==numpy.complex128) self.assertEqual(out4.delta_f, 0.1) self.assertEqual(out4._epoch, self.epoch) self.assertRaises(TypeError,FrequencySeries,[1,2,3],copy=False) # Things specific to FrequencySeries 
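# a negative delta_f or empty initial data should raise ValueError,
# and a malformed epoch should raise TypeError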
self.assertRaises(ValueError, FrequencySeries, [1,2,3], -1) self.assertRaises(ValueError, FrequencySeries, [], .1) self.assertRaises(TypeError, FrequencySeries, [1,2,3], .1, epoch=(5,2)) def test_mul(self): super(TestFrequencySeriesBase,self).test_mul() self.assertRaises(ValueError, self.a.__mul__,self.bad3) c = self.a * self.bad4 self.assertTrue(c._epoch==self.epoch) def test_rmul(self): super(TestFrequencySeriesBase,self).test_rmul() self.assertRaises(ValueError, self.a.__rmul__,self.bad3) c = self.a.__rmul__(self.bad4) self.assertTrue(c._epoch==self.epoch) def test_imul(self): super(TestFrequencySeriesBase,self).test_imul() self.assertRaises(ValueError, self.a.__imul__,self.bad3) self.a *= self.bad4 self.assertTrue(self.a._epoch==self.epoch) def test_add(self): super(TestFrequencySeriesBase,self).test_add() self.assertRaises(ValueError, self.a.__add__,self.bad3) c = self.a + self.bad4 self.assertTrue(c._epoch==self.epoch) def test_radd(self): super(TestFrequencySeriesBase,self).test_radd() self.assertRaises(ValueError, self.a.__radd__,self.bad3) c = self.a.__radd__(self.bad4) self.assertTrue(c._epoch==self.epoch) def test_iadd(self): super(TestFrequencySeriesBase,self).test_iadd() self.assertRaises(ValueError, self.a.__iadd__,self.bad3) self.a += self.bad4 self.assertTrue(self.a._epoch==self.epoch) def test_sub(self): super(TestFrequencySeriesBase,self).test_sub() self.assertRaises(ValueError, self.a.__sub__,self.bad3) c = self.a - self.bad4 self.assertTrue(c._epoch==self.epoch) def test_rsub(self): super(TestFrequencySeriesBase,self).test_rsub() self.assertRaises(ValueError, self.a.__rsub__,self.bad3) c = self.a.__rsub__(self.bad4) self.assertTrue(c._epoch==self.epoch) def test_isub(self): super(TestFrequencySeriesBase,self).test_isub() self.assertRaises(ValueError, self.a.__isub__,self.bad3) self.a -= self.bad4 self.assertTrue(self.a._epoch==self.epoch) def test_div(self): super(TestFrequencySeriesBase,self).test_div() self.assertRaises(ValueError, self.a.__div__,self.bad3) c = self.a / self.bad4 self.assertTrue(c._epoch==self.epoch) def test_rdiv(self): super(TestFrequencySeriesBase,self).test_rdiv() self.assertRaises(ValueError, self.a.__rdiv__,self.bad3) c = self.a.__rdiv__(self.bad4) self.assertTrue(c._epoch==self.epoch) def test_idiv(self): super(TestFrequencySeriesBase,self).test_idiv() self.assertRaises(ValueError, self.a.__idiv__,self.bad3) self.a /= self.bad4 self.assertTrue(self.a._epoch==self.epoch) def test_dot(self): super(TestFrequencySeriesBase,self).test_dot() self.assertRaises(ValueError, self.a.dot,self.bad3) self.a.dot(self.bad4) self.assertTrue(self.a._epoch==self.epoch) def test_sample_frequencies(self): with self.context: # Moving these to the current scheme self.a*=1 self.b*=1 self.bad3*=1 self.assertEqual(len(self.a.sample_frequencies), 3) self.assertAlmostEqual(self.a.sample_frequencies[-1] - self.a.sample_frequencies[0], 0.2) self.assertEqual(len(self.b.sample_frequencies), 3) self.assertAlmostEqual(self.b.sample_frequencies[-1] - self.b.sample_frequencies[0], 0.2) self.assertEqual(len(self.bad3.sample_frequencies), 3) self.assertAlmostEqual(self.bad3.sample_frequencies[-1] - self.bad3.sample_frequencies[0], 0.4) def test_save(self): with self.context: # make temporary file paths temp_file = tempfile.NamedTemporaryFile() temp_path_npy = temp_file.name + '.npy' temp_path_txt = temp_file.name + '.txt' # make a test frequency series a_numpy = numpy.arange(100, dtype=self.dtype) a = FrequencySeries(a_numpy, delta_f=0.1) # test saving to Numpy array 
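# (save() writes the sample frequencies alongside the series values,
# hence the two-column shape checked below for real data)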
a.save(temp_path_npy) b = numpy.load(temp_path_npy) self.assertEqual(b.shape, (a_numpy.shape[0], 2)) self.assertEqual(numpy.abs(b[:,0] - a.sample_frequencies.numpy()).max(), 0) self.assertEqual(numpy.abs(b[:,1] - a_numpy).max(), 0) os.remove(temp_path_npy) # test saving to text file a.save(temp_path_txt) b = numpy.loadtxt(temp_path_txt) if a.kind == 'complex': self.assertEqual(b.shape, (a_numpy.shape[0], 3)) b = numpy.vstack((b[:,0], b[:,1] + 1j * b[:,2])).T elif a.kind == 'real': self.assertEqual(b.shape, (a_numpy.shape[0], 2)) self.assertEqual(numpy.abs(b[:,0] - a.sample_frequencies.numpy()).max(), 0) self.assertEqual(numpy.abs(b[:,1] - a_numpy).max(), 0) os.remove(temp_path_txt) def fs_test_maker(dtype, odtype, epoch): class TestFrequencySeries(TestFrequencySeriesBase): __test__ = True def __init__(self, *args): self.dtype = dtype self.odtype = odtype self.epoch = epoch unittest.TestCase.__init__(self, *args) TestFrequencySeries.__name__ = _scheme + " " + dtype.__name__ + " with " + odtype.__name__ return TestFrequencySeries types = [ (float32,[float32,complex64]), (float64,[float64,complex128]), (complex64,[complex64,float32]), (complex128,[float64,complex128]) ] suite = unittest.TestSuite() # Unlike the regular array tests, we will need to test with an epoch, and with none epochs = [lal.LIGOTimeGPS(1000, 1000),None] i = 0 for t,otypes in types: for ot in otypes: for epoch in epochs: na = 'test' + str(i) vars()[na] = fs_test_maker(t, ot, epoch) suite.addTest(unittest.TestLoader().loadTestsFromTestCase(vars()[na])) i += 1 if __name__ == '__main__': results = unittest.TextTestRunner(verbosity=2).run(suite) simple_exit(results)
24440
43.277174
122
py
pycbc
pycbc-master/test/test_transforms.py
# Copyright (C) 2017 Christopher M. Biwer # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import numpy import unittest from pycbc import transforms from utils import parse_args_cpu_only from utils import simple_exit # list of transforms without an inverse function and to ignore IGNORE = [t.name for t in transforms.common_cbc_transforms if t.inverse is None] # ranges to draw random numbers for each parameter RANGES = { "mass1" : (1.0, 100.0), "mass2" : (1.0, 100.0), "mchirp" : (1.0, 20.0), "q" : (1.0, 10.0), "spin1_a" : (0.0, 1.0), "spin1_polar" : (0, numpy.pi), "spin1_azimuthal" : (0.0, 2 * numpy.pi), "spin2_a" : (0.0, 1.0), "spin2_polar" : (0, numpy.pi), "spin2_azimuthal" : (0.0, 2 * numpy.pi), "chi_eff" : (-1.0, 1.0), "chi_a" : (0.0, 1.0), "chi_p" : (0.0, 1.0), "phi_s" : (0.0, 2 * numpy.pi), "phi_a" : (0.0, 2 * numpy.pi), "xi1" : (0.0, 1.0), "xi2" : (0.0, 1.0), "chirp_distance" : (2.0, 10.0), } # tests only need to happen on the CPU parse_args_cpu_only("Transforms") class TestTransforms(unittest.TestCase): def setUp(self): # set random seed numpy.random.seed(1024) def test_inverse(self): # set threshold how similar values must be threshold = 0.001 # loop over forward CBC transforms for trans in transforms.common_cbc_forward_transforms: # check if inverse exists if trans.name in IGNORE: continue if trans.name == 'spherical_to_cartesian': # spherical to cartesian requires the cartesian and spherical # parameter names to be specified, which we can get from # the inputs and outputs inv = trans.inverse(*trans._outputs+trans._inputs) else: inv = trans.inverse() # generate some random points in_map = {p : numpy.random.uniform(*RANGES[p]) for p in trans.inputs} # transforms to and back from inverse transform intermediate_map = trans.transform(in_map) out_map = inv.transform(intermediate_map) # check that input equals outputs to some threshold in_arr = numpy.array([in_map[p] for p in trans.inputs]) out_arr = numpy.array([out_map[p] for p in trans.inputs]) if not numpy.all(1.0 - in_arr / out_arr < threshold): raise ValueError( "Transform {} does not map back to itself.".format(trans.name)) suite = unittest.TestSuite() suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestTransforms)) if __name__ == "__main__": results = unittest.TextTestRunner(verbosity=2).run(suite) simple_exit(results)
3458
33.59
79
py
pycbc
pycbc-master/test/test_detector.py
# Copyright (C) 2018 Alex Nitz # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ============================================================================= # # Preamble # # ============================================================================= # """ These are the unittests for the pycbc.detector module """ import pycbc.detector as det import unittest, numpy from numpy.random import uniform, seed seed(0) # We require lal as a reference comparison import lal from utils import simple_exit class TestDetector(unittest.TestCase): def setUp(self): self.d = [det.Detector(ifo) for ifo in det.get_available_detectors()] # not distributed sanely, but should provide some good coverage N = 1000 self.ra = uniform(0, numpy.pi * 2, size=N) self.dec = uniform(-numpy.pi, numpy.pi, size=N) self.pol = uniform(0, numpy.pi * 2, size=N) self.time = uniform(1126000000.0, 1336096017.0, size=N) def test_light_time(self): for d1 in self.d: for d2 in self.d: t1 = lal.LightTravelTime(d1.lal(), d2.lal()) * 1e-9 t2 = d1.light_travel_time_to_detector(d2) self.assertAlmostEqual(t1, t2, 7) def test_antenna_pattern(self): vals = list(zip(self.ra, self.dec, self.pol, self.time)) for ifo in self.d: fp = [] fc = [] for ra1, dec1, pol1, time1 in vals: gmst = lal.GreenwichMeanSiderealTime(time1) fp1, fc1 = tuple(lal.ComputeDetAMResponse(ifo.response, ra1, dec1, pol1, gmst)) fp.append(fp1) fc.append(fc1) fp2, fc2 = ifo.antenna_pattern(self.ra, self.dec, self.pol, self.time) fp = numpy.array(fp) fc = numpy.array(fc) diff1 = fp - fp2 diff2 = fc - fc2 diff = abs(numpy.concatenate([diff1, diff2])) tolerance = 2e-4 print("Max antenna diff:", ifo.name, diff.max()) self.assertLess(diff.max(), tolerance) def test_delay_from_detector(self): ra, dec, time = self.ra[0:10], self.dec[0:10], self.time[0:10] for d1 in self.d: for d2 in self.d: time1 = [] for ra1, dec1, tim1 in zip(ra, dec, time): t1 = lal.ArrivalTimeDiff(d1.location, d2.location, ra1, dec1, tim1) time1.append(t1) time1 = numpy.array(time1) time2 = d1.time_delay_from_detector(d2, ra, dec, time) self.assertLess(abs(time1 - time2).max(), 1e-3) def test_optimal_orientation(self): for d1 in self.d: ra, dec = d1.optimal_orientation(self.time[0]) ra1 = d1.longitude + lal.GreenwichMeanSiderealTime(self.time[0]) % (numpy.pi *2) dec1 = d1.latitude self.assertAlmostEqual(ra, ra1, 3) self.assertAlmostEqual(dec, dec1, 7) suite = unittest.TestSuite() suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestDetector)) if __name__ == '__main__': from astropy.utils import iers iers.conf.auto_download = False results = unittest.TextTestRunner(verbosity=2).run(suite) simple_exit(results)
4019
34.892857
95
py
pycbc
pycbc-master/test/test_matchedfilter.py
# Copyright (C) 2012 Alex Nitz, Josh Willis # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ============================================================================= # # Preamble # # ============================================================================= # """ These are the unittests for the pycbc.filter.matchedfilter module """ import unittest from pycbc.types import * from pycbc.scheme import * from pycbc.filter import * from math import sqrt import numpy from utils import parse_args_all_schemes, simple_exit _scheme, _context = parse_args_all_schemes("Matched Filter") #import pycbc.fft.fftw #pycbc.fft.fftw.set_measure_level(0) class TestMatchedFilter(unittest.TestCase): def setUp(self,*args): self.context = _context self.scheme = _scheme # Use sine wave as test signal data = numpy.sin(numpy.arange(0,100,100/(4096.0*64))) self.filt = TimeSeries(data,dtype=float32,delta_t=1.0/4096) self.filt2 = (self.filt*1) self.filt2[0:int(len(self.filt2)/2)].fill(0) self.filt_offset = TimeSeries(numpy.roll(data,4096*32), dtype=float32, delta_t=1.0/4096) self.filtD = TimeSeries(data,dtype=float64,delta_t=1.0/4096) self.filt2D = (self.filtD*1) self.filt2D[0:int(len(self.filt2D)/2)].fill(0) self.filt_offsetD = TimeSeries(numpy.roll(data,4096*32), dtype=float64, delta_t=1.0/4096) self.filt_short =TimeSeries([0,1,2,3,4],dtype=float32,delta_t=1.0/4096) self.filt_highres = TimeSeries(data,dtype=float,delta_t=1.0/4096) frequency_series = make_frequency_series(self.filt_highres) # the number is 2pi*delta_t*(5+1/2), which is where the standard # SNR interpolation does the worst # the .3j phase is added to test the phase retrieval phase = numpy.exp(-0.008436894333371026j * frequency_series.sample_frequencies - .3j) self.filt_offset_subsample = ( frequency_series*phase ) self.psd = FrequencySeries(2 * numpy.ones_like(frequency_series), delta_f=frequency_series.delta_f) def test_correlate (self): from pycbc.filter.matchedfilter import correlate with self.context: a = Array([1j], dtype=complex64) b = Array([1j], dtype=complex64) c = zeros(1, dtype=complex64) correlate (a, b, c) self.assertEqual(1, c[0]) def test_ave_snr_noise(self): with self.context: #Test that the average snr in noise is 2 from numpy.random import normal noise = normal(0.0,2,4096*64) nplus= TimeSeries(noise,dtype=float32,delta_t=1.0/4096) ntilde = make_frequency_series(nplus) / nplus.delta_t # Calculate a Faux psd for normalization, replace with better algorithm psd = (ntilde).squared_norm() / float(len(nplus)) * nplus.delta_t *2.0 snr = matched_filter(self.filt, nplus, psd=psd) ave = snr.squared_norm().sum() /len(snr) self.assertAlmostEqual(2,ave,places=5) noise = normal(0.0,2,4096*64) nplus= TimeSeries(noise,dtype=float64,delta_t=1.0/4096) ntilde = make_frequency_series(nplus) / nplus.delta_t # Calculate a Faux psd for normalization, replace with better algorithm psd = 
(ntilde).squared_norm() / float(len(nplus)) * nplus.delta_t *2.0 snr = matched_filter(self.filtD,nplus,psd=psd) ave = snr.squared_norm().sum() /len(snr) self.assertAlmostEqual(2,ave,places=5) def test_perfect_match(self): with self.context: o,i = match(self.filt,self.filt) self.assertAlmostEqual(1,o,places=4) self.assertEqual(0,i) o,i = match(self.filtD,self.filtD) self.assertAlmostEqual(1,o,places=4) self.assertEqual(0,i) o,i = match(self.filt,self.filt, subsample_interpolation=True) self.assertAlmostEqual(1,o,places=4) self.assertAlmostEqual(0,i,places=1) o,i = match(self.filtD,self.filtD, subsample_interpolation=True) self.assertAlmostEqual(1,o,places=4) self.assertAlmostEqual(0,i,places=1) def test_perfect_match_offset(self): with self.context: o,i = match(self.filt,self.filt_offset) self.assertAlmostEqual(1,o,places=4) self.assertEqual(4096*32,i) o,i = match(self.filtD,self.filt_offsetD) self.assertAlmostEqual(1,o,places=4) self.assertEqual(4096*32,i) o,i = match(self.filt, self.filt_offset, subsample_interpolation=True) self.assertAlmostEqual(1, o, places=4) self.assertAlmostEqual(4096*32, i, places=1) o,i = match(self.filtD, self.filt_offsetD, subsample_interpolation=True) self.assertAlmostEqual(1, o, places=4) self.assertAlmostEqual(4096*32, i, places=1) def test_perfect_match_subsample_offset(self): with self.context: o, i, ph = optimized_match( self.filt_highres, self.filt_offset_subsample, return_phase=True ) self._check_accuracy_subsample_offset( o, i, ph ) # but the standard implementation is not correct # even when checked to a much lower degree of accuracy: # the following tests are just a sanity check, # they can be removed o2, _ = match(self.filt_highres, self.filt_offset_subsample) self.assertNotAlmostEqual(1., o2, places=7) o3, _ = match(self.filt_highres, self.filt_offset_subsample, subsample_interpolation=True) self.assertNotAlmostEqual(1., o3, places=7) def test_perfect_match_subsample_offset_bandlimited(self): with self.context: o, i, ph = optimized_match( self.filt_highres, self.filt_offset_subsample, return_phase=True, low_frequency_cutoff=20., high_frequency_cutoff=1500. 
) self._check_accuracy_subsample_offset( o, i, ph ) def test_perfect_match_subsample_offset_with_psd(self): with self.context: o, i, ph = optimized_match( self.filt_highres, self.filt_offset_subsample, return_phase=True, psd=self.psd ) self._check_accuracy_subsample_offset( o, i, ph ) def _check_accuracy_subsample_offset(self, o, i, ph): self.assertAlmostEqual(1.0, o, places=10) self.assertAlmostEqual(5 + 1 / 2, i, places=4) self.assertAlmostEqual(0.3, ph, places=4) def test_imperfect_match(self): with self.context: f = make_frequency_series(self.filt) f2 = make_frequency_series(self.filt2) o,i = match(self.filt,self.filt2) self.assertAlmostEqual(sqrt(0.5),o,places=3) f = make_frequency_series(self.filtD) f2 = make_frequency_series(self.filt2D) o,i = match(self.filtD,self.filt2D) self.assertAlmostEqual(sqrt(0.5),o,places=3) f = make_frequency_series(self.filt) f2 = make_frequency_series(self.filt2) o,i = match(self.filt, self.filt2, subsample_interpolation=True) self.assertAlmostEqual(sqrt(0.5), o, places=3) f = make_frequency_series(self.filtD) f2 = make_frequency_series(self.filt2D) o,i = match(self.filtD, self.filt2D, subsample_interpolation=True) self.assertAlmostEqual(sqrt(0.5), o, places=3) self.assertAlmostEqual(132327.27060, i, places=2) def test_errors(self): with self.context: #Check that an incompatible data and filter produce an error self.assertRaises(ValueError,match,self.filt,self.filt[0:5]) #Check that an incompatible psd produces an error self.assertRaises(TypeError,match,self.filt,self.filt,psd=self.filt) psd = FrequencySeries(zeros(len(self.filt) // 2 + 1), delta_f=100000) self.assertRaises(ValueError,match,self.filt,self.filt,psd=psd) #Check that only TimeSeries or FrequencySeries are accepted self.assertRaises(TypeError,match,zeros(10),zeros(10)) self.assertRaises(ValueError,match,self.filt,self.filt[0:len(self.filt)-1]) suite = unittest.TestSuite() suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestMatchedFilter)) if __name__ == '__main__': results = unittest.TextTestRunner(verbosity=2).run(suite) simple_exit(results)
9,715
39.14876
93
py
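A note on the identity test_ave_snr_noise exercises: for Gaussian noise whitened by its own PSD, the complex matched-filter SNR series satisfies an average squared magnitude of 2. The following is a minimal standalone sketch of that check, assuming a working pycbc install on the default CPU scheme; the sine-wave template and the quick data-derived PSD shortcut are lifted from the test itself.

# Minimal sketch, assuming pycbc is installed; mirrors test_ave_snr_noise.
import numpy
from pycbc.types import TimeSeries
from pycbc.filter import make_frequency_series, matched_filter

delta_t = 1.0 / 4096
noise = numpy.random.normal(0.0, 2, 4096 * 64)
nplus = TimeSeries(noise, dtype=numpy.float32, delta_t=delta_t)

# Quick data-derived PSD for normalization, as in the test
ntilde = make_frequency_series(nplus) / nplus.delta_t
psd = ntilde.squared_norm() / float(len(nplus)) * nplus.delta_t * 2.0

# Any broadband template will do; the test uses a sine wave
template = TimeSeries(numpy.sin(numpy.arange(0, 100, 100 / (4096.0 * 64))),
                      dtype=numpy.float32, delta_t=delta_t)

snr = matched_filter(template, nplus, psd=psd)
print(snr.squared_norm().sum() / len(snr))  # should print ~2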
pycbc
pycbc-master/test/test_infmodel.py
# Copyright (C) 2021 Alex Nitz # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ============================================================================= # # Preamble # # ============================================================================= # """ These are the unittests for pycbc.inference.models """ import unittest import copy from utils import simple_exit from pycbc.catalog import Merger from pycbc.psd import interpolate, inverse_spectrum_truncation from pycbc.frame import read_frame from pycbc.filter import highpass, resample_to_delta_t from astropy.utils.data import download_file from pycbc.inference import models from pycbc.distributions import Uniform, JointDistribution, SinAngle, UniformAngle class TestModels(unittest.TestCase): @classmethod def setUpClass(cls): ###### Get data for references analysis of 170817 m = Merger("GW170817") ifos = ['H1', 'V1', 'L1'] cls.psds = {} cls.data = {} for ifo in ifos: print("Processing {} data".format(ifo)) # Download the gravitational wave data for GW170817 url = "https://dcc.ligo.org/public/0146/P1700349/001/" url += "{}-{}1_LOSC_CLN_4_V1-1187007040-2048.gwf" fname = download_file(url.format(ifo[0], ifo[0]), cache=True) ts = read_frame(fname, "{}:LOSC-STRAIN".format(ifo), start_time=int(m.time - 260), end_time=int(m.time + 40)) ts = highpass(ts, 15.0) ts = resample_to_delta_t(ts, 1.0/2048) ts = ts.time_slice(m.time-112, m.time + 16) cls.data[ifo] = ts.to_frequencyseries() psd = interpolate(ts.psd(4), ts.delta_f) psd = inverse_spectrum_truncation(psd, int(4 * psd.sample_rate), trunc_method='hann', low_frequency_cutoff=20.0) cls.psds[ifo] = psd cls.static = {'mass1':1.3757, 'mass2':1.3757, 'f_lower':20.0, 'approximant':"TaylorF2", 'polarization':0, 'ra': 3.44615914, 'dec': -0.40808407, 'tc': 1187008882.42840, } cls.variable = ['distance', 'inclination', ] cls.flow = {'H1':25, 'L1':25, 'V1':25} inclination_prior = SinAngle(inclination=None) distance_prior = Uniform(distance=(10, 100)) tc_prior = Uniform(tc=(m.time-0.1, m.time+0.1)) pol = UniformAngle(polarization=None) cls.prior = JointDistribution(cls.variable, inclination_prior, distance_prior) # set up for marginalized polarization tests cls.static2 = cls.static.copy() cls.static2.pop('polarization') cls.variable2 = cls.variable + ['polarization'] cls.prior2 = JointDistribution(cls.variable2, inclination_prior, distance_prior, pol) ###### Expected answers # Answer taken from marginalized gaussian model cls.q1 = {'distance':42.0, 'inclination':2.5} cls.a1 = 541.8235746138382 # answer taken from brute marginize pol + phase cls.a2 = 542.581 cls.pol_samples = 200 def test_base_phase_marg(self): model = models.MarginalizedPhaseGaussianNoise( self.variable, copy.deepcopy(self.data), low_frequency_cutoff=self.flow, psds=self.psds, static_params=self.static, prior=self.prior, ) model.update(**self.q1) self.assertAlmostEqual(self.a1, 
model.loglr, delta=1e-3)

    def test_relative_phase_marg(self):
        model = models.Relative(self.variable, copy.deepcopy(self.data),
                                low_frequency_cutoff=self.flow,
                                psds=self.psds,
                                static_params=self.static,
                                prior=self.prior,
                                fiducial_params={'mass1': 1.3756},
                                epsilon=.1,
                                )
        model.update(**self.q1)
        self.assertAlmostEqual(self.a1, model.loglr, delta=0.002)

    def test_single_phase_marg(self):
        model = models.SingleTemplate(
            self.variable, copy.deepcopy(self.data),
            low_frequency_cutoff=self.flow,
            psds=self.psds,
            static_params=self.static,
            prior=self.prior,
            )
        model.update(**self.q1)
        self.assertAlmostEqual(self.a1, model.loglr, delta=0.02)

    def test_single_pol_phase_marg(self):
        model = models.SingleTemplate(
            self.variable2, copy.deepcopy(self.data),
            low_frequency_cutoff=self.flow,
            psds=self.psds,
            static_params=self.static2,
            prior=self.prior2,
            marginalize_vector_samples=1000,
            marginalize_vector_params='polarization',
            )
        model.update(**self.q1)
        self.assertAlmostEqual(self.a2, model.loglr, delta=0.04)

    def test_brute_pol_phase_marg(self):
        # Uses the old polarization syntax until we decide to remove it.
        # Until then, this also tests that the old interface stays working.
        model = models.BruteParallelGaussianMarginalize(
            self.variable,
            data=copy.deepcopy(self.data),
            low_frequency_cutoff=self.flow,
            psds=self.psds,
            static_params=self.static2,
            prior=self.prior,
            marginalize_phase=400,
            cores=1,
            base_model='marginalized_polarization',
            )
        model.update(**self.q1)
        self.assertAlmostEqual(self.a2, model.loglr, delta=0.002)

suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestModels))

if __name__ == '__main__':
    from astropy.utils import iers
    iers.conf.auto_download = False
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
7,338
40.463277
82
py
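The prior assembled in setUpClass above is an ordinary pycbc JointDistribution; the sketch below builds the same object in isolation and evaluates it at the reference point q1 used throughout the tests. This assumes a working pycbc install, that calling the distribution returns the log of the joint density, and that rvs yields a record array addressable by parameter name.

# Sketch, assuming pycbc is installed; construction copied from setUpClass.
from pycbc.distributions import Uniform, SinAngle, JointDistribution

variable = ['distance', 'inclination']
prior = JointDistribution(variable,
                          SinAngle(inclination=None),
                          Uniform(distance=(10, 100)))

# Log of the joint prior density at the tests' reference point
print(prior(distance=42.0, inclination=2.5))

# A few random draws from the joint prior
samples = prior.rvs(size=3)
print(samples['distance'], samples['inclination'])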
pycbc
pycbc-master/test/test_fftw_pthreads.py
# Copyright (C) 2012 Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are the unit-tests for the pycbc.fft.fftw subpackage, testing only
the pthreads threading backend.
"""
import pycbc.fft
from pycbc.scheme import CPUScheme
import unittest
from sys import exit as _exit
from utils import parse_args_cpu_only, simple_exit
from fft_base import _BaseTestFFTClass

parse_args_cpu_only("FFTW pthreads backend")

# See if we can set the FFTW threading backend to 'pthreads'; if not, say so and exit.
if 'fftw' in pycbc.fft.get_backend_names():
    import pycbc.fft.fftw
    try:
        pycbc.fft.fftw.set_threads_backend('pthreads')
    except:
        print("Unable to import pthreads threads backend to FFTW; skipping pthreads thread tests")
        _exit(0)
else:
    print("FFTW does not seem to be an available CPU backend; skipping pthreads thread tests")
    _exit(0)

# Most of the work is now done in fft_base.
FFTTestClasses = []
for num_threads in [2, 4, 6, 8]:
    kdict = {'backends': ['fftw'],
             'scheme': 'cpu',
             'context': CPUScheme(num_threads=num_threads)}
    klass = type('FFTW_Pthreads_test', (_BaseTestFFTClass,), kdict)
    klass.__test__ = True
    FFTTestClasses.append(klass)

# Finally, we create suites and run them
if __name__ == '__main__':
    suite = unittest.TestSuite()
    for klass in FFTTestClasses:
        suite.addTest(unittest.TestLoader().loadTestsFromTestCase(klass))
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
2,442
32.465753
98
py
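The loop that builds FFTTestClasses above is a general trick for parametrizing unittest over configurations: generate one subclass per configuration with type() and give each its settings as class attributes. A stripped-down, stdlib-only sketch of the idiom follows; the names are placeholders, not pycbc's.

# Stdlib-only sketch of per-configuration test class generation.
import unittest

class _BaseCase(unittest.TestCase):
    __test__ = False          # stop collectors running the abstract base
    num_threads = None

    def test_config_present(self):
        self.assertIsNotNone(self.num_threads)

cases = []
for n in [2, 4, 6, 8]:
    # type(name, bases, namespace) builds a new class per configuration
    klass = type('Threads%dTest' % n, (_BaseCase,), {'num_threads': n})
    klass.__test__ = True
    cases.append(klass)

if __name__ == '__main__':
    suite = unittest.TestSuite()
    for klass in cases:
        suite.addTest(unittest.TestLoader().loadTestsFromTestCase(klass))
    unittest.TextTestRunner(verbosity=2).run(suite)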
pycbc
pycbc-master/test/fft_base.py
# Copyright (C) 2012 Josh Willis, Andrew Miller # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ============================================================================= # # Preamble # # ============================================================================= # """ This is a helper module that does most of the work for the unit-tests of the pycbc.fft subpackage. Its class and many private variables are called by the three test scripts test_fft_unthreaded, test_fftw_pthreads, and test_fftw_openmp. For both forward and reverse complex FFT, and forward R2C and reverse C2R FFT, these tests validate that the FFT produces the correct output and does not overwrite its input on small test cases where the answer may be hand-computed. They also verify that an FFT of (larger sized) random data followed by the reverse FFT gives back the original input (modulo the scaling factor of the length of the array, when testing just the basic Array class; that factor is not present for Time/FrequencySeries transformations). For Time and Frequency series these tests also compare their outputs on large, random input to direct calls to the XLAL *TimeFreqFFT and *FreqTimeFFT functions. A similar comparison is *not* made for the array tests, since the underlying LAL functions on a vector input are what the pycbc fft routines use (on the CPU), so there is little additional gain from the comparison to the tests already done. Finally, for all three classes of pycbc.types these unit tests also check that the correct exceptions are raised when several different kinds of erroneous inputs are given. For the R2C (resp. C2R), the tests are run with input arrays (resp. output arrays) that are both even and odd in length, since the length of those output arrays is not the same as that of the input. However these unit tests no longer check that the imaginary parts of DC (and for even-length also Nyquist) outputs are exactly zero, both because that complicates the testing framework considerably, and because it will not in general hold for GPU algorithms (and those cannot be made to enforce that, without unacceptable computational overhead). All of these tests are performed for inputs from each of the three basic types (Array, TimeSeries, and FrequencySeries) and for each precision (single and double). They are also checked for each possible backend of the current scheme, which means in particular that whichever backend is the default will be tested twice, once as 'Default' and once under its own name. 
""" import pycbc import pycbc.scheme import pycbc.types from pycbc.types import Array as ar, TimeSeries as ts, FrequencySeries as fs import numpy from numpy import dtype, float32, float64, complex64, complex128, zeros, real from numpy.random import randn import pycbc.fft from pycbc.fft.backend_support import set_backend import unittest import sys from utils import parse_args_all_schemes, simple_exit from lal import LIGOTimeGPS as LTG import lal as _lal # Because we run many similar tests where we only vary dtypes, precisions, # or Array/TimeSeries/FrequencySeries, it is helpful to define the following # dictionarys and functions and then call those from within the actual tests. # Map the same kind (real or complex) to the other precision _other_prec = {float32:float64, float64:float32, complex64:complex128, complex128:complex64} # Map the same precision to the other kind _other_kind = {float32:complex64, float64:complex128, complex64:float32, complex128:float64} # Map the dtype of a valid *forward* fft *input* array to the wrong kind, # but correct precision, dtype of the *output* array. This is either R2R or C2R. # The same mapping also send the dtype of a valid *output* array for an inverse # fft to the wrong kind but correct precision *input* array, corresponding to # a R2R or R2C transform. _bad_dtype = {float32: float32, float64: float64, complex64: float32, complex128: float64} # Our actual helper functions. Note that these perform the necessary operations # within the appropriate context, so they should not themselves be called inside # of a context block. def _test_fft(test_case, inarr, expec, tol): # Basic test to see that the forward FFT doesn't # overwrite its input and computes its output to # within the required accuracy. tc = test_case inty = type(inarr) in_pristine = inty(inarr) outty = type(expec) # Make a copy... outarr = outty(expec) # Clear it and change values (so we test something meaningful): outarr.clear() if hasattr(outarr,'_epoch'): outarr._epoch *= 5*tol if hasattr(outarr,'_delta_t'): outarr._delta_t *= 5*tol if hasattr(outarr,'_delta_f'): outarr._delta_f *= 5*tol with tc.context: set_backend(tc.backends) for api in ['func', 'class']: if api == 'func': pycbc.fft.fft(inarr, outarr) else: fft_class = pycbc.fft.FFT(inarr, outarr) fft_class.execute() if isinstance(inarr, ts): outarr *= inarr._delta_t elif isinstance(inarr, fs): outarr *= inarr._delta_f # First, verify that the input hasn't been overwritten emsg = 'FFT overwrote input array' tc.assertEqual(inarr, in_pristine,emsg) # Next, check that the output is correct to within tolerance. # That will require exact equality of all other meta-data emsg = ('FFT output differs by more than a factor of ' '{0} from expected'.format(tol)) if isinstance(outarr, ts) or isinstance(outarr, fs): tc.assertTrue( outarr.almost_equal_norm(expec, tol=tol, dtol=tol), msg=emsg ) else: tc.assertTrue( outarr.almost_equal_norm(expec, tol=tol), msg=emsg ) outarr.clear() def _test_ifft(test_case, inarr, expec, tol): # Basic test to see that the reverse FFT doesn't # overwrite its input and computes its output to # within the required accuracy. tc = test_case inty = type(inarr) in_pristine = inty(inarr) outty = type(expec) # Make a copy... 
outarr = outty(expec) # Clear it and change values (so we test something meaningful): outarr.clear() if hasattr(outarr,'_epoch'): outarr._epoch *= 5*tol if hasattr(outarr,'_delta_t'): outarr._delta_t *= 5*tol if hasattr(outarr,'_delta_f'): outarr._delta_f *= 5*tol with tc.context: set_backend(tc.backends) for api in ['func', 'class']: if api == 'func': pycbc.fft.ifft(inarr, outarr) else: ifft_class = pycbc.fft.IFFT(inarr, outarr) ifft_class.execute() if isinstance(inarr, ts): outarr *= inarr._delta_t elif isinstance(inarr, fs): outarr *= inarr._delta_f # First, verify that the input hasn't been overwritten emsg = 'Inverse FFT overwrote input array' tc.assertEqual(inarr, in_pristine, emsg) # Next, check that the output is correct to within tolerance. # That will require exact equality of all other meta-data emsg = ('Inverse FFT output differs by more than a factor ' 'of {0} from expected'.format(tol)) if isinstance(outarr, ts) or isinstance(outarr, fs): tc.assertTrue( outarr.almost_equal_norm(expec, tol=tol, dtol=tol), msg=emsg ) else: tc.assertTrue( outarr.almost_equal_norm(expec, tol=tol), msg=emsg ) outarr.clear() def _test_random(test_case, inarr, outarr, tol): tc = test_case # Test that applying a transform and its inverse to reasonably long, random # input gives back the (appropriately scaled) input. We must allow for # numerical error, and it seems more reliable to check using normwise error # (than elementwise). # # First test IFFT(FFT(random)) # The numpy randn(n) provides an array of n numbers drawn from standard # normal if dtype(inarr).kind == 'c': inarr._data[:] = randn(len(inarr)) + 1j*randn(len(inarr)) # If we're going to do a HC2R transform we must worry about DC/Nyquist # imaginary if dtype(outarr).kind == 'f': inarr._data[0] = real(inarr[0]) if (len(outarr) % 2) == 0: inarr._data[len(inarr)-1] = real(inarr[len(inarr)-1]) else: inarr._data[:] = randn(len(inarr)) incopy = type(inarr)(inarr) outarr.clear() with tc.context: set_backend(tc.backends) for api in ['func', 'class']: if api == 'func': pycbc.fft.fft(inarr, outarr) pycbc.fft.ifft(outarr, inarr) else: fft_class = pycbc.fft.FFT(inarr, outarr) fft_class.execute() if isinstance(inarr, ts): outarr *= inarr._delta_t elif isinstance(inarr, fs): outarr *= inarr._delta_f ifft_class = pycbc.fft.IFFT(outarr, inarr) ifft_class.execute() if isinstance(outarr, ts): inarr *= outarr._delta_t elif isinstance(outarr, fs): inarr *= outarr._delta_f if type(inarr) == pycbc.types.Array: # An Array FFTed and then IFFTEd will be scaled by its length # Frequency and TimeSeries have no scaling inarr /= len(inarr) emsg=("IFFT(FFT(random)) did not reproduce original array to " "within tolerance {0}".format(tol)) if isinstance(incopy, ts) or isinstance(incopy, fs): tc.assertTrue( incopy.almost_equal_norm(inarr, tol=tol, dtol=tol), msg=emsg ) else: tc.assertTrue( incopy.almost_equal_norm(inarr, tol=tol), msg=emsg ) # Perform arithmetic on outarr and inarr to pull them off of the GPU: outarr *= 1.0 inarr *= 1.0 # Now the same for FFT(IFFT(random)) if dtype(outarr).kind == 'c': outarr._data[:] = randn(len(outarr)) + 1j*randn(len(outarr)) # If we're going to do a HC2R transform we must worry about # DC/Nyquist imaginary if dtype(inarr).kind == 'f': outarr._data[0] = real(outarr[0]) if (len(inarr) % 2) == 0: outarr._data[len(outarr)-1] = real(outarr[len(outarr)-1]) else: outarr._data[:] = randn(len(outarr)) inarr.clear() outcopy = type(outarr)(outarr) with tc.context: set_backend(tc.backends) for api in ['func', 'class']: if api == 'func': 
pycbc.fft.ifft(outarr, inarr) pycbc.fft.fft(inarr, outarr) else: ifft_class = pycbc.fft.IFFT(outarr, inarr) ifft_class.execute() if isinstance(outarr, ts): inarr *= outarr._delta_t elif isinstance(outarr, fs): inarr *= outarr._delta_f fft_class = pycbc.fft.FFT(inarr, outarr) fft_class.execute() if isinstance(inarr, ts): outarr *= inarr._delta_t elif isinstance(inarr, fs): outarr *= inarr._delta_f if type(inarr) == pycbc.types.Array: # An Array FFTed and then IFFTEd will be scaled by its length # Frequency and TimeSeries have no scaling outarr /= len(inarr) emsg = ("FFT(IFFT(random)) did not reproduce original array to " "within tolerance {0}".format(tol)) if isinstance(outcopy, ts) or isinstance(outcopy, fs): tc.assertTrue( outcopy.almost_equal_norm(outarr, tol=tol, dtol=tol), msg=emsg ) else: tc.assertTrue( outcopy.almost_equal_norm(outarr, tol=tol), msg=emsg ) def _test_raise_excep_fft(test_case,inarr,outarr,other_args=None): # As far as can be told from the unittest module documentation, the # 'assertRaises' tests do not permit a custom message. So more # comments than usual here, to help diagnose and test failures. # # The 'other_args' argument is needed to pass additional keywords to # the constructors of some types (T/F series); we cannot simply copy since # the whole point is to vary the input/output in some way that should cause # an exception. if other_args is None: other_args = {} tc = test_case with tc.context: set_backend(tc.backends) def class_fft(inarr, outarr): fft_class = pycbc.fft.FFT(inarr, outarr) fft_class.execute() outty = type(outarr) outzer = pycbc.types.zeros(len(outarr)) # If we give an output array that is wrong only in length, raise ValueError: out_badlen = outty(pycbc.types.zeros(len(outarr)+1), dtype=outarr.dtype, **other_args) args = [inarr, out_badlen] tc.assertRaises(ValueError, pycbc.fft.fft, *args) tc.assertRaises(ValueError, class_fft, *args) # If we give an output array that has the wrong precision, # raise ValueError: out_badprec = outty(outzer, dtype=_other_prec[dtype(outarr).type], **other_args) args = [inarr, out_badprec] tc.assertRaises(ValueError, pycbc.fft.fft, *args) tc.assertRaises(ValueError, class_fft, *args) # If we give an output array that has the wrong kind (real or # complex) but correct precision, then raise a ValueError. This only # makes sense if we try to do either C2R or R2R. out_badkind = outty(outzer, dtype=_bad_dtype[dtype(inarr).type], **other_args) args = [inarr, out_badkind] tc.assertRaises(ValueError, pycbc.fft.fft, *args) tc.assertRaises(ValueError, class_fft, *args) # If we give an output array that isn't a PyCBC type, raise TypeError: out_badtype = numpy.zeros(len(outarr),dtype=outarr.dtype) args = [inarr, out_badtype] tc.assertRaises(TypeError, pycbc.fft.fft, *args) tc.assertRaises(TypeError, class_fft, *args) # If we give an input array that isn't a PyCBC type, raise TypeError: in_badtype = numpy.zeros(len(inarr), dtype=inarr.dtype) args = [in_badtype, outarr] tc.assertRaises(TypeError, pycbc.fft.fft, *args) tc.assertRaises(TypeError, class_fft, *args) def _test_raise_excep_ifft(test_case, inarr, outarr, other_args=None): # As far as can be told from the unittest module documentation, the # 'assertRaises' tests do not permit a custom message. So more # comments than usual here, to help diagnose and test failures. 
# # The 'other_args' argument is needed to pass additional keywords to # the constructors of some types (T/F series); we cannot simply copy since # the whole point is to vary the input/output in some way that should cause # an exception. if other_args is None: other_args = {} tc = test_case with tc.context: set_backend(tc.backends) def class_ifft(inarr, outarr): ifft_class = pycbc.fft.IFFT(inarr, outarr) ifft_class.execute() outty = type(outarr) outzer = pycbc.types.zeros(len(outarr)) # If we give an output array that is wrong only in length, # raise ValueError: out_badlen = outty(pycbc.types.zeros(len(outarr)+1), dtype=outarr.dtype, **other_args) args = [inarr, out_badlen] tc.assertRaises(ValueError, pycbc.fft.ifft, *args) tc.assertRaises(ValueError, class_ifft, *args) # If we give an output array that has the wrong precision, # raise ValueError: out_badprec = outty(outzer, dtype=_other_prec[dtype(outarr).type], **other_args) args = [inarr, out_badprec] tc.assertRaises(ValueError, pycbc.fft.ifft, *args) tc.assertRaises(ValueError, class_ifft, *args) # If we give an output array that has the wrong kind (real or complex) # but correct precision, then raise a ValueError. Here we must adjust # the kind of the *input* array, not output. But that makes it hard, # because the 'other_args' parameter will be wrong for that. # Very hacky, but oh well... new_args = other_args.copy() if new_args != {}: try: delta = new_args.pop('delta_t') new_args.update({'delta_f' : delta}) except KeyError: delta = new_args.pop('delta_f') new_args.update({'delta_t' : delta}) in_badkind = type(inarr)(pycbc.types.zeros(len(inarr)), dtype=_bad_dtype[dtype(outarr).type], **new_args) args = [in_badkind, outarr] # This will run pycbc.fft.ifft(in_badkind, outarr) if str(outarr.dtype) not in ['complex64', 'complex128']: tc.assertRaises((ValueError, KeyError), pycbc.fft.ifft, *args) tc.assertRaises((ValueError, KeyError), class_ifft, *args) # If we give an output array that isn't a PyCBC type, raise TypeError: out_badtype = numpy.zeros(len(outarr), dtype=outarr.dtype) args = [inarr, out_badtype] tc.assertRaises(TypeError, pycbc.fft.ifft, *args) tc.assertRaises(TypeError, class_ifft, *args) # If we give an input array that isn't a PyCBC type, raise TypeError: in_badtype = numpy.zeros(len(inarr), dtype=inarr.dtype) args = [in_badtype, outarr] tc.assertRaises(TypeError, pycbc.fft.ifft, *args) tc.assertRaises(TypeError, class_ifft, *args) class _BaseTestFFTClass(unittest.TestCase): """ This is the base class from which unit tests for all FFT backends are derived. """ __test__ = False def setUp(self): # Dictionary to convert a dtype to a relative precision to test self.tdict = { float32: 1e-6, float64: 1e-14, complex64: 1e-6, complex128: 1e-14} if self.backends[0] == 'mkl': # MKL precision is not as high self.tdict = { float32: 1e-4, float64: 1e-6, complex64: 1e-4, complex128: 1e-6} # Next we set up various lists that are used to build our 'known' # test, which are repeated for a variety of different precisions # and basic types. All of the lists should be consistent with a # direct calculation in the formulas of the "What FFTW actually # computes" section of the FFTW manual. # First, R2C transforms. We have both even and odd length inputs, # since the appropriate values for the output lengths vary. 
self.in_r2c_e = [1.0,-1.0,2.0,-2.0] self.out_r2c_e = [0.0+0.0j,-1.0-1.0j,6.0+0.0j] self.in_r2c_o = [1.0,2.0,2.0] self.out_r2c_o = [5.0+0.0j,-1.0+0.0j] # Next, C2R transforms, again for both even and odd lengths self.in_c2r_e = [0.0+0.0j,-1.0-1.0j,6.0+0.0j] self.out_c2r_e = [4.0, -4.0, 8.0, -8.0] self.in_c2r_o = [5.0+0.0j,-1.0+0.0j] self.out_c2r_o = [3.0,6.0,6.0] # Finally, C2C transforms, where we don't do both even and odd, # but do have different lists for fwd and rev (i.e., fft and ifft) self.in_c2c_fwd = [1.0+1.0j,2.0-2.0j] self.out_c2c_fwd = [3.0-1.0j,-1.0+3.0j] self.in_c2c_rev = [3.0-1.0j,-1.0+3.0j] self.out_c2c_rev = [2.0+2.0j,4.0-4.0j] # For Time/FrequencySeries, we want to test with a non-trivial epoch self.epoch = LTG(3,4) # When we need a delta_t or delta_f for input, use this. # Output-appropriate variable is computed. self.delta = 1.0/4096.0 # Length of our random arrays, for both real and complex arrays. self.rand_len_r = 2046 self.rand_len_c = 1024 def test_fwd_real_arr(self): for fwd_dtype in [float32,float64]: # Even input inarr = ar(self.in_r2c_e,dtype=fwd_dtype) outexp = ar(self.out_r2c_e,dtype=_other_kind[fwd_dtype]) _test_fft(self,inarr,outexp,self.tdict[fwd_dtype]) # Odd input inarr = ar(self.in_r2c_o,dtype=fwd_dtype) outexp = ar(self.out_r2c_o,dtype=_other_kind[fwd_dtype]) _test_fft(self,inarr,outexp,self.tdict[fwd_dtype]) # Random rand_inarr = ar(zeros(self.rand_len_r,dtype=fwd_dtype)) rand_outarr = ar(zeros(self.rand_len_c,dtype=_other_kind[fwd_dtype])) _test_random(self,rand_inarr,rand_outarr,self.tdict[fwd_dtype]) # Clean these up since they could be big: del rand_inarr del rand_outarr # Check that exceptions are raised. Need input and # output arrays; just reuse inarr and outexp (values won't # matter, we're just checking exceptions). _test_raise_excep_fft(self,inarr,outexp) def test_fwd_real_ts(self): for fwd_dtype in [float32,float64]: delta_t = self.delta # Even input inarr = ts(self.in_r2c_e,dtype=fwd_dtype,delta_t=delta_t,epoch=self.epoch) delta_f = 1.0/(inarr.delta_t * len(inarr)) outexp = fs(self.out_r2c_e,dtype=_other_kind[fwd_dtype],delta_f=delta_f,epoch=self.epoch) outexp *= delta_t _test_fft(self,inarr,outexp,self.tdict[fwd_dtype]) # Odd input inarr = ts(self.in_r2c_o,dtype=fwd_dtype,delta_t=delta_t,epoch=self.epoch) delta_f = 1.0/(inarr.delta_t * len(inarr)) outexp = fs(self.out_r2c_o,dtype=_other_kind[fwd_dtype],delta_f=delta_f,epoch=self.epoch) outexp *= delta_t _test_fft(self,inarr,outexp,self.tdict[fwd_dtype]) # Random rand_inarr = ts(zeros(self.rand_len_r,dtype=fwd_dtype),epoch=self.epoch,delta_t=delta_t) delta_f = 1.0/(rand_inarr.delta_t * len(rand_inarr)) rand_outarr = fs(zeros(self.rand_len_c,dtype=_other_kind[fwd_dtype]),epoch=self.epoch, delta_f=delta_f) _test_random(self,rand_inarr,rand_outarr,self.tdict[fwd_dtype]) # Reuse random arrays for the LAL tests: #_test_lal_tf_fft(self,rand_inarr,rand_outarr,self.tdict[fwd_dtype]) # Clean these up since they could be big: del rand_inarr del rand_outarr # Check that exceptions are raised. Need input and # output arrays; just reuse inarr and outexp (values won't # matter, we're just checking exceptions). 
output_args = {"delta_f": self.delta, "epoch": self.epoch} _test_raise_excep_fft(self,inarr,outexp,output_args) def test_fwd_real_fs(self): for fwd_dtype in [float32,float64]: delta_f = self.delta # Even input inarr = fs(self.in_r2c_e,dtype=fwd_dtype,delta_f=delta_f,epoch=self.epoch) delta_t = 1.0/(inarr.delta_f * len(inarr)) outexp = ts(self.out_r2c_e,dtype=_other_kind[fwd_dtype],delta_t=delta_t,epoch=self.epoch) outexp *= delta_f _test_fft(self,inarr,outexp,self.tdict[fwd_dtype]) # Odd input inarr = fs(self.in_r2c_o,dtype=fwd_dtype,delta_f=delta_f,epoch=self.epoch) delta_t = 1.0/(inarr.delta_f * len(inarr)) outexp = ts(self.out_r2c_o,dtype=_other_kind[fwd_dtype],delta_t=delta_t,epoch=self.epoch) outexp *= delta_f _test_fft(self,inarr,outexp,self.tdict[fwd_dtype]) # Random rand_inarr = fs(zeros(self.rand_len_r,dtype=fwd_dtype),epoch=self.epoch,delta_f=delta_f) delta_t = 1.0/(rand_inarr.delta_f * len(rand_inarr)) rand_outarr = ts(zeros(self.rand_len_c,dtype=_other_kind[fwd_dtype]),epoch=self.epoch, delta_t=delta_t) _test_random(self,rand_inarr,rand_outarr,self.tdict[fwd_dtype]) # LAL doesn't have forward FFT funcs starting from a FS, so skip _test_lal # Clean these up since they could be big: del rand_inarr del rand_outarr # Check that exceptions are raised. Need input and # output arrays; just reuse inarr and outexp (values won't # matter, we're just checking exceptions). output_args = {"delta_t": self.delta, "epoch": self.epoch} _test_raise_excep_fft(self,inarr,outexp,output_args) def test_rev_real_arr(self): for rev_dtype in [float32,float64]: # Even input inarr = ar(self.in_c2r_e,dtype=_other_kind[rev_dtype]) outexp = ar(self.out_c2r_e,dtype=rev_dtype) _test_ifft(self,inarr,outexp,self.tdict[rev_dtype]) # Odd input inarr = ar(self.in_c2r_o,dtype=_other_kind[rev_dtype]) outexp = ar(self.out_c2r_o,dtype=rev_dtype) _test_ifft(self,inarr,outexp,self.tdict[rev_dtype]) # Random---we don't do that in 'reverse' tests, since both # directions are already tested in forward, and if we just passed # in arrays in the other order we'd only get exceptions # # Check that exceptions are raised. Need input and # output arrays; just reuse inarr and outexp (values won't # matter, we're just checking exceptions). _test_raise_excep_ifft(self,inarr,outexp) def test_rev_real_ts(self): for rev_dtype in [float32,float64]: delta_t = self.delta # Even input inarr = ts(self.in_c2r_e,dtype=_other_kind[rev_dtype],delta_t=delta_t,epoch=self.epoch) delta_f = 1.0/(delta_t*len(self.out_c2r_e)) outexp = fs(self.out_c2r_e,dtype=rev_dtype,delta_f=delta_f,epoch=self.epoch) outexp *= delta_t _test_ifft(self,inarr,outexp,self.tdict[rev_dtype]) # Odd input inarr = ts(self.in_c2r_o,dtype=_other_kind[rev_dtype],delta_t=delta_t,epoch=self.epoch) delta_f = 1.0/(delta_t*len(self.out_c2r_o)) outexp = fs(self.out_c2r_o,dtype=rev_dtype,delta_f=delta_f,epoch=self.epoch) outexp *= delta_t _test_ifft(self,inarr,outexp,self.tdict[rev_dtype]) # Random---we don't do that in 'reverse' tests, since both # directions are already tested in forward, and if we just passed # in arrays in the other order we'd only get exceptions # # LAL doesn't have reverse FFT funcs starting from a TimeSeries, so # we skip those tests as well. # # Check that exceptions are raised. Need input and # output arrays; just reuse inarr and outexp (values won't # matter, we're just checking exceptions). 
output_args = {"delta_f": self.delta, "epoch": self.epoch} _test_raise_excep_ifft(self,inarr,outexp,output_args) def test_rev_real_fs(self): for rev_dtype in [float32,float64]: delta_f = self.delta # Even input inarr = fs(self.in_c2r_e,dtype=_other_kind[rev_dtype],delta_f=delta_f,epoch=self.epoch) delta_t = 1.0/(delta_f*len(self.out_c2r_e)) outexp = ts(self.out_c2r_e,dtype=rev_dtype,delta_t=delta_t,epoch=self.epoch) outexp *= delta_f _test_ifft(self,inarr,outexp,self.tdict[rev_dtype]) # Odd input inarr = fs(self.in_c2r_o,dtype=_other_kind[rev_dtype],delta_f=delta_f,epoch=self.epoch) delta_t = 1.0/(delta_f*len(self.out_c2r_o)) outexp = ts(self.out_c2r_o,dtype=rev_dtype,delta_t=delta_t,epoch=self.epoch) outexp *= delta_f _test_ifft(self,inarr,outexp,self.tdict[rev_dtype]) # Check that exceptions are raised. Need input and # output arrays; just reuse inarr and outexp (values won't # matter, we're just checking exceptions). output_args = {"delta_t": self.delta, "epoch": self.epoch} _test_raise_excep_ifft(self,inarr,outexp,output_args) def test_fwd_complex_arr(self): for fwd_dtype in [complex64,complex128]: # Don't do separate even/odd tests for complex inarr = ar(self.in_c2c_fwd,dtype=fwd_dtype) outexp = ar(self.out_c2c_fwd,dtype=fwd_dtype) _test_fft(self,inarr,outexp,self.tdict[fwd_dtype]) # Random rand_inarr = ar(zeros(self.rand_len_c,dtype=fwd_dtype)) rand_outarr = ar(zeros(self.rand_len_c,dtype=fwd_dtype)) _test_random(self,rand_inarr,rand_outarr,self.tdict[fwd_dtype]) # Clean these up since they could be big: del rand_inarr del rand_outarr # Check that exceptions are raised. Need input and # output arrays; just reuse inarr and outexp (values won't # matter, we're just checking exceptions). _test_raise_excep_fft(self,inarr,outexp) def test_fwd_complex_ts(self): for fwd_dtype in [complex64,complex128]: delta_t = self.delta # Don't do separate even/odd tests for complex inarr = ts(self.in_c2c_fwd,dtype=fwd_dtype,delta_t=delta_t,epoch=self.epoch) delta_f = 1.0/(delta_t * len(inarr)) outexp = fs(self.out_c2c_fwd,dtype=fwd_dtype,delta_f=delta_f,epoch=self.epoch) outexp *= delta_t _test_fft(self,inarr,outexp,self.tdict[fwd_dtype]) # Random rand_inarr = ts(zeros(self.rand_len_c,dtype=fwd_dtype),delta_t=delta_t,epoch=self.epoch) delta_f = 1.0/(delta_t*len(rand_inarr)) rand_outarr = fs(zeros(self.rand_len_c,dtype=fwd_dtype),delta_f=delta_f,epoch=self.epoch) _test_random(self,rand_inarr,rand_outarr,self.tdict[fwd_dtype]) # Reuse random arrays for the LAL tests: # COMMENTED OUT: The LAL Complex TimeFreqFFT and FreqTimeFFT functions perform # a repacking of data because they seem to assume that the array represents both # positive and negative frequencies. We don't do this, so we don't compare. #_test_lal_tf_fft(self,rand_inarr,rand_outarr,self.tdict[fwd_dtype]) # Clean these up since they could be big: del rand_inarr del rand_outarr # Check that exceptions are raised. Need input and # output arrays; just reuse inarr and outexp (values won't # matter, we're just checking exceptions). 
output_args = {"delta_f": self.delta, "epoch": self.epoch} _test_raise_excep_fft(self,inarr,outexp,output_args) def test_fwd_complex_fs(self): for fwd_dtype in [complex64,complex128]: delta_f = self.delta # Don't do separate even/odd tests for complex inarr = fs(self.in_c2c_fwd,dtype=fwd_dtype,delta_f=delta_f,epoch=self.epoch) delta_t = 1.0/(delta_f * len(inarr)) outexp = ts(self.out_c2c_fwd,dtype=fwd_dtype,delta_t=delta_t,epoch=self.epoch) outexp *= delta_f _test_fft(self,inarr,outexp,self.tdict[fwd_dtype]) # Random rand_inarr = fs(zeros(self.rand_len_c,dtype=fwd_dtype),delta_f=delta_f,epoch=self.epoch) delta_t = 1.0/(delta_t*len(rand_inarr)) rand_outarr = ts(zeros(self.rand_len_c,dtype=fwd_dtype),delta_t=delta_t,epoch=self.epoch) _test_random(self,rand_inarr,rand_outarr,self.tdict[fwd_dtype]) # LAL doesn't have forward FFT funcs starting from a FS, so skip _test_lal # Clean these up since they could be big: del rand_inarr del rand_outarr # Check that exceptions are raised. Need input and # output arrays; just reuse inarr and outexp (values won't # matter, we're just checking exceptions). output_args = {"delta_t": self.delta, "epoch": self.epoch} _test_raise_excep_fft(self,inarr,outexp,output_args) def test_rev_complex_arr(self): for rev_dtype in [complex64,complex128]: # Don't do separate even/odd tests for complex inarr = ar(self.in_c2c_rev,dtype=rev_dtype) outexp = ar(self.out_c2c_rev,dtype=rev_dtype) _test_ifft(self,inarr,outexp,self.tdict[rev_dtype]) # Random---we don't do that in 'reverse' tests, since both # directions are already tested in forward, and if we just passed # in arrays in the other order we'd only get exceptions # # Check that exceptions are raised. Need input and # output arrays; just reuse inarr and outexp (values won't # matter, we're just checking exceptions). _test_raise_excep_ifft(self,inarr,outexp) def test_rev_complex_ts(self): for rev_dtype in [complex64,complex128]: delta_t = self.delta # Don't do separate even/odd tests for complex inarr = ts(self.in_c2c_rev,dtype=rev_dtype,delta_t=delta_t,epoch=self.epoch) delta_f = 1.0/(delta_t*len(self.out_c2c_rev)) outexp = fs(self.out_c2c_rev,dtype=rev_dtype,delta_f=delta_f,epoch=self.epoch) outexp *= delta_t _test_ifft(self,inarr,outexp,self.tdict[rev_dtype]) # Random---we don't do that in 'reverse' tests, since both # directions are already tested in forward, and if we just passed # in arrays in the other order we'd only get exceptions # # LAL doesn't have reverse FFT funcs starting from a TimeSeries, so # we skip those tests as well. # # Check that exceptions are raised. Need input and # output arrays; just reuse inarr and outexp (values won't # matter, we're just checking exceptions). output_args = {"delta_f": self.delta, "epoch": self.epoch} _test_raise_excep_ifft(self,inarr,outexp,output_args) def test_rev_complex_fs(self): for rev_dtype in [complex64,complex128]: delta_f = self.delta # Don't do separate even/odd tests for complex inarr = fs(self.in_c2c_rev,dtype=rev_dtype,delta_f=delta_f,epoch=self.epoch) delta_t = 1.0/(delta_f*len(self.out_c2c_rev)) outexp = ts(self.out_c2c_rev,dtype=rev_dtype,delta_t=delta_t,epoch=self.epoch) outexp *= delta_f _test_ifft(self,inarr,outexp,self.tdict[rev_dtype]) # Random---we don't do that in 'reverse' tests, since both # directions are already tested in forward, and if we just passed # in arrays in the other order we'd only get exceptions # # However, we do still generate the arrays for T/F series, so that we may # do the LAL comparison test. 
As usual, we then delete those arrays. # # COMMENTED OUT: The LAL Complex TimeFreqFFT and FreqTimeFFT functions perform # a repacking of data because they seem to assume that the array represents both # positive and negative frequencies. We don't do this, so we don't compare. #rand_inarr = fs(zeros(self.rand_len_c,dtype=rev_dtype),epoch=self.epoch, # delta_f=self.delta) #rand_outarr = ts(zeros(self.rand_len_c,dtype=rev_dtype),epoch=self.epoch, # delta_t=self.delta) #_test_lal_tf_ifft(self,rand_inarr,rand_outarr,self.tdict[rev_dtype]) #del rand_inarr #del rand_outarr # # Check that exceptions are raised. Need input and # output arrays; just reuse inarr and outexp (values won't # matter, we're just checking exceptions). output_args = {"delta_t": self.delta, "epoch": self.epoch} _test_raise_excep_ifft(self,inarr,outexp,output_args)
37,722
47.737726
101
py
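The hand-computed vectors in _BaseTestFFTClass.setUp follow the plain DFT conventions of the FFTW manual, so they can be spot-checked with numpy alone, along with the delta_t/delta_f scaling the module docstring describes. Pure numpy sketch; no pycbc needed.

# numpy-only spot check of the known-answer data and scaling conventions.
import numpy

# Even-length R2C known answer: matches in_r2c_e -> out_r2c_e above
x = numpy.array([1.0, -1.0, 2.0, -2.0])
print(numpy.fft.rfft(x))              # [0.+0.j, -1.-1.j, 6.+0.j]

# TimeSeries convention: forward transform scaled by delta_t; the reverse
# transform is the unnormalized (FFTW-style) inverse, i.e. numpy's irfft
# times n, scaled by delta_f. The factors cancel for a series round trip,
# while a bare Array round trip keeps the factor of len(x) noted in the
# docstring.
delta_t = 1.0 / 4096
delta_f = 1.0 / (len(x) * delta_t)
xtilde = numpy.fft.rfft(x) * delta_t
x_back = numpy.fft.irfft(xtilde, n=len(x)) * len(x) * delta_f
print(numpy.allclose(x, x_back))      # True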
pycbc
pycbc-master/test/test_spatmplt.py
# Copyright (C) 2012 Alex Nitz, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
#                                   Preamble
#
# =============================================================================
#
"""
These are the unittests for the pycbc.waveform module
"""
import unittest
from pycbc.types import zeros, complex64
from pycbc.filter import overlap
from pycbc.waveform import get_fd_waveform, get_waveform_filter
from utils import parse_args_all_schemes, simple_exit

_scheme, _context = parse_args_all_schemes("Waveform")

class TestSPAtmplt(unittest.TestCase):
    def setUp(self, *args):
        self.context = _context
        self.scheme = _scheme

    def test_spatmplt(self):
        fl = 25
        delta_f = 1.0 / 256

        for m1 in [1, 1.4, 20]:
            for m2 in [1.4, 20]:
                for s1 in [-2, -1, -0.5, 0, 0.5, 1, 2]:
                    for s2 in [-2, -1, -0.5, 0, 0.5, 1, 2]:
                        # Generate TaylorF2 from lalsimulation, restricting
                        # to the capabilities of spatmplt
                        hpr, _ = get_fd_waveform(mass1=m1, mass2=m2,
                                                 spin1z=s1, spin2z=s2,
                                                 delta_f=delta_f, f_lower=fl,
                                                 approximant="TaylorF2",
                                                 amplitude_order=0,
                                                 spin_order=-1,
                                                 phase_order=-1)
                        hpr = hpr.astype(complex64)

                        with self.context:
                            # Generate the spatmplt waveform
                            out = zeros(len(hpr), dtype=complex64)
                            hp = get_waveform_filter(out, mass1=m1, mass2=m2,
                                                     spin1z=s1, spin2z=s2,
                                                     delta_f=delta_f,
                                                     f_lower=fl,
                                                     approximant="SPAtmplt",
                                                     amplitude_order=0,
                                                     spin_order=-1,
                                                     phase_order=-1)

                            # Check the diff is sane
                            mag = abs(hpr).sum()
                            diff = abs(hp - hpr).sum() / mag
                            self.assertTrue(diff < 0.01)

                            # Point to point overlap (no phase or time
                            # maximization)
                            o = overlap(hp, hpr)
                            self.assertAlmostEqual(1.0, o, places=4)

                            print("checked m1: %s m2: %s s1z: %s s2z: %s "
                                  "overlap = %s, diff = %s" %
                                  (m1, m2, s1, s2, o, diff))

suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestSPAtmplt))

if __name__ == '__main__':
    results = unittest.TextTestRunner(verbosity=2).run(suite)
    simple_exit(results)
3,434
41.9375
128
py
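test_spatmplt's inner loop reduces to a three-step comparison: generate the restricted TaylorF2 reference, generate the SPAtmplt filter into a preallocated output, and check the point-to-point overlap. A condensed single-point sketch, assuming pycbc plus lalsimulation are installed and the default CPU scheme is in effect:

# Single-parameter-point sketch of the comparison done in test_spatmplt;
# assumes pycbc and lalsimulation are available.
from pycbc.types import zeros, complex64
from pycbc.filter import overlap
from pycbc.waveform import get_fd_waveform, get_waveform_filter

kwds = dict(mass1=1.4, mass2=1.4, spin1z=0.5, spin2z=0.5,
            delta_f=1.0 / 256, f_lower=25,
            amplitude_order=0, spin_order=-1, phase_order=-1)

# Restricted TaylorF2 reference from lalsimulation
hpr, _ = get_fd_waveform(approximant="TaylorF2", **kwds)
hpr = hpr.astype(complex64)

# SPAtmplt written into a preallocated buffer of the same length
out = zeros(len(hpr), dtype=complex64)
hp = get_waveform_filter(out, approximant="SPAtmplt", **kwds)

print(overlap(hp, hpr))  # expect ~1.0 (no time or phase maximization)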
pycbc
pycbc-master/test/test_tmpltbank.py
# Copyright (C) 2013 Ian Harry # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # ============================================================================= # # Preamble # # ============================================================================= # """ These are the unittests for the pycbc.tmpltbank module """ import math import numpy from astropy.utils.data import download_file import pycbc.tmpltbank # Old LigoLW output functions are not imported at tmpltbank level import pycbc.tmpltbank.bank_output_utils as llw_output import pycbc.psd import pycbc.pnutils from pycbc import pnutils from pycbc.types import Array from pycbc.filter import match from pycbc.waveform import get_fd_waveform import matplotlib matplotlib.use('Agg') import unittest from utils import parse_args_cpu_only, simple_exit # This will return whatever is appropriate, depending on whether this # particular instance of the unittest was called for CPU, CUDA, or OpenCL parse_args_cpu_only("Template bank module") import argparse parser = argparse.ArgumentParser() DATA_FILE_URL = 'https://github.com/gwastro/pycbc-config/raw/master/test_data_files/{}' def update_mass_parameters(tmpltbank_class): """ Choose various sets of mass parameters for testing. 
""" num_comp_masses = 3 min_mass1 = [1,2,6] max_mass1 = [5,8,12] min_mass2 = [1,1,1] max_mass2 = [5,5,5] num_tot_masses = 3 # These *must* be provided min_tot_mass = [None, 2.5, 3.5] max_tot_mass = [None, 11, 7.5] num_chirp_masses = 3 max_chirp_mass = [None, 2.43, 3.5] min_chirp_mass = [None, 1.218, 2.43] num_etas = 3 max_eta = [0.25, 0.24, 0.23] min_eta = [None, 0.16, 0.17] max_iter_idx = num_comp_masses * num_tot_masses *\ num_chirp_masses * num_etas for idx in range(max_iter_idx): comp_masses_idx = idx % num_comp_masses tmpltbank_class.min_mass1 = min_mass1[comp_masses_idx] tmpltbank_class.max_mass1 = max_mass1[comp_masses_idx] tmpltbank_class.min_mass2 = min_mass2[comp_masses_idx] tmpltbank_class.max_mass2 = max_mass2[comp_masses_idx] reduced_idx = idx // num_comp_masses tot_mass_idx = reduced_idx % num_tot_masses tmpltbank_class.min_total_mass = min_tot_mass[tot_mass_idx] tmpltbank_class.max_total_mass = max_tot_mass[tot_mass_idx] reduced_idx = reduced_idx // num_tot_masses chirp_mass_idx = reduced_idx % num_chirp_masses tmpltbank_class.min_chirp_mass = min_chirp_mass[chirp_mass_idx] tmpltbank_class.max_chirp_mass = max_chirp_mass[chirp_mass_idx] reduced_idx = reduced_idx // num_chirp_masses eta_idx = reduced_idx tmpltbank_class.max_eta = max_eta[eta_idx] tmpltbank_class.min_eta = min_eta[eta_idx] yield idx return class TmpltbankTestClass(unittest.TestCase): def setUp(self): self.deltaF = 0.1 self.f_low = 15 self.f_upper = 2000 self.f0 = 70 self.sampleRate = 4096 self.pnOrder = 'threePointFivePN' self.min_mass1 = 1 self.min_mass2 = 1 self.max_mass1 = 5 self.max_mass2 = 5 self.max_ns_spin_mag = 0.5 self.max_bh_spin_mag = 0.9 self.ns_bh_boundary_mass = 2.0 self.min_total_mass = 2.5 self.max_total_mass = 6.0 self.max_chirp_mass = 2.4375415772291475 self.min_chirp_mass = 1.2187707886145738 self.max_eta = 0.24 self.min_eta = 0.16 # Sanity check these pycbc.tmpltbank.verify_mass_range_options(self, parser=parser) # Need to use F2 metric for ethinca self.ethincaOrder = 'threePointFivePN' self.ethincaCutoff = 'SchwarzISCO' self.ethincaFreqStep = 10. self.segLen = 1./self.deltaF self.psdSize = int(self.segLen * self.sampleRate / 2.) + 1 apy_fname = download_file( DATA_FILE_URL.format('ZERO_DET_high_P.txt'), cache=True ) self.psd = pycbc.psd.from_txt(apy_fname, self.psdSize, self.deltaF, self.f_low, is_asd_file=True) match_psd_size = int(256 * self.sampleRate / 2.) 
+ 1 self.psd_for_match = pycbc.psd.from_txt(apy_fname, match_psd_size, 1./256., self.f_low, is_asd_file=True) metricParams = pycbc.tmpltbank.metricParameters(self.pnOrder,\ self.f_low, self.f_upper, self.deltaF, self.f0) metricParams.psd = self.psd massRangeParams = pycbc.tmpltbank.massRangeParameters(self.min_mass1,\ self.max_mass1, self.min_mass2, self.max_mass2,\ maxNSSpinMag=self.max_ns_spin_mag,\ maxBHSpinMag=self.max_bh_spin_mag,\ maxTotMass=self.max_total_mass,\ minTotMass=self.min_total_mass,\ max_chirp_mass=self.max_chirp_mass,\ min_chirp_mass=self.min_chirp_mass,\ maxEta=self.max_eta,\ minEta=self.min_eta,\ ns_bh_boundary_mass=self.ns_bh_boundary_mass) # And again with the nsbh flag massRangeParams2 = pycbc.tmpltbank.massRangeParameters(self.min_mass1,\ self.max_mass1, self.min_mass2, self.max_mass2,\ maxNSSpinMag=self.max_ns_spin_mag,\ maxBHSpinMag=self.max_bh_spin_mag,\ maxTotMass=self.max_total_mass,\ minTotMass=self.min_total_mass,\ max_chirp_mass=self.max_chirp_mass,\ min_chirp_mass=self.min_chirp_mass,\ maxEta=self.max_eta,\ minEta=self.min_eta,\ nsbhFlag=True) metricParams = pycbc.tmpltbank.determine_eigen_directions(metricParams) vals=pycbc.tmpltbank.estimate_mass_range(100000, massRangeParams,\ metricParams, self.f_upper, covary=False) cov = numpy.cov(vals) _,self.evecsCV = numpy.linalg.eig(cov) metricParams.evecsCV = {} metricParams.evecsCV[self.f_upper] = self.evecsCV vals=pycbc.tmpltbank.estimate_mass_range(100000, massRangeParams,\ metricParams, self.f_upper, covary=False) self.metricParams = metricParams self.massRangeParams = massRangeParams self.massRangeParams2 = massRangeParams2 self.ethincaParams = pycbc.tmpltbank.ethincaParameters( self.ethincaOrder, self.ethincaCutoff, self.ethincaFreqStep, full_ethinca=False, time_ethinca=False) self.xis = vals def test_eigen_directions(self): fname='stockEvals.dat' apy_fname = download_file(DATA_FILE_URL.format(fname), cache=False) evalsStock = Array(numpy.loadtxt(apy_fname)) fname='stockEvecs.dat' apy_fname = download_file(DATA_FILE_URL.format(fname), cache=False) evecsStock = Array(numpy.loadtxt(apy_fname)) maxEval = max(evalsStock) evalsCurr = Array(self.metricParams.evals[self.f_upper]) evecsCurr = Array(self.metricParams.evecs[self.f_upper]) # Uncomment these lines to regenerate the data files #numpy.savetxt('newEvals.dat', evalsCurr) #numpy.savetxt('newEvecs.dat', evecsCurr) errMsg = "pycbc.tmpltbank.determine_eigen_directions has failed " errMsg += "sanity check." evalsDiff = abs(evalsCurr - evalsStock)/maxEval self.assertTrue(not (evalsDiff > 1E-5).any(), msg=errMsg) for stock,test in zip(evecsStock.data,evecsCurr.data): stockScaled = stock * evalsCurr.data**0.5 testScaled = test * evalsCurr.data**0.5 diff = stockScaled - testScaled self.assertTrue(not (diff > 1E-4).any(), msg=errMsg) def test_get_random_mass(self): # Want to do this for a variety of mass combinations for i in update_mass_parameters(self): curr_min_mass = self.min_total_mass curr_max_mass = self.max_total_mass try: pycbc.tmpltbank.verify_mass_range_options(self, parser=parser) except ValueError: # Some of the inputs are unphysical and will fail. # These cases are known to fail, the inputs are unphysical # 35 has inconsistent total mass and eta restrictions # 38 Component mass, [upper] chirp mass and [lower] eta limits # rule out the entire space. # 41 Same as 38 # 44 Same as 38 # 62 From component mass and total mass limits only total masses # between 7 and 7.5 are possible. This range all has eta # lower than the limit of 0.17. 
# 65 Same as 38 # 68 Same as 38 # 71 Same as 38 # 80 Same as 62 if i in [35,38,41,44,62,65,68,71,80]: continue raise # Check that if the mass limits have changed, it was right to do so # This is not exhaustive, but gets most things if (curr_min_mass is None) or \ not math.isclose(self.min_total_mass, curr_min_mass, rel_tol=1e-06): min_comp_mass = self.min_mass1 + self.min_mass2 min_eta = self.min_mass1 * self.min_mass2 /\ (min_comp_mass * min_comp_mass) min_chirp_mass = min_comp_mass * min_eta**(3./5.) if (min_comp_mass is not None) and \ math.isclose(self.min_total_mass, min_comp_mass, rel_tol=1e-06): # Okay, the total mass is changed by the components pass elif (self.min_eta and min_eta < self.min_eta) or \ (self.max_eta and min_eta > self.max_eta): # Okay, not possible from eta pass elif self.min_chirp_mass and \ min_chirp_mass < self.min_chirp_mass: # Okay, not possible from chirp mass pass else: err_msg = "Minimum total mass changed unexpectedly." self.fail(err_msg) if (curr_max_mass is None) or \ not math.isclose(self.max_total_mass, curr_max_mass, rel_tol=1e-06): max_comp_mass = self.max_mass1 + self.max_mass2 max_eta = self.max_mass1 * self.max_mass2 /\ (max_comp_mass * max_comp_mass) max_chirp_mass = max_comp_mass * max_eta**(3./5.) if (max_comp_mass is not None) and \ math.isclose(self.max_total_mass, max_comp_mass, rel_tol=1e-06): # Okay, the total mass is changed by the components pass elif (self.min_eta and max_eta < self.min_eta) or\ (self.max_eta and max_eta > self.max_eta): # Okay, not possible from eta pass elif self.max_chirp_mass and \ max_chirp_mass > self.max_chirp_mass: # Okay, not possible from chirp mass pass else: err_msg = "Maximum total mass changed unexpectedly." self.fail(err_msg) massRangeParams = pycbc.tmpltbank.massRangeParameters(\ self.min_mass1,\ self.max_mass1, self.min_mass2, self.max_mass2,\ maxNSSpinMag=self.max_ns_spin_mag,\ maxBHSpinMag=self.max_bh_spin_mag,\ maxTotMass=self.max_total_mass,\ minTotMass=self.min_total_mass,\ max_chirp_mass=self.max_chirp_mass,\ min_chirp_mass=self.min_chirp_mass,\ maxEta=self.max_eta,\ minEta=self.min_eta,\ ns_bh_boundary_mass=self.ns_bh_boundary_mass) # And again with the nsbh flag massRangeParams2 = pycbc.tmpltbank.massRangeParameters(\ self.min_mass1,\ self.max_mass1, self.min_mass2, self.max_mass2,\ maxNSSpinMag=self.max_ns_spin_mag,\ maxBHSpinMag=self.max_bh_spin_mag,\ maxTotMass=self.max_total_mass,\ minTotMass=self.min_total_mass,\ max_chirp_mass=self.max_chirp_mass,\ min_chirp_mass=self.min_chirp_mass,\ maxEta=self.max_eta,\ minEta=self.min_eta,\ nsbhFlag=True) mass1, mass2, spin1z, spin2z = \ pycbc.tmpltbank.get_random_mass(100000, massRangeParams) mass = mass1 + mass2 errMsg = "pycbc.tmpltbank.get_random_mass returns invalid ranges." 
self.assertTrue(not (mass < self.min_total_mass).any(),msg=errMsg) self.assertTrue(not (mass > self.max_total_mass).any(),msg=errMsg) self.assertTrue(not (mass1 > self.max_mass1 * 1.001).any(), msg=errMsg) self.assertTrue(not (mass1 < self.min_mass1 * 0.999).any(), msg=errMsg) self.assertTrue(not (mass2 > self.max_mass2 * 1.001).any(), msg=errMsg) self.assertTrue(not (mass2 < self.min_mass2 * 0.999).any(), msg=errMsg) self.assertTrue(not (mass1 < mass2).any(),msg=errMsg) # Chirp mass and eta mchirp, eta = pnutils.mass1_mass2_to_mchirp_eta(mass1,mass2) if self.max_chirp_mass: self.assertTrue(not (mchirp > self.max_chirp_mass*1.0001).any(), msg=errMsg) if self.min_chirp_mass: self.assertTrue(not (mchirp < self.min_chirp_mass*0.9999).any(), msg=errMsg) if self.min_eta: self.assertTrue(not (eta < self.min_eta*0.9999).any(), msg=errMsg) self.assertTrue(not (eta > self.max_eta*1.0001).any(), msg=errMsg) nsSpin1 = spin1z[mass1 < self.ns_bh_boundary_mass] nsSpin2 = spin2z[mass2 < self.ns_bh_boundary_mass] bhSpin1 = spin1z[mass1 > self.ns_bh_boundary_mass] bhSpin2 = spin2z[mass2 > self.ns_bh_boundary_mass] self.assertTrue(not (abs(nsSpin1) > 0.5).any(), msg=errMsg) self.assertTrue(not (abs(nsSpin2) > 0.5).any(), msg=errMsg) self.assertTrue(not (abs(bhSpin1) > 0.9).any(), msg=errMsg) self.assertTrue(not (abs(bhSpin2) > 0.9).any(), msg=errMsg) # Check that *some* spins are bigger than 0.5 if len(bhSpin1): self.assertTrue((abs(bhSpin1) > 0.5).any(), msg=errMsg) if len(bhSpin2): self.assertTrue((abs(bhSpin2) > 0.5).any(), msg=errMsg) # Check nsbh flag mass1, mass2, spin1z, spin2z = \ pycbc.tmpltbank.get_random_mass(100000, massRangeParams2) self.assertTrue(not (abs(spin1z) > 0.9).any(), msg=errMsg) self.assertTrue(not (abs(spin2z) > 0.5).any(), msg=errMsg) self.assertTrue((abs(spin1z) > 0.5).any(), msg=errMsg) def test_metric_match_prediction(self): mass1a, mass2a, spin1za, spin2za = \ pycbc.tmpltbank.get_random_mass(10, self.massRangeParams) mass1b, mass2b, spin1zb, spin2zb = \ pycbc.tmpltbank.get_random_mass(10, self.massRangeParams) for idx in range(10): masses1 = [mass1a[idx], mass2a[idx], spin1za[idx], spin2za[idx]] masses2 = [mass1b[idx], mass2b[idx], spin1zb[idx], spin2zb[idx]] dist, _, _ = pycbc.tmpltbank.get_point_distance \ (masses1, masses2, self.metricParams, self.f_upper) opt_dist = 0.02 while dist > opt_dist * 1.01 or dist < opt_dist * 0.99: dist_fac = opt_dist / dist dist_fac = dist_fac**0.5 if dist_fac < 0.01: dist_fac = 0.01 if dist_fac > 2: dist_fac = 2 for idx, curr_mass2 in enumerate(masses2): masses2[idx] = masses1[idx] + \ (curr_mass2 - masses1[idx]) * dist_fac dist, _, _ = pycbc.tmpltbank.get_point_distance \ (masses1, masses2, self.metricParams, self.f_upper) self.assertFalse(numpy.isnan(dist)) htilde1, _ = get_fd_waveform\ (approximant='TaylorF2', mass1=masses1[0], mass2=masses1[1], spin1z=masses1[2], spin2z=masses1[3], delta_f=1.0/256, f_lower=15, f_final=2000) htilde2, _ = get_fd_waveform\ (approximant='TaylorF2', mass1=masses2[0], mass2=masses2[1], spin1z=masses2[2], spin2z=masses2[3], delta_f=1.0/256, f_lower=15, f_final=2000) overlap, _ = match(htilde1, htilde2, psd=self.psd_for_match, low_frequency_cutoff=15) self.assertTrue(overlap > 0.97 and overlap < 0.985) def test_chirp_params(self): chirps=pycbc.tmpltbank.get_chirp_params(2.2, 1.8, 0.2, 0.3, self.metricParams.f0, self.metricParams.pnOrder) fname = 'stockChirps.dat' apy_fname = download_file(DATA_FILE_URL.format(fname), cache=False) stockChirps = numpy.loadtxt(apy_fname) diff = (chirps - stockChirps) / stockChirps 
errMsg = "Calculated chirp params differ from that expected." self.assertTrue( not (abs(diff) > 1E-4).any(), msg=errMsg) def test_hexagonal_placement(self): arrz = pycbc.tmpltbank.generate_hexagonal_lattice(10, 0, 10, 0, 0.03) arrz = numpy.array(arrz) fname = 'stockHexagonal.dat' apy_fname = download_file(DATA_FILE_URL.format(fname), cache=False) stockGrid = numpy.loadtxt(apy_fname) diff = arrz - stockGrid errMsg = "Calculated lattice differs from that expected." self.assertTrue( not (diff > 1E-4).any(), msg=errMsg) def test_anstar_placement(self): arrz = pycbc.tmpltbank.generate_anstar_3d_lattice(0, 10, 0, 10, 0, \ 10, 0.03) arrz = numpy.array(arrz) fname = 'stockAnstar3D.dat' apy_fname = download_file(DATA_FILE_URL.format(fname), cache=False) stockGrid = numpy.loadtxt(apy_fname) # Uncomment this line to regenerate the data file #numpy.savetxt("new_example.dat", arrz) errMsg = "Calculated lattice differs from that expected." self.assertTrue(len(arrz) == len(stockGrid), msg=errMsg) diff = arrz - stockGrid self.assertTrue( not (diff > 1E-4).any(), msg=errMsg) def test_get_mass_distribution(self): # Just run the function, no checking output pycbc.tmpltbank.get_mass_distribution([1.35,0.239,0.4,-0.2], 2, \ self.massRangeParams, self.metricParams, \ self.f_upper, \ numJumpPoints=123, chirpMassJumpFac=0.0002, \ etaJumpFac=0.009, spin1zJumpFac=0.1, \ spin2zJumpFac=0.2) def test_get_phys_cov_masses(self): evecs = self.metricParams.evecs[self.f_upper] evals = self.metricParams.evals[self.f_upper] masses1 = [2.2,1.8,0.4,0.3] masses2 = [2.21,1.79,0.41,0.29] xis1 = pycbc.tmpltbank.get_cov_params(masses1[0], masses1[1], masses1[2], masses1[3], self.metricParams, self.f_upper) xis2 = pycbc.tmpltbank.get_cov_params(masses2[0], masses2[1], masses2[2], masses2[3], self.metricParams, self.f_upper) testXis = [xis1[0],xis1[1]] b_mtot, b_eta = pnutils.mass1_mass2_to_mtotal_eta(masses2[0], masses2[1]) bestMasses = [b_mtot, b_eta, masses2[2], masses2[3]] bestXis = xis2 output = pycbc.tmpltbank.get_physical_covaried_masses(testXis, \ bestMasses, bestXis, 0.0001, self.massRangeParams, \ self.metricParams, self.f_upper) # Test that returned xis are close enough diff = (output[6][0] - testXis[0])**2 diff += (output[6][1] - testXis[1])**2 errMsg = 'pycbc.tmpltbank.get_physical_covaried_masses ' errMsg += 'failed to find a point within the desired limits.' self.assertTrue( diff < 1E-4,msg=errMsg) # Test that returned masses and xis agree massT = output[0] + output[1] etaT = output[0]*output[1] / (massT*massT) spinSetT = pycbc.pnutils.get_beta_sigma_from_aligned_spins(\ etaT, output[2], output[3]) xisT = pycbc.tmpltbank.get_cov_params(output[0], output[1], output[2], output[3], self.metricParams, self.f_upper) errMsg = "Recovered xis do not agree with those expected." self.assertTrue( abs(xisT[0] - output[6][0]) < 1E-5, msg=errMsg) self.assertTrue( abs(xisT[1] - output[6][1]) < 1E-5, msg=errMsg) self.assertTrue( abs(xisT[2] - output[6][2]) < 1E-5, msg=errMsg) self.assertTrue( abs(xisT[3] - output[6][3]) < 1E-5, msg=errMsg) # Test again with nsbh flag on output = pycbc.tmpltbank.get_physical_covaried_masses(testXis, \ bestMasses, bestXis, 0.0001, self.massRangeParams2, \ self.metricParams, self.f_upper) # Test that returned xis are close enough diff = (output[6][0] - testXis[0])**2 diff += (output[6][1] - testXis[1])**2 errMsg = 'pycbc.tmpltbank.get_physical_covaried_masses ' errMsg += 'failed to find a point within the desired limits.' 
self.assertTrue( diff < 1E-4,msg=errMsg) # Test that returned masses and xis agree xisT = pycbc.tmpltbank.get_cov_params(output[0], output[1], output[2], output[3], self.metricParams, self.f_upper) errMsg = "Recovered xis do not agree with those expected." self.assertTrue( abs(xisT[0] - output[6][0]) < 1E-5, msg=errMsg) self.assertTrue( abs(xisT[1] - output[6][1]) < 1E-5, msg=errMsg) self.assertTrue( abs(xisT[2] - output[6][2]) < 1E-5, msg=errMsg) self.assertTrue( abs(xisT[3] - output[6][3]) < 1E-5, msg=errMsg) def test_stack_xi_direction(self): # Just run the function, no checking output evecs = self.metricParams.evecs[self.f_upper] evals = self.metricParams.evals[self.f_upper] masses1 = [2.2,1.8,0.4,0.3] masses2 = [2.21,1.79,0.41,0.29] xis1 = pycbc.tmpltbank.get_cov_params(masses1[0], masses1[1], \ masses1[2], masses1[3], self.metricParams, self.f_upper) xis2 = pycbc.tmpltbank.get_cov_params(masses2[0], masses2[1], \ masses2[2], masses2[3], self.metricParams, self.f_upper) testXis = [xis1[0],xis1[1]] b_mtot, b_eta = pnutils.mass1_mass2_to_mtotal_eta(masses2[0], masses2[1]) bestMasses = [b_mtot, b_eta, masses2[2], masses2[3]] bestXis = xis2 depths = pycbc.tmpltbank.stack_xi_direction_brute(testXis, \ bestMasses, bestXis, 3, 0.03, self.massRangeParams, \ self.metricParams, self.f_upper, numIterations=50) def test_point_distance(self): masses1 = [2,2,0.4,0.6] masses2 = [2.02,1.97,0.41,0.59] dist, xis1, xis2 = pycbc.tmpltbank.get_point_distance(masses1, \ masses2, self.metricParams, self.f_upper) diff = abs((dist - 23.3681922039) / dist) errMsg = "Obtained distance does not agree with expected value." self.assertTrue( diff < 1E-5, msg=errMsg) def test_conv_to_sngl(self): # Just run the function, no checking output masses1 = [(2,2,0.4,0.3),(4.01,0.249,0.41,0.29)] llw_output.convert_to_sngl_inspiral_table(masses1, "a") def test_ethinca_calc(self): # Just run the function, no checking output m1 = 2. m2 = 2. s1z = 0. s2z = 0. # ethinca calc breaks unless f0 = fLow self.metricParams.f0 = self.metricParams.fLow output = llw_output.calculate_ethinca_metric_comps( self.metricParams, self.ethincaParams, m1, m2, s1z, s2z) # restore initial f0 value self.metricParams.f0 = self.f0 def tearDown(self): pass suite = unittest.TestSuite() suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TmpltbankTestClass)) if __name__ == '__main__': results = unittest.TextTestRunner(verbosity=2).run(suite) simple_exit(results)
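

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module).
# test_metric_match_prediction above tunes a pair of points until their
# metric distance is 0.02 and then checks that the waveform overlap lands in
# (0.97, 0.985): it is relying on the first-order relation
# match ~= 1 - distance. The helper below states that relation explicitly.
# The `metric_params` and `f_upper` arguments are assumed to be built exactly
# as in TmpltbankTestClass.setUp; only get_point_distance itself is taken
# from the pycbc API.

import pycbc.tmpltbank


def predicted_match(point_a, point_b, metric_params, f_upper):
    """Predict the match between two (mass1, mass2, spin1z, spin2z) points
    from the template-bank metric, valid only for small separations."""
    dist, _, _ = pycbc.tmpltbank.get_point_distance(
        point_a, point_b, metric_params, f_upper)
    # get_point_distance returns the predicted mismatch, so the predicted
    # match is its complement; for dist = 0.02 this gives ~0.98, matching
    # the (0.97, 0.985) window asserted in the test above.
    return 1.0 - dist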
26,607
45.84507
87
py
pycbc
pycbc-master/test/test_skymax.py
import copy import unittest import random import os import numpy from numpy import complex128, real, sqrt, sin, cos, angle, ceil, log from numpy import zeros, argmax, array from astropy.utils.data import download_file from pycbc import DYN_RANGE_FAC from pycbc.waveform import get_td_waveform, get_fd_waveform, td_approximants, fd_approximants from pycbc.pnutils import nearest_larger_binary_number from pycbc.types import FrequencySeries, TimeSeries, complex_same_precision_as from pycbc.types import load_frequencyseries from pycbc.filter import sigmasq, overlap_cplx, matched_filter_core from pycbc.filter import compute_max_snr_over_sky_loc_stat from pycbc.filter import compute_max_snr_over_sky_loc_stat_no_phase from pycbc.filter import compute_u_val_for_sky_loc_stat_no_phase from pycbc.filter import compute_u_val_for_sky_loc_stat from pycbc import psd from pycbc import vetoes from utils import parse_args_all_schemes, simple_exit _scheme, _context = parse_args_all_schemes("correlate") expected_results = {} for idx in range(4): expected_results[idx] = {} for jdx in range(4): expected_results[idx][jdx] = {} expected_results[0][0]['Ip_snr'] = 100.0 expected_results[0][0]['Ip_angle'] = -1.88619488652e-18 expected_results[0][0]['Ip_argmax'] = 0 expected_results[0][0]['Ic_snr'] = 98.7349100759 expected_results[0][0]['Ic_angle'] = 1.60960753393 expected_results[0][0]['Ic_argmax'] = 3 expected_results[0][1]['Ip_snr'] = 96.3390579783 expected_results[0][1]['Ip_angle'] = 0.511744420131 expected_results[0][1]['Ip_argmax'] = 1387 expected_results[0][1]['Ic_snr'] = 96.3390579783 expected_results[0][1]['Ic_angle'] = 2.08254074693 expected_results[0][1]['Ic_argmax'] = 1387 expected_results[0][2]['Ip_snr'] = 98.8434423546 expected_results[0][2]['Ip_angle'] = 0.566451407787 expected_results[0][2]['Ip_argmax'] = 1100 expected_results[0][2]['Ic_snr'] = 98.8485523538 expected_results[0][2]['Ic_angle'] = 2.10418718318 expected_results[0][2]['Ic_argmax'] = 1099 expected_results[0][3]['Ip_snr'] = 96.4239530554 expected_results[0][3]['Ip_angle'] = -0.946889162447 expected_results[0][3]['Ip_argmax'] = 1447 expected_results[0][3]['Ic_snr'] = 95.6528566731 expected_results[0][3]['Ic_angle'] = 1.10466400896 expected_results[0][3]['Ic_argmax'] = 1484 expected_results[1][0]['Ip_snr'] = 96.3390579783 expected_results[1][0]['Ip_angle'] = -0.511744420131 expected_results[1][0]['Ip_argmax'] = 260757 expected_results[1][0]['Ic_snr'] = 94.4282712604 expected_results[1][0]['Ic_angle'] = 0.957548192904 expected_results[1][0]['Ic_argmax'] = 260753 expected_results[1][1]['Ip_snr'] = 100.0 expected_results[1][1]['Ip_angle'] = -1.73241699772e-18 expected_results[1][1]['Ip_argmax'] = 0 expected_results[1][1]['Ic_snr'] = 100.0 expected_results[1][1]['Ic_angle'] = 1.57079632679 expected_results[1][1]['Ic_argmax'] = 0 expected_results[1][2]['Ip_snr'] = 97.6397283701 expected_results[1][2]['Ip_angle'] = 0.156578884423 expected_results[1][2]['Ip_argmax'] = 261862 expected_results[1][2]['Ic_snr'] = 97.7584101045 expected_results[1][2]['Ic_angle'] = 1.69481552225 expected_results[1][2]['Ic_argmax'] = 261861 expected_results[1][3]['Ip_snr'] = 99.4434573331 expected_results[1][3]['Ip_angle'] = -1.50330148916 expected_results[1][3]['Ip_argmax'] = 58 expected_results[1][3]['Ic_snr'] = 98.0771994342 expected_results[1][3]['Ic_angle'] = 0.521399663782 expected_results[1][3]['Ic_argmax'] = 94 expected_results[2][0]['Ip_snr'] = 98.8434423546 expected_results[2][0]['Ip_angle'] = -0.566451407787 expected_results[2][0]['Ip_argmax'] = 261044 
expected_results[2][0]['Ic_snr'] = 96.8727372348 expected_results[2][0]['Ic_angle'] = 0.921151545297 expected_results[2][0]['Ic_argmax'] = 261041 expected_results[2][1]['Ip_snr'] = 97.6397283701 expected_results[2][1]['Ip_angle'] = -0.156578884423 expected_results[2][1]['Ip_argmax'] = 282 expected_results[2][1]['Ic_snr'] = 97.6397283701 expected_results[2][1]['Ic_angle'] = 1.41421744237 expected_results[2][1]['Ic_argmax'] = 282 expected_results[2][2]['Ip_snr'] = 100.0 expected_results[2][2]['Ip_angle'] = 2.41820532326e-18 expected_results[2][2]['Ip_argmax'] = 0 expected_results[2][2]['Ic_snr'] = 99.988543227 expected_results[2][2]['Ic_angle'] = 1.5377922043 expected_results[2][2]['Ic_argmax'] = 262143 expected_results[2][3]['Ip_snr'] = 97.3725007917 expected_results[2][3]['Ip_angle'] = -1.61086911817 expected_results[2][3]['Ip_argmax'] = 342 expected_results[2][3]['Ic_snr'] = 96.2035744912 expected_results[2][3]['Ic_angle'] = 0.442670138337 expected_results[2][3]['Ic_argmax'] = 379 expected_results[3][0]['Ip_snr'] = 96.4239530554 expected_results[3][0]['Ip_angle'] = 0.946889162447 expected_results[3][0]['Ip_argmax'] = 260697 expected_results[3][0]['Ic_snr'] = 94.4958639934 expected_results[3][0]['Ic_angle'] = 2.41579357775 expected_results[3][0]['Ic_argmax'] = 260693 expected_results[3][1]['Ip_snr'] = 99.4434573331 expected_results[3][1]['Ip_angle'] = 1.50330148916 expected_results[3][1]['Ip_argmax'] = 262086 expected_results[3][1]['Ic_snr'] = 99.4434573331 expected_results[3][1]['Ic_angle'] = 3.07409781595 expected_results[3][1]['Ic_argmax'] = 262086 expected_results[3][2]['Ip_snr'] = 97.3725007917 expected_results[3][2]['Ip_angle'] = 1.61086911817 expected_results[3][2]['Ip_argmax'] = 261802 expected_results[3][2]['Ic_snr'] = 97.3906866656 expected_results[3][2]['Ic_angle'] = -3.11216854627 expected_results[3][2]['Ic_argmax'] = 261802 expected_results[3][3]['Ip_snr'] = 100.0 expected_results[3][3]['Ip_angle'] = 9.03608368726e-19 expected_results[3][3]['Ip_argmax'] = 0 expected_results[3][3]['Ic_snr'] = 99.4335056063 expected_results[3][3]['Ic_angle'] = 2.02876392072 expected_results[3][3]['Ic_argmax'] = 36 def generate_detector_strain(template_params, h_plus, h_cross): polarization = 0 if hasattr(template_params, 'polarization'): polarization = template_params.polarization f_plus = cos(polarization) f_cross = sin(polarization) return h_plus * f_plus + h_cross * f_cross def make_padded_frequency_series(vec, filter_N=None, delta_f=None): """Convert vec (TimeSeries or FrequencySeries) to a FrequencySeries. If filter_N and/or delta_f are given, the output will take those values. If not told otherwise the code will attempt to pad a timeseries first such that the waveform will not wraparound. However, if delta_f is specified to be shorter than the waveform length then wraparound *will* be allowed. 
""" if filter_N is None: power = ceil(log(len(vec), 2)) + 1 N = 2 ** power else: N = filter_N n = N / 2 + 1 if isinstance(vec, FrequencySeries): vectilde = FrequencySeries(zeros(n, dtype=complex_same_precision_as(vec)), delta_f=1.0, copy=False) if len(vectilde) < len(vec): cplen = len(vectilde) else: cplen = len(vec) vectilde[0:cplen] = vec[0:cplen] delta_f = vec.delta_f elif isinstance(vec, TimeSeries): # First determine if the timeseries is too short for the specified df # and increase if necessary curr_length = len(vec) new_length = int(nearest_larger_binary_number(curr_length)) while new_length * vec.delta_t < 1./delta_f: new_length = new_length * 2 vec.resize(new_length) # Then convert to frequencyseries v_tilde = vec.to_frequencyseries() # Then convert frequencyseries to required length and spacing by keeping # only every nth sample if delta_f needs increasing, and cutting at # Nyquist if the max frequency is too high. # NOTE: This assumes that the input and output data is using binary # lengths. i_delta_f = v_tilde.get_delta_f() v_tilde = v_tilde.numpy() df_ratio = int(delta_f / i_delta_f) n_freq_len = int((n-1) * df_ratio +1) assert(n <= len(v_tilde)) df_ratio = int(delta_f / i_delta_f) v_tilde = v_tilde[:n_freq_len:df_ratio] vectilde = FrequencySeries(v_tilde, delta_f=delta_f, dtype=complex128) return FrequencySeries(vectilde * DYN_RANGE_FAC, delta_f=delta_f, dtype=complex128) def get_waveform(wf_params, start_frequency, sample_rate, length, filter_rate, sky_max_template=False): delta_f = filter_rate / float(length) if wf_params.approximant in fd_approximants(): hp, hc = get_fd_waveform(wf_params, delta_f=delta_f, f_lower=start_frequency) elif wf_params.approximant in td_approximants(): hp, hc = get_td_waveform(wf_params, delta_t=1./sample_rate, f_lower=start_frequency) if not sky_max_template: hvec = generate_detector_strain(wf_params, hp, hc) return make_padded_frequency_series(hvec, length, delta_f=delta_f) else: return make_padded_frequency_series(hp, length, delta_f=delta_f), \ make_padded_frequency_series(hc, length, delta_f=delta_f) class DummyClass(object): pass class TestChisq(unittest.TestCase): __test__ = False def setUp(self, *args): # Where are my data files? self.context = _context self.scheme = _scheme self.tolerance = 1e-6 self.filter_t_length = 16 self.low_freq_filter = 30. 
self.sample_rate = 16384 self.filter_N = int(self.filter_t_length * self.sample_rate) self.filter_n = int(self.filter_N / 2 + 1) self.filter_delta_f = 1.0 / self.filter_t_length self.psd = psd.from_string('aLIGOZeroDetHighPowerGWINC', self.filter_n, self.filter_delta_f, self.low_freq_filter) self.psd *= DYN_RANGE_FAC*DYN_RANGE_FAC wps1 = DummyClass() wps1.mass1 = 123.7627 wps1.mass2 = 72.55471 wps1.inclination = 1.125029 wps1.coa_phase = 2.906049 wps1.approximant='EOBNRv2HM' self.wps1 = wps1 wps2 = DummyClass() wps2.mass1 = 131.460647583 wps2.mass2 = 69.0030059814 wps2.inclination = 0.8432287 wps2.coa_phase = 0.2 wps2.approximant = 'SEOBNRv4_ROM' self.wps2 = wps2 wps3 = copy.deepcopy(wps2) wps3.approximant = 'EOBNRv2HM_ROM' self.wps3 = wps3 wps4 = copy.deepcopy(wps2) wps4.spin1x = 0.8 wps4.spin2y = -0.9 wps4.approximant = 'IMRPhenomPv2' self.wps4 = wps4 self.wps_list = [wps1, wps2, wps3, wps4] self.sm_power_chisq = vetoes.SingleDetSkyMaxPowerChisq(num_bins='1') self.sm_power_chisq2 = vetoes.SingleDetSkyMaxPowerChisq(num_bins='10') self.power_chisq = vetoes.SingleDetPowerChisq(num_bins='1') self.power_chisq2 = vetoes.SingleDetPowerChisq(num_bins='10') def test_filtering(self): idx = self.idx jdx = self.jdx # Uncomment these lines if needing to regenerate data files #w1 = self.wps_list[idx] #w2 = self.wps_list[jdx] #stilde = get_waveform(w1, self.low_freq_filter-1, # self.sample_rate, self.filter_N, # self.sample_rate) #try: # stilde.save('data/skymaxtest_stilde_%d.hdf' % idx) #except: # pass url = ('https://github.com/gwastro/pycbc-config/raw/master/' 'test_data_files/{}') fname = f'skymaxtest_stilde_{idx}.hdf' apy_fname = download_file(url.format(fname), cache=False) # Astropy will not download with the .hdf extension, which we need, # so symlink os.symlink(apy_fname, fname) stilde = load_frequencyseries(fname) os.unlink(fname) s_norm = sigmasq(stilde, psd=self.psd, low_frequency_cutoff=self.low_freq_filter) stilde /= sqrt(float(s_norm)) stilde *= 100 # Uncomment these lines if needing to regenerate data files #hplus, hcross = get_waveform(w2, self.low_freq_filter-1, # self.sample_rate, self.filter_N, # self.sample_rate, sky_max_template=True) #try: # hplus.save('data/skymaxtest_hplus_%d.hdf' % jdx) # hcross.save('data/skymaxtest_hcross_%d.hdf' % jdx) #except: # pass fname = f'skymaxtest_hplus_{jdx}.hdf' apy_fname = download_file(url.format(fname), cache=False) # Astropy will not download with the .hdf extension, which we need, # so symlink os.symlink(apy_fname, fname) hplus = load_frequencyseries(fname) os.unlink(fname) fname = f'skymaxtest_hcross_{jdx}.hdf' apy_fname = download_file(url.format(fname), cache=False) # Astropy will not download with the .hdf extension, which we need, # so symlink os.symlink(apy_fname, fname) hcross = load_frequencyseries(fname) os.unlink(fname) hplus.f_lower = self.low_freq_filter hplus.params = random.randint(0,100000000000) hcross.f_lower = self.low_freq_filter hcross.params = random.randint(0,100000000000) hp_norm = sigmasq(hplus, psd=self.psd, low_frequency_cutoff=self.low_freq_filter) hc_norm = sigmasq(hcross, psd=self.psd, low_frequency_cutoff=self.low_freq_filter) hplus /= sqrt(float(hp_norm)) hcross /= sqrt(float(hc_norm)) hpc_corr = overlap_cplx(hplus, hcross, psd=self.psd, low_frequency_cutoff=self.low_freq_filter, normalized=False) hpc_corr_R = real(hpc_corr) I_plus, corr_plus, n_plus = matched_filter_core\ (hplus, stilde, psd=self.psd, low_frequency_cutoff=self.low_freq_filter, h_norm=1.) 
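        # matched_filter_core returns an unnormalised complex SNR series
        # together with its normalisation constant; the series are rescaled
        # by n_plus / n_cross below before their peaks are compared against
        # the stored expected_results.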
# FIXME: Remove the deepcopies before merging with master I_plus = copy.deepcopy(I_plus) corr_plus = copy.deepcopy(corr_plus) I_cross, corr_cross, n_cross = matched_filter_core\ (hcross, stilde, psd=self.psd, low_frequency_cutoff=self.low_freq_filter, h_norm=1.) I_cross = copy.deepcopy(I_cross) corr_cross = copy.deepcopy(corr_cross) I_plus = I_plus * n_plus I_cross = I_cross * n_cross IPM = abs(I_plus.data).argmax() ICM = abs(I_cross.data).argmax() self.assertAlmostEqual(abs(I_plus[IPM]), expected_results[idx][jdx]['Ip_snr']) self.assertAlmostEqual(angle(I_plus[IPM]), expected_results[idx][jdx]['Ip_angle']) self.assertEqual(IPM, expected_results[idx][jdx]['Ip_argmax']) self.assertAlmostEqual(abs(I_cross[ICM]), expected_results[idx][jdx]['Ic_snr']) self.assertAlmostEqual(angle(I_cross[ICM]), expected_results[idx][jdx]['Ic_angle']) self.assertEqual(ICM, expected_results[idx][jdx]['Ic_argmax']) #print "expected_results[{}][{}]['Ip_snr'] = {}" .format(idx,jdx,abs(I_plus[IPM])) #print "expected_results[{}][{}]['Ip_angle'] = {}".format(idx,jdx,angle(I_plus[IPM])) #print "expected_results[{}][{}]['Ip_argmax'] = {}".format(idx,jdx, IPM) #print "expected_results[{}][{}]['Ic_snr'] = {}" .format(idx,jdx,abs(I_cross[ICM])) #print "expected_results[{}][{}]['Ic_angle'] = {}".format(idx,jdx,angle(I_cross[ICM])) #print "expected_results[{}][{}]['Ic_argmax'] = {}".format(idx,jdx, ICM) det_stat_prec = compute_max_snr_over_sky_loc_stat\ (I_plus, I_cross, hpc_corr_R, hpnorm=1., hcnorm=1., thresh=0.1, analyse_slice=slice(0,len(I_plus.data))) det_stat_hom = compute_max_snr_over_sky_loc_stat_no_phase\ (I_plus, I_cross, hpc_corr_R, hpnorm=1., hcnorm=1., thresh=0.1, analyse_slice=slice(0,len(I_plus.data))) idx_max_prec = argmax(det_stat_prec.data) idx_max_hom = argmax(det_stat_hom.data) max_ds_prec = det_stat_prec[idx_max_prec] max_ds_hom = det_stat_hom[idx_max_hom] uvals_prec, _ = compute_u_val_for_sky_loc_stat\ (I_plus.data, I_cross.data, hpc_corr_R, indices=[idx_max_prec], hpnorm=1., hcnorm=1.) with numpy.errstate(divide="ignore"): uvals_hom, _ = compute_u_val_for_sky_loc_stat_no_phase\ (I_plus.data, I_cross.data, hpc_corr_R, indices=[idx_max_hom], hpnorm=1., hcnorm=1.) ht = hplus * uvals_hom[0] + hcross ht_norm = sigmasq(ht, psd=self.psd, low_frequency_cutoff=self.low_freq_filter) ht /= sqrt(float(ht_norm)) ht.f_lower = self.low_freq_filter ht.params = random.randint(0,100000000000) I_t, corr_t, n_t = matched_filter_core\ (ht, stilde, psd=self.psd, low_frequency_cutoff=self.low_freq_filter, h_norm=1.) I_t = I_t * n_t self.assertAlmostEqual(abs(real(I_t.data[idx_max_hom])), max_ds_hom) self.assertEqual(abs(real(I_t.data[idx_max_hom])), max(abs(real(I_t.data)))) with numpy.errstate(invalid='ignore', divide='ignore'): chisq, _ = self.power_chisq.values\ (corr_t, array([max_ds_hom]) / n_plus, n_t, self.psd, array([idx_max_hom]), ht) ht = hplus * uvals_prec[0] + hcross ht_norm = sigmasq(ht, psd=self.psd, low_frequency_cutoff=self.low_freq_filter) ht /= sqrt(float(ht_norm)) ht.f_lower = self.low_freq_filter ht.params = random.randint(0,100000000000) I_t, corr_t, n_t = matched_filter_core\ (ht, stilde, psd=self.psd, low_frequency_cutoff=self.low_freq_filter, h_norm=1.) 
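        # As above, but for the precessing-style statistic: the re-filtered
        # u-maximised template should peak at idx_max_prec with value
        # max_ds_prec, and the chi-squared of an exact-match template should
        # be essentially zero.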
I_t = I_t * n_t with numpy.errstate(divide="ignore", invalid='ignore'): chisq, _ = self.power_chisq.values\ (corr_t, array([max_ds_prec]) / n_plus, n_t, self.psd, array([idx_max_prec]), ht) self.assertAlmostEqual(abs(I_t.data[idx_max_prec]), max_ds_prec) self.assertEqual(idx_max_prec, abs(I_t.data).argmax()) self.assertTrue(chisq < 1E-4) def skymax_test_maker(class_name, idx, jdx): class Test(class_name): __test__ = True idx = idx jdx = jdx Test.__name__ = "Test %s" % '_'.join([str(idx),str(jdx)]) return Test suite = unittest.TestSuite() for idx in range(4): for jdx in range(4): curr_cls = skymax_test_maker(TestChisq, idx, jdx) vars()[curr_cls.__name__] = curr_cls suite.addTest(unittest.TestLoader().loadTestsFromTestCase(curr_cls)) del curr_cls if __name__ == '__main__': results = unittest.TextTestRunner(verbosity=2).run(suite) simple_exit(results)
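

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module).
# test_filtering above exercises a three-step pattern: filter the data
# against the plus and cross polarizations separately, compute their complex
# overlap, then maximize the network response over sky location. A
# stripped-down version of that pipeline is sketched here; it assumes
# `hplus`, `hcross` and `stilde` are unit-normalized FrequencySeries built
# as in the test, and is not a substitute for the full test logic.

import copy
from numpy import argmax, real
from pycbc.filter import (matched_filter_core, overlap_cplx,
                          compute_max_snr_over_sky_loc_stat)


def sky_maxed_peak(hplus, hcross, stilde, psd, f_low):
    """Return the sky-maximized statistic series and its peak sample."""
    # Complex overlap between the two polarization filters
    hpc_corr_r = real(overlap_cplx(hplus, hcross, psd=psd,
                                   low_frequency_cutoff=f_low,
                                   normalized=False))
    # Separate matched filters against each polarization; copy the first
    # output before the second call, since the filter may reuse buffers
    # (the same reason the test above deepcopies I_plus)
    i_p, _, n_p = matched_filter_core(hplus, stilde, psd=psd,
                                      low_frequency_cutoff=f_low, h_norm=1.)
    snr_p = copy.deepcopy(i_p) * n_p
    i_c, _, n_c = matched_filter_core(hcross, stilde, psd=psd,
                                      low_frequency_cutoff=f_low, h_norm=1.)
    snr_c = i_c * n_c
    # Maximize the coherent statistic over sky position
    stat = compute_max_snr_over_sky_loc_stat(
        snr_p, snr_c, hpc_corr_r, hpnorm=1., hcnorm=1., thresh=0.1,
        analyse_slice=slice(0, len(snr_p.data)))
    return stat, argmax(stat.data)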
19,047
40.680525
94
py
pycbc
pycbc-master/test/test_live_coinc_compare.py
"""Mock simulation to easily test and profile PyCBC Live's coincidence code.""" import unittest from types import SimpleNamespace import numpy as np # Some duplicate imports, but I want to copy code without changing it! import numpy, logging, pycbc.pnutils, pycbc.conversions, copy, lal import cProfile from astropy.utils.data import download_file from pycbc import gps_now from pycbc.events.coinc import LiveCoincTimeslideBackgroundEstimator as Coincer import pycbc.events.coinc from utils import simple_exit import validation_code.old_coinc as old_coinc OriginalCoincer = old_coinc.LiveCoincTimeslideBackgroundEstimator class SingleDetTrigSimulator: """An object that simulates single-detector triggers in the same format as produced by the matched-filtering processes of PyCBC Live. """ def __init__(self, num_templates, analysis_chunk, detectors, num_trigs_per_block): self.num_templates = num_templates self.detectors = detectors self.analysis_chunk = analysis_chunk self.start_time = gps_now() self.num_trigs = num_trigs_per_block def get_trigs(self): trigs = {} for det in self.detectors: rand_end = np.random.randint( self.start_time*4096, (self.start_time + self.analysis_chunk)*4096, size=self.num_trigs ) rand_end = (rand_end / 4096.).astype(np.float64) trigs[det] = { "snr": np.random.uniform(4.5, 10, size=self.num_trigs).astype(np.float32), "end_time": rand_end, "chisq": np.random.uniform(0.5, 1.5, size=self.num_trigs).astype(np.float32), "chisq_dof": np.ones(self.num_trigs, dtype=np.int32) * 10, "coa_phase": np.random.uniform(0, 2*np.pi, size=self.num_trigs).astype(np.float32), "sigmasq": np.ones(self.num_trigs, dtype=np.float32), # FIXME (maybe) "template_id": np.random.uniform( 0, self.num_templates, size=self.num_trigs ).astype(np.int32) } self.start_time += self.analysis_chunk return trigs class TestPyCBCLiveCoinc(unittest.TestCase): def setUp(self, *args): # Uncomment for more verbosity # logging.basicConfig(format="%(asctime)s %(message)s", # level=logging.INFO) # simulate the `args` object we normally get from the command line arguments url = 'https://github.com/gwastro/pycbc-config/raw/master/' url += 'test_data_files/{}-PTA_HISTOGRAM.hdf' stat_file_paths = [ download_file(url.format("H1L1"), cache=True), ] args = SimpleNamespace( sngl_ranking="snr", ranking_statistic="phasetd", statistic_files=[stat_file_paths], statistic_keywords=None, timeslide_interval=0.1, background_ifar_limit=100, store_background=True ) # number of templates in the bank self.num_templates = 10 # duration of analysis segment analysis_chunk = 2000 # combination of two detectors to analyze detectors = ["H1", "L1"] # number of single-detector triggers per detector per chunk num_single_trigs = 400 self.num_iterations = 15 # create the single-detector trigger simulator single_det_trig_sim = SingleDetTrigSimulator( self.num_templates, analysis_chunk, detectors, num_single_trigs ) self.new_trigs = [single_det_trig_sim.get_trigs() for _ in range(self.num_iterations)] # create the current "coincer" object self.new_coincer = Coincer.from_cli(args, self.num_templates, analysis_chunk, detectors) # create the validation "coincer" object self.old_coincer = OriginalCoincer.from_cli(args, self.num_templates, analysis_chunk, detectors) def test_coincer_runs(self): # the following loop simulates the "infinite" analysis loop # (though we only do a few iterations here) def assess_same_output(newout, oldout): checkkeys = [ 'background/time', 'background/count', 'background/stat', 'foreground/ifar', 'foreground/stat', 'foreground/type' 
] for ifo in ['H1', 'L1']: checkkeys += [ f'foreground/{ifo}/snr', f'foreground/{ifo}/end_time', f'foreground/{ifo}/chisq', f'foreground/{ifo}/chisq_dof', f'foreground/{ifo}/coa_phase', f'foreground/{ifo}/sigmasq', f'foreground/{ifo}/template_id', f'foreground/{ifo}/stat' ] for key in checkkeys: if key not in newout: self.assertTrue(key not in oldout) else: self.assertTrue(key in oldout) if type(newout[key]) is np.ndarray: self.assertTrue(len(newout[key]) == len(oldout[key])) self.assertTrue( numpy.isclose(newout[key], oldout[key]).all() ) else: self.assertTrue(newout[key] == oldout[key]) for i in range(self.num_iterations): logging.info("Iteration %d", i) single_det_trigs = self.new_trigs[i] cres = self.new_coincer.add_singles(single_det_trigs) ocres = self.old_coincer.add_singles(single_det_trigs) assess_same_output(cres, ocres) # Are they the same coincs now? new_coincer = self.new_coincer old_coincer = self.old_coincer self.assertTrue(len(new_coincer.coincs.data) == len(old_coincer.coincs.data)) self.assertTrue(numpy.isclose(new_coincer.coincs.data, old_coincer.coincs.data, rtol=1e-06).all()) for ifo in new_coincer.singles: lgc = True for temp in range(self.num_templates): # Check that all singles, for all templates, are identical lgc = lgc & (new_coincer.singles[ifo].data(temp) == old_coincer.singles[ifo].data(temp)).all() self.assertTrue(lgc) suite = unittest.TestSuite() suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestPyCBCLiveCoinc)) if __name__ == '__main__': results = unittest.TextTestRunner(verbosity=2).run(suite) simple_exit(results)
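

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test module).
# The module imports cProfile but never uses it, even though the docstring
# advertises profiling. A minimal driver in the spirit of that docstring is
# sketched below; `coincer` and `trig_blocks` are assumed to be built the
# same way as in TestPyCBCLiveCoinc.setUp (hypothetical names here).

import cProfile
import pstats


def profile_coincer(coincer, trig_blocks):
    """Profile repeated add_singles calls and report the hottest functions."""
    prof = cProfile.Profile()
    prof.enable()
    for trigs in trig_blocks:
        # This is the hot loop of PyCBC Live's coincidence stage
        coincer.add_singles(trigs)
    prof.disable()
    pstats.Stats(prof).sort_stats("cumulative").print_stats(10)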
6,818
37.965714
110
py
pycbc
pycbc-master/test/validation_code/old_stat.py
# Copyright (C) 2016 Alex Nitz # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. #### NOTE ##### # This code is a verbatim copy of the stat.py code as of 20th August 2022. # It's here to verify that any changes being made to this code are not changing # the physical output ############### # # ============================================================================= # # Preamble # # ============================================================================= # """ This module contains functions for calculating coincident ranking statistic values. """ import logging import numpy from pycbc.events import ranking from pycbc.events import coinc_rate class Stat(object): """Base class which should be extended to provide a coincident statistic""" def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs): """ Create a statistic class instance Parameters ---------- sngl_ranking: str The name of the ranking to use for the single-detector triggers. files: list of strs, needed for some statistics A list containing the filenames of hdf format files used to help construct the coincident statistics. The files must have a 'stat' attribute which is used to associate them with the appropriate statistic class. ifos: list of strs, needed for some statistics The list of detector names """ import h5py self.files = {} files = files or [] for filename in files: f = h5py.File(filename, 'r') stat = f.attrs['stat'] if hasattr(stat, 'decode'): stat = stat.decode() if stat in self.files: raise RuntimeError("We already have one file with stat attr =" " %s. Can't provide more than one!" % stat) logging.info("Found file %s for stat %s", filename, stat) self.files[stat] = f # Provide the dtype of the single detector method's output # This is used by background estimation codes that need to maintain # a buffer of such values. self.single_dtype = numpy.float32 # True if a larger single detector statistic will produce a larger # coincident statistic self.single_increasing = True self.ifos = ifos or [] self.sngl_ranking = sngl_ranking self.sngl_ranking_kwargs = {} for key, value in kwargs.items(): if key.startswith('sngl_ranking_'): self.sngl_ranking_kwargs[key[13:]] = value def get_sngl_ranking(self, trigs): """ Returns the ranking for the single detector triggers. Parameters ---------- trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) Dictionary-like object holding single detector trigger information. Returns ------- numpy.ndarray The array of single detector values """ return ranking.get_sngls_ranking_from_trigs( trigs, self.sngl_ranking, **self.sngl_ranking_kwargs ) def single(self, trigs): # pylint:disable=unused-argument """ Calculate the necessary single detector information Parameters ---------- trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) Dictionary-like object holding single detector trigger information. 
Returns ------- numpy.ndarray The array of single detector values """ err_msg = "This function is a stub that should be overridden by the " err_msg += "sub-classes. You shouldn't be seeing this error!" raise NotImplementedError(err_msg) def rank_stat_single(self, single_info): """ Calculate the statistic for a single detector candidate Parameters ---------- single_info: tuple Tuple containing two values. The first is the ifo (str) and the second is the single detector triggers. Returns ------- numpy.ndarray The array of single detector statistics """ err_msg = "This function is a stub that should be overridden by the " err_msg += "sub-classes. You shouldn't be seeing this error!" raise NotImplementedError(err_msg) def rank_stat_coinc(self, s, slide, step, to_shift, **kwargs): # pylint:disable=unused-argument """ Calculate the coincident detection statistic. """ err_msg = "This function is a stub that should be overridden by the " err_msg += "sub-classes. You shouldn't be seeing this error!" raise NotImplementedError(err_msg) def _check_coinc_lim_subclass(self, allowed_names): """ Check that we are not using coinc_lim_for_thresh when not valid. coinc_lim_for_thresh is only defined for the statistic it is present in. If we subclass, we must check explicitly that it is still valid and indicate this in the code. If the code does not have this explicit check you will see the failure message here. Parameters ----------- allowed_names : list list of allowed classes for the specific sub-classed method. """ if type(self).__name__ not in allowed_names: err_msg = "This is being called from a subclass which has not " err_msg += "been checked for validity with this method. If it is " err_msg += "valid for the subclass to come here, include in the " err_msg += "list of allowed_names above." raise NotImplementedError(err_msg) def coinc_lim_for_thresh(self, s, thresh, limifo, **kwargs): # pylint:disable=unused-argument """ Optimization function to identify coincs too quiet to be of interest Calculate the required single detector statistic to exceed the threshold for each of the input triggers. """ err_msg = "This function is a stub that should be overridden by the " err_msg += "sub-classes. You shouldn't be seeing this error!" raise NotImplementedError(err_msg) class QuadratureSumStatistic(Stat): """Calculate the quadrature sum coincident detection statistic""" def single(self, trigs): """ Calculate the necessary single detector information Here just the ranking is computed and returned. Parameters ---------- trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) Dictionary-like object holding single detector trigger information. Returns ------- numpy.ndarray The array of single detector values """ return self.get_sngl_ranking(trigs) def rank_stat_single(self, single_info): """ Calculate the statistic for a single detector candidate Parameters ---------- single_info: tuple Tuple containing two values. The first is the ifo (str) and the second is the single detector triggers. Returns ------- numpy.ndarray The array of single detector statistics """ return self.single(single_info[1]) def rank_stat_coinc(self, sngls_list, slide, step, to_shift, **kwargs): # pylint:disable=unused-argument """ Calculate the coincident detection statistic. 
        Parameters
        ----------
        sngls_list: list
            List of (ifo, single detector statistic) tuples
        slide: (unused in this statistic)
        step: (unused in this statistic)
        to_shift: list
            List of integers indicating what multiples of the time shift will
            be applied (unused in this statistic)

        Returns
        -------
        numpy.ndarray
            Array of coincident ranking statistic values
        """
        cstat = sum(sngl[1] ** 2. for sngl in sngls_list) ** 0.5
        # For single-detector "cuts" the single ranking is set to -1, so
        # zero out the coinc statistic wherever a contributing single
        # trigger carries that sentinel value
        for sngls in sngls_list:
            cstat[sngls[1] == -1] = 0
        return cstat

    def coinc_lim_for_thresh(self, s, thresh, limifo, **kwargs):  # pylint:disable=unused-argument
        """
        Optimization function to identify coincs too quiet to be of interest

        Calculate the required single detector statistic to exceed the
        threshold for each of the input triggers.

        Parameters
        ----------
        s: list
            List of (ifo, single detector statistic) tuples for all detectors
            except limifo.
        thresh: float
            The threshold on the coincident statistic.
        limifo: string
            The ifo for which the limit is to be found.

        Returns
        -------
        numpy.ndarray
            Array of limits on the limifo single statistic to exceed thresh.
        """
        # Safety against subclassing and not rethinking this
        allowed_names = ['QuadratureSumStatistic']
        self._check_coinc_lim_subclass(allowed_names)

        s0 = thresh ** 2. - sum(sngl[1] ** 2. for sngl in s)
        s0[s0 < 0] = 0
        return s0 ** 0.5


class PhaseTDStatistic(QuadratureSumStatistic):
    """
    Statistic that re-weights combined newsnr using coinc parameters.

    The weighting is based on the PDF of time delays, phase differences and
    amplitude ratios between triggers in different ifos.
    """

    def __init__(self, sngl_ranking, files=None, ifos=None,
                 pregenerate_hist=True, **kwargs):
        """
        Create a statistic class instance

        Parameters
        ----------
        sngl_ranking: str
            The name of the ranking to use for the single-detector triggers.
        files: list of strs, unused here
            A list containing the filenames of hdf format files used to help
            construct the coincident statistics. The files must have a 'stat'
            attribute which is used to associate them with the appropriate
            statistic class.
        ifos: list of strs, needed here
            The list of detector names
        pregenerate_hist: bool, optional
            If False, do not pregenerate histogram on class instantiation.
            Default is True.
        """
        QuadratureSumStatistic.__init__(self, sngl_ranking, files=files,
                                        ifos=ifos, **kwargs)

        self.single_dtype = [
            ('snglstat', numpy.float32),
            ('coa_phase', numpy.float32),
            ('end_time', numpy.float64),
            ('sigmasq', numpy.float32),
            ('snr', numpy.float32)
        ]

        # Assign attribute so that it can be replaced with other functions
        self.has_hist = False
        self.hist_ifos = None
        self.ref_snr = 5.0
        self.relsense = {}
        self.swidth = self.pwidth = self.twidth = None
        self.srbmin = self.srbmax = None
        self.max_penalty = None
        self.pdtype = []
        self.weights = {}
        self.param_bin = {}
        self.two_det_flag = (len(ifos) == 2)
        self.two_det_weights = {}

        if pregenerate_hist and not len(ifos) == 1:
            self.get_hist()

    def get_hist(self, ifos=None):
        """
        Read in a signal density file for the ifo combination

        Parameters
        ----------
        ifos: list
            The list of ifos. Needed if not given when initializing the class
            instance.
""" ifos = ifos or self.ifos selected = None for name in self.files: # Pick out the statistic files that provide phase / time/ amp # relationships and match to the ifos in use if 'phasetd_newsnr' in name: ifokey = name.split('_')[2] num = len(ifokey) / 2 if num != len(ifos): continue match = [ifo in ifokey for ifo in ifos] if False in match: continue selected = name break if selected is None and len(ifos) > 1: raise RuntimeError("Couldn't figure out which stat file to use") logging.info("Using signal histogram %s for ifos %s", selected, ifos) histfile = self.files[selected] self.hist_ifos = histfile.attrs['ifos'] # Patch for pre-hdf5=3.0 histogram files try: logging.info("Decoding hist ifos ..") self.hist_ifos = [i.decode('UTF-8') for i in self.hist_ifos] except (UnicodeDecodeError, AttributeError): pass n_ifos = len(self.hist_ifos) # Histogram bin attributes self.twidth = histfile.attrs['twidth'] self.pwidth = histfile.attrs['pwidth'] self.swidth = histfile.attrs['swidth'] self.srbmin = histfile.attrs['srbmin'] self.srbmax = histfile.attrs['srbmax'] bin_volume = (self.twidth * self.pwidth * self.swidth) ** (n_ifos - 1) self.hist_max = - 1. * numpy.inf # Read histogram for each ifo, to use if that ifo has smallest SNR in # the coinc for ifo in self.hist_ifos: weights = histfile[ifo]['weights'][:] # renormalise to PDF self.weights[ifo] = weights / (weights.sum() * bin_volume) param = histfile[ifo]['param_bin'][:] if param.dtype == numpy.int8: # Older style, incorrectly sorted histogram file ncol = param.shape[1] self.pdtype = [('c%s' % i, param.dtype) for i in range(ncol)] self.param_bin[ifo] = numpy.zeros(len(self.weights[ifo]), dtype=self.pdtype) for i in range(ncol): self.param_bin[ifo]['c%s' % i] = param[:, i] lsort = self.param_bin[ifo].argsort() self.param_bin[ifo] = self.param_bin[ifo][lsort] self.weights[ifo] = self.weights[ifo][lsort] else: # New style, efficient histogram file # param bin and weights have already been sorted self.param_bin[ifo] = param self.pdtype = self.param_bin[ifo].dtype # Max_penalty is a small number to assigned to any bins without # histogram entries. All histograms in a given file have the same # min entry by design, so use the min of the last one read in. self.max_penalty = self.weights[ifo].min() self.hist_max = max(self.hist_max, self.weights[ifo].max()) if self.two_det_flag: # The density of signals is computed as a function of 3 binned # parameters: time difference (t), phase difference (p) and # SNR ratio (s). These are computed for each combination of # detectors, so for detectors 6 differences are needed. However # many combinations of these parameters are highly unlikely and # no instances of these combinations occurred when generating # the statistic files. Rather than storing a bunch of 0s, these # values are just not stored at all. This reduces the size of # the statistic file, but means we have to identify the correct # value to read for every trigger. For 2 detectors we can # expand the weights lookup table here, basically adding in all # the "0" values. This makes looking up a value in the # "weights" table a O(N) rather than O(NlogN) operation. It # sacrifices RAM to do this, so is a good tradeoff for 2 # detectors, but not for 3! 
if not hasattr(self, 'c0_size'): self.c0_size = {} self.c1_size = {} self.c2_size = {} self.c0_size[ifo] = 2 * (abs(self.param_bin[ifo]['c0']).max() + 1) self.c1_size[ifo] = 2 * (abs(self.param_bin[ifo]['c1']).max() + 1) self.c2_size[ifo] = 2 * (abs(self.param_bin[ifo]['c2']).max() + 1) array_size = [self.c0_size[ifo], self.c1_size[ifo], self.c2_size[ifo]] dtypec = self.weights[ifo].dtype self.two_det_weights[ifo] = \ numpy.zeros(array_size, dtype=dtypec) + self.max_penalty id0 = self.param_bin[ifo]['c0'].astype(numpy.int32) \ + self.c0_size[ifo] // 2 id1 = self.param_bin[ifo]['c1'].astype(numpy.int32) \ + self.c1_size[ifo] // 2 id2 = self.param_bin[ifo]['c2'].astype(numpy.int32) \ + self.c2_size[ifo] // 2 self.two_det_weights[ifo][id0, id1, id2] = self.weights[ifo] relfac = histfile.attrs['sensitivity_ratios'] for ifo, sense in zip(self.hist_ifos, relfac): self.relsense[ifo] = sense self.has_hist = True def logsignalrate(self, stats, shift, to_shift): """ Calculate the normalized log rate density of signals via lookup Parameters ---------- stats: dict of dicts Single-detector quantities for each detector shift: numpy array of float Time shift vector for each coinc to be ranked to_shift: list of ints Multiple of the time shift to apply, ordered as self.ifos Returns ------- value: log of coinc signal rate density for the given single-ifo triggers and time shifts """ # Convert time shift vector to dict, as hist ifos and self.ifos may # not be in same order to_shift = {ifo: s for ifo, s in zip(self.ifos, to_shift)} if not self.has_hist: self.get_hist() # Figure out which ifo of the contributing ifos has the smallest SNR, # to use as reference for choosing the signal histogram. snrs = numpy.array([numpy.array(stats[ifo]['snr'], ndmin=1) for ifo in self.ifos]) smin = numpy.argmin(snrs, axis=0) # Store a list of the triggers using each ifo as reference rtypes = {ifo: numpy.where(smin == j)[0] for j, ifo in enumerate(self.ifos)} # Get reference ifo information rate = numpy.zeros(len(shift), dtype=numpy.float32) for ref_ifo in self.ifos: rtype = rtypes[ref_ifo] ref = stats[ref_ifo] pref = numpy.array(ref['coa_phase'], ndmin=1)[rtype] tref = numpy.array(ref['end_time'], ndmin=1)[rtype] sref = numpy.array(ref['snr'], ndmin=1)[rtype] sigref = numpy.array(ref['sigmasq'], ndmin=1) ** 0.5 sigref = sigref[rtype] senseref = self.relsense[self.hist_ifos[0]] binned = [] other_ifos = [ifo for ifo in self.ifos if ifo != ref_ifo] for ifo in other_ifos: sc = stats[ifo] p = numpy.array(sc['coa_phase'], ndmin=1)[rtype] t = numpy.array(sc['end_time'], ndmin=1)[rtype] s = numpy.array(sc['snr'], ndmin=1)[rtype] sense = self.relsense[ifo] sig = numpy.array(sc['sigmasq'], ndmin=1) ** 0.5 sig = sig[rtype] # Calculate differences pdif = (pref - p) % (numpy.pi * 2.0) tdif = shift[rtype] * to_shift[ref_ifo] + \ tref - shift[rtype] * to_shift[ifo] - t sdif = s / sref * sense / senseref * sigref / sig # Put into bins tbin = (tdif / self.twidth).astype(int) pbin = (pdif / self.pwidth).astype(int) sbin = (sdif / self.swidth).astype(int) binned += [tbin, pbin, sbin] # Convert binned to same dtype as stored in hist nbinned = numpy.zeros(len(pbin), dtype=self.pdtype) for i, b in enumerate(binned): nbinned['c%s' % i] = b # Read signal weight from precalculated histogram if self.two_det_flag: # High-RAM, low-CPU option for two-det rate[rtype] = numpy.zeros(len(nbinned)) + self.max_penalty id0 = nbinned['c0'].astype(numpy.int32) \ + self.c0_size[ref_ifo] // 2 id1 = nbinned['c1'].astype(numpy.int32) \ + self.c1_size[ref_ifo] // 2 
id2 = nbinned['c2'].astype(numpy.int32) \ + self.c2_size[ref_ifo] // 2 # look up keys which are within boundaries within = (id0 > 0) & (id0 < self.c0_size[ref_ifo]) within = within & (id1 > 0) & (id1 < self.c1_size[ref_ifo]) within = within & (id2 > 0) & (id2 < self.c2_size[ref_ifo]) within = numpy.where(within)[0] rate[rtype[within]] = \ self.two_det_weights[ref_ifo][id0[within], id1[within], id2[within]] else: # Low[er]-RAM, high[er]-CPU option for >two det loc = numpy.searchsorted(self.param_bin[ref_ifo], nbinned) loc[loc == len(self.weights[ref_ifo])] = 0 rate[rtype] = self.weights[ref_ifo][loc] # These weren't in our histogram so give them max penalty # instead of random value missed = numpy.where( self.param_bin[ref_ifo][loc] != nbinned )[0] rate[rtype[missed]] = self.max_penalty # Scale by signal population SNR rate[rtype] *= (sref / self.ref_snr) ** -4.0 return numpy.log(rate) def single(self, trigs): """ Calculate the necessary single detector information Here the ranking as well as phase, endtime and sigma-squared values. Parameters ---------- trigs: dict of numpy.ndarrays, h5py group or similar dict-like object Object holding single detector trigger information. 'snr', 'chisq', 'chisq_dof', 'coa_phase', 'end_time', and 'sigmasq' are required keys. Returns ------- numpy.ndarray Array of single detector parameter values """ sngl_stat = self.get_sngl_ranking(trigs) singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype) singles['snglstat'] = sngl_stat singles['coa_phase'] = trigs['coa_phase'][:] singles['end_time'] = trigs['end_time'][:] singles['sigmasq'] = trigs['sigmasq'][:] singles['snr'] = trigs['snr'][:] return numpy.array(singles, ndmin=1) def rank_stat_single(self, single_info): """ Calculate the statistic for a single detector candidate Parameters ---------- single_info: tuple Tuple containing two values. The first is the ifo (str) and the second is the single detector triggers. Returns ------- numpy.ndarray The array of single detector statistics """ return self.single(single_info[1]) def rank_stat_coinc(self, sngls_list, slide, step, to_shift, **kwargs): # pylint:disable=unused-argument """ Calculate the coincident detection statistic, defined in Eq 2 of [Nitz et al, 2017](https://doi.org/10.3847/1538-4357/aa8f50). """ rstat = sum(s[1]['snglstat'] ** 2 for s in sngls_list) cstat = rstat + 2. * self.logsignalrate(dict(sngls_list), slide * step, to_shift) cstat[cstat < 0] = 0 return cstat ** 0.5 def coinc_lim_for_thresh(self, sngls_list, thresh, limifo, **kwargs): # pylint:disable=unused-argument """ Optimization function to identify coincs too quiet to be of interest. Calculate the required single detector statistic to exceed the threshold for each of the input triggers. """ # Safety against subclassing and not rethinking this allowed_names = ['PhaseTDStatistic'] self._check_coinc_lim_subclass(allowed_names) if not self.has_hist: self.get_hist() lim_stat = [b['snglstat'] for a, b in sngls_list if a == limifo][0] s1 = thresh ** 2. - lim_stat ** 2. # Assume best case scenario and use maximum signal rate s1 -= 2. * self.hist_max s1[s1 < 0] = 0 return s1 ** 0.5 class ExpFitStatistic(QuadratureSumStatistic): """ Detection statistic using an exponential falloff noise model. Statistic approximates the negative log noise coinc rate density per template over single-ifo newsnr values. 
""" def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs): """ Create a statistic class instance Parameters ---------- sngl_ranking: str The name of the ranking to use for the single-detector triggers. files: list of strs, needed here A list containing the filenames of hdf format files used to help construct the coincident statistics. The files must have a 'stat' attribute which is used to associate them with the appropriate statistic class. ifos: list of strs, not used here The list of detector names """ if not files: raise RuntimeError("Statistic files not specified") QuadratureSumStatistic.__init__(self, sngl_ranking, files=files, ifos=ifos, **kwargs) # the stat file attributes are hard-coded as '%{ifo}-fit_coeffs' parsed_attrs = [f.split('-') for f in self.files.keys()] self.bg_ifos = [at[0] for at in parsed_attrs if (len(at) == 2 and at[1] == 'fit_coeffs')] if not len(self.bg_ifos): raise RuntimeError("None of the statistic files has the required " "attribute called {ifo}-fit_coeffs !") self.fits_by_tid = {} self.alphamax = {} for i in self.bg_ifos: self.fits_by_tid[i] = self.assign_fits(i) self.get_ref_vals(i) self.single_increasing = False def assign_fits(self, ifo): """ Extract fits from fit files Parameters ----------- ifo: str The detector to get fits for. Returns ------- rate_dict: dict A dictionary containing the fit information in the `alpha`, `rate` and `thresh` keys/. """ coeff_file = self.files[ifo+'-fit_coeffs'] template_id = coeff_file['template_id'][:] # the template_ids and fit coeffs are stored in an arbitrary order # create new arrays in template_id order for easier recall tid_sort = numpy.argsort(template_id) fits_by_tid_dict = {} fits_by_tid_dict['smoothed_fit_coeff'] = \ coeff_file['fit_coeff'][:][tid_sort] fits_by_tid_dict['smoothed_rate_above_thresh'] = \ coeff_file['count_above_thresh'][:][tid_sort].astype(float) fits_by_tid_dict['smoothed_rate_in_template'] = \ coeff_file['count_in_template'][:][tid_sort].astype(float) # The by-template fits may have been stored in the smoothed fits file if 'fit_by_template' in coeff_file: coeff_fbt = coeff_file['fit_by_template'] fits_by_tid_dict['fit_by_fit_coeff'] = \ coeff_fbt['fit_coeff'][:][tid_sort] fits_by_tid_dict['fit_by_rate_above_thresh'] = \ coeff_fbt['count_above_thresh'][:][tid_sort].astype(float) fits_by_tid_dict['fit_by_rate_in_template'] = \ coeff_file['count_in_template'][:][tid_sort].astype(float) # Keep the fit threshold in fits_by_tid fits_by_tid_dict['thresh'] = coeff_file.attrs['stat_threshold'] return fits_by_tid_dict def get_ref_vals(self, ifo): """ Get the largest `alpha` value over all templates for given ifo. This is stored in `self.alphamax[ifo]` in the class instance. Parameters ----------- ifo: str The detector to get fits for. """ self.alphamax[ifo] = self.fits_by_tid[ifo]['smoothed_fit_coeff'].max() def find_fits(self, trigs): """ Get fit coeffs for a specific ifo and template id(s) Parameters ---------- trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) Dictionary-like object holding single detector trigger information. The coincidence executable will always call this using a bunch of trigs from a single template, there template_num is stored as an attribute and we just return the single value for all templates. If multiple templates are in play we must return arrays. 
Returns -------- alphai: float or numpy array The alpha fit value(s) ratei: float or numpy array The rate fit value(s) thresh: float or numpy array The thresh fit value(s) """ try: tnum = trigs.template_num # exists if accessed via coinc_findtrigs ifo = trigs.ifo except AttributeError: tnum = trigs['template_id'] # exists for SingleDetTriggers assert len(self.ifos) == 1 # Should be exactly one ifo provided ifo = self.ifos[0] # fits_by_tid is a dictionary of dictionaries of arrays # indexed by ifo / coefficient name / template_id alphai = self.fits_by_tid[ifo]['smoothed_fit_coeff'][tnum] ratei = self.fits_by_tid[ifo]['smoothed_rate_above_thresh'][tnum] thresh = self.fits_by_tid[ifo]['thresh'] return alphai, ratei, thresh def lognoiserate(self, trigs): """ Calculate the log noise rate density over single-ifo ranking Read in single trigger information, compute the ranking and rescale by the fitted coefficients alpha and rate Parameters ----------- trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) Dictionary-like object holding single detector trigger information. Returns --------- lognoisel: numpy.array Array of log noise rate density for each input trigger. """ alphai, ratei, thresh = self.find_fits(trigs) sngl_stat = self.get_sngl_ranking(trigs) # alphai is constant of proportionality between single-ifo newsnr and # negative log noise likelihood in given template # ratei is rate of trigs in given template compared to average # thresh is stat threshold used in given ifo lognoisel = - alphai * (sngl_stat - thresh) + numpy.log(alphai) + \ numpy.log(ratei) return numpy.array(lognoisel, ndmin=1, dtype=numpy.float32) def single(self, trigs): """ Calculate the necessary single detector information In this case the ranking rescaled (see the lognoiserate method here). Parameters ---------- trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) Dictionary-like object holding single detector trigger information. Returns ------- numpy.ndarray The array of single detector values """ return self.lognoiserate(trigs) def rank_stat_single(self, single_info): """ Calculate the statistic for a single detector candidate Parameters ---------- single_info: tuple Tuple containing two values. The first is the ifo (str) and the second is the single detector triggers. Returns ------- numpy.ndarray The array of single detector statistics """ err_msg = "Sorry! No-one has implemented this method yet! " raise NotImplementedError(err_msg) def rank_stat_coinc(self, s, slide, step, to_shift, **kwargs): # pylint:disable=unused-argument """ Calculate the coincident detection statistic. """ err_msg = "Sorry! No-one has implemented this method yet! " raise NotImplementedError(err_msg) def coinc_lim_for_thresh(self, s, thresh, limifo, **kwargs): # pylint:disable=unused-argument """ Optimization function to identify coincs too quiet to be of interest Calculate the required single detector statistic to exceed the threshold for each of the input triggers. """ err_msg = "Sorry! No-one has implemented this method yet! " raise NotImplementedError(err_msg) # Keeping this here to help write the new coinc method. def coinc_OLD(self, s0, s1, slide, step): # pylint:disable=unused-argument """Calculate the final coinc ranking statistic""" # Approximate log likelihood ratio by summing single-ifo negative # log noise likelihoods loglr = - s0 - s1 # add squares of threshold stat values via idealized Gaussian formula threshes = [self.fits_by_tid[i]['thresh'] for i in self.bg_ifos] loglr += sum([t**2. / 2. 
for t in threshes]) # convert back to a coinc-SNR-like statistic # via log likelihood ratio \propto rho_c^2 / 2 return (2. * loglr) ** 0.5 # Keeping this here to help write the new coinc_lim method def coinc_lim_for_thresh_OLD(self, s0, thresh): """Calculate the required single detector statistic to exceed the threshold for each of the input triggers. Parameters ---------- s0: numpy.ndarray Single detector ranking statistic for the first detector. thresh: float The threshold on the coincident statistic. Returns ------- numpy.ndarray Array of limits on the second detector single statistic to exceed thresh. """ s1 = - (thresh ** 2.) / 2. - s0 threshes = [self.fits_by_tid[i]['thresh'] for i in self.bg_ifos] s1 += sum([t**2. / 2. for t in threshes]) return s1 class ExpFitCombinedSNR(ExpFitStatistic): """ Reworking of ExpFitStatistic designed to resemble network SNR Use a monotonic function of the negative log noise rate density which approximates combined (new)snr for coincs with similar newsnr in each ifo """ def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs): """ Create a statistic class instance Parameters ---------- sngl_ranking: str The name of the ranking to use for the single-detector triggers. files: list of strs, needed here A list containing the filenames of hdf format files used to help construct the coincident statistics. The files must have a 'stat' attribute which is used to associate them with the appropriate statistic class. ifos: list of strs, not used here The list of detector names """ ExpFitStatistic.__init__(self, sngl_ranking, files=files, ifos=ifos, **kwargs) # for low-mass templates the exponential slope alpha \approx 6 self.alpharef = 6. self.single_increasing = True def use_alphamax(self): """ Compute the reference alpha from the fit files. Use the harmonic mean of the maximum individual ifo slopes as the reference value of alpha. """ inv_alphas = [1. / self.alphamax[i] for i in self.bg_ifos] self.alpharef = 1. / (sum(inv_alphas) / len(inv_alphas)) def single(self, trigs): """ Calculate the necessary single detector information Parameters ---------- trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) Dictionary-like object holding single detector trigger information. Returns ------- numpy.ndarray The array of single detector values """ logr_n = self.lognoiserate(trigs) _, _, thresh = self.find_fits(trigs) # shift by log of reference slope alpha logr_n += -1. * numpy.log(self.alpharef) # add threshold and rescale by reference slope stat = thresh - (logr_n / self.alpharef) return numpy.array(stat, ndmin=1, dtype=numpy.float32) def rank_stat_single(self, single_info): """ Calculate the statistic for single detector candidates Parameters ---------- single_info: tuple Tuple containing two values. The first is the ifo (str) and the second is the single detector triggers. Returns ------- numpy.ndarray The array of single detector statistics """ if self.single_increasing: sngl_multiifo = single_info[1]['snglstat'] else: sngl_multiifo = -1.0 * single_info[1]['snglstat'] return sngl_multiifo def rank_stat_coinc(self, s, slide, step, to_shift, **kwargs): # pylint:disable=unused-argument """ Calculate the coincident detection statistic. 
Parameters ---------- sngls_list: list List of (ifo, single detector statistic) tuples slide: (unused in this statistic) step: (unused in this statistic) to_shift: list List of integers indicating what multiples of the time shift will be applied (unused in this statistic) Returns ------- numpy.ndarray Array of coincident ranking statistic values """ # scale by 1/sqrt(number of ifos) to resemble network SNR return sum(sngl[1] for sngl in s) / len(s)**0.5 def coinc_lim_for_thresh(self, s, thresh, limifo, **kwargs): # pylint:disable=unused-argument """ Optimization function to identify coincs too quiet to be of interest Calculate the required single detector statistic to exceed the threshold for each of the input triggers. Parameters ---------- s: list List of (ifo, single detector statistic) tuples for all detectors except limifo. thresh: float The threshold on the coincident statistic. limifo: string The ifo for which the limit is to be found. Returns ------- numpy.ndarray Array of limits on the limifo single statistic to exceed thresh. """ # Safety against subclassing and not rethinking this allowed_names = ['ExpFitCombinedSNR'] self._check_coinc_lim_subclass(allowed_names) return thresh * ((len(s) + 1) ** 0.5) - sum(sngl[1] for sngl in s) class PhaseTDExpFitStatistic(PhaseTDStatistic, ExpFitCombinedSNR): """ Statistic combining exponential noise model with signal histogram PDF """ # default is 2-ifo operation with exactly 1 'phasetd' file def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs): """ Create a statistic class instance Parameters ---------- sngl_ranking: str The name of the ranking to use for the single-detector triggers. files: list of strs, needed here A list containing the filenames of hdf format files used to help construct the coincident statistics. The files must have a 'stat' attribute which is used to associate them with the appropriate statistic class. ifos: list of strs, needed here The list of detector names """ # read in both foreground PDF and background fit info ExpFitCombinedSNR.__init__(self, sngl_ranking, files=files, ifos=ifos, **kwargs) # need the self.single_dtype value from PhaseTDStatistic PhaseTDStatistic.__init__(self, sngl_ranking, files=files, ifos=ifos, **kwargs) def single(self, trigs): """ Calculate the necessary single detector information In this case it is the rescaled ranking (see the lognoiserate method here) with the phase, end time, sigma and SNR values added in. Parameters ---------- trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) Dictionary-like object holding single detector trigger information. Returns ------- numpy.ndarray The array of single detector values """ # same single-ifo stat as ExpFitCombinedSNR sngl_stat = ExpFitCombinedSNR.single(self, trigs) singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype) singles['snglstat'] = sngl_stat singles['coa_phase'] = trigs['coa_phase'][:] singles['end_time'] = trigs['end_time'][:] singles['sigmasq'] = trigs['sigmasq'][:] singles['snr'] = trigs['snr'][:] return numpy.array(singles, ndmin=1) def rank_stat_single(self, single_info): """ Calculate the statistic for a single detector candidate Parameters ---------- single_info: tuple Tuple containing two values. The first is the ifo (str) and the second is the single detector triggers. Returns ------- numpy.ndarray The array of single detector statistics """ err_msg = "Sorry! No-one has implemented this method yet! 
" raise NotImplementedError(err_msg) def rank_stat_coinc(self, s, slide, step, to_shift, **kwargs): # pylint:disable=unused-argument """ Calculate the coincident detection statistic. """ err_msg = "Sorry! No-one has implemented this method yet! " raise NotImplementedError(err_msg) def coinc_lim_for_thresh(self, s, thresh, limifo, **kwargs): # pylint:disable=unused-argument """ Optimization function to identify coincs too quiet to be of interest Calculate the required single detector statistic to exceed the threshold for each of the input triggers. """ err_msg = "Sorry! No-one has implemented this method yet! " raise NotImplementedError(err_msg) # Keeping the old statistic code here for now to help with reimplementing def coinc_OLD(self, s0, s1, slide, step): # logsignalrate function inherited from PhaseTDStatistic logr_s = self.logsignalrate(s0, s1, slide * step) # rescale by ExpFitCombinedSNR reference slope as for sngl stat cstat = s0['snglstat'] + s1['snglstat'] + logr_s / self.alpharef # cut off underflowing and very small values cstat[cstat < 8.] = 8. # scale to resemble network SNR return cstat / (2.**0.5) def coinc_lim_for_thresh_OLD(self, s0, thresh): # if the threshold is below this value all triggers will # pass because of rounding in the coinc method if thresh <= (8. / (2.**0.5)): return -1. * numpy.ones(len(s0['snglstat'])) * numpy.inf if not self.has_hist: self.get_hist() # Assume best case scenario and use maximum signal rate logr_s = self.hist_max s1 = (2 ** 0.5) * thresh - s0['snglstat'] - logr_s / self.alpharef return s1 class ExpFitBgRateStatistic(ExpFitStatistic): """ Detection statistic using an exponential falloff noise model. Statistic calculates the log noise coinc rate for each template over single-ifo newsnr values. """ def __init__(self, sngl_ranking, files=None, ifos=None, benchmark_lograte=-14.6, **kwargs): """ Create a statistic class instance Parameters ---------- sngl_ranking: str The name of the ranking to use for the single-detector triggers. files: list of strs, needed here A list containing the filenames of hdf format files used to help construct the coincident statistics. The files must have a 'stat' attribute which is used to associate them with the appropriate statistic class. ifos: list of strs, not used here The list of detector names benchmark_lograte: float, default=-14.6 benchmark_lograte is log of a representative noise trigger rate. The default comes from H1L1 (O2) and is 4.5e-7 Hz. """ super(ExpFitBgRateStatistic, self).__init__(sngl_ranking, files=files, ifos=ifos, **kwargs) self.benchmark_lograte = benchmark_lograte # Reassign the rate to be number per time rather than an arbitrarily # normalised number for ifo in self.bg_ifos: self.reassign_rate(ifo) def reassign_rate(self, ifo): """ Reassign the rate to be number per time rather Reassign the rate to be number per time rather than an arbitrarily normalised number. Parameters ----------- ifo: str The ifo to consider. 
""" coeff_file = self.files[ifo+'-fit_coeffs'] analysis_time = float(coeff_file.attrs['analysis_time']) self.fits_by_tid[ifo]['smoothed_rate_above_thresh'] /= analysis_time self.fits_by_tid[ifo]['smoothed_rate_in_template'] /= analysis_time # The by-template fits may have been stored in the smoothed fits file if 'fit_by_template' in coeff_file: self.fits_by_tid[ifo]['fit_by_rate_above_thresh'] /= analysis_time self.fits_by_tid[ifo]['fit_by_rate_in_template'] /= analysis_time def rank_stat_coinc(self, s, slide, step, to_shift, **kwargs): # pylint:disable=unused-argument """ Calculate the coincident detection statistic. Parameters ---------- sngls_list: list List of (ifo, single detector statistic) tuples slide: (unused in this statistic) step: (unused in this statistic) to_shift: list List of integers indicating what multiples of the time shift will be applied (unused in this statistic) Returns ------- numpy.ndarray Array of coincident ranking statistic values """ # ranking statistic is -ln(expected rate density of noise triggers) # plus normalization constant sngl_dict = {sngl[0]: sngl[1] for sngl in s} ln_noise_rate = coinc_rate.combination_noise_lograte( sngl_dict, kwargs['time_addition']) loglr = - ln_noise_rate + self.benchmark_lograte return loglr def coinc_lim_for_thresh(self, s, thresh, limifo, **kwargs): """ Optimization function to identify coincs too quiet to be of interest Calculate the required single detector statistic to exceed the threshold for each of the input triggers. Parameters ---------- s: list List of (ifo, single detector statistic) tuples for all detectors except limifo. thresh: float The threshold on the coincident statistic. limifo: string The ifo for which the limit is to be found. Returns ------- numpy.ndarray Array of limits on the limifo single statistic to exceed thresh. """ # Safety against subclassing and not rethinking this allowed_names = ['ExpFitBgRateStatistic'] self._check_coinc_lim_subclass(allowed_names) sngl_dict = {sngl[0]: sngl[1] for sngl in s} sngl_dict[limifo] = numpy.zeros(len(s[0][1])) ln_noise_rate = coinc_rate.combination_noise_lograte( sngl_dict, kwargs['time_addition']) loglr = - thresh - ln_noise_rate + self.benchmark_lograte return loglr class ExpFitFgBgNormStatistic(PhaseTDStatistic, ExpFitBgRateStatistic): """ Statistic combining PhaseTD, ExpFitBg and additional foreground info. """ def __init__(self, sngl_ranking, files=None, ifos=None, reference_ifos='H1,L1', **kwargs): """ Create a statistic class instance Parameters ---------- sngl_ranking: str The name of the ranking to use for the single-detector triggers. files: list of strs, needed here A list containing the filenames of hdf format files used to help construct the coincident statistics. The files must have a 'stat' attribute which is used to associate them with the appropriate statistic class. ifos: list of strs The list of detector names reference_ifos: string of comma separated ifo prefixes Detectors to be used as the reference network for network sensitivity comparisons. 
Each must be in fits_by_tid """ # read in background fit info and store it ExpFitBgRateStatistic.__init__(self, sngl_ranking, files=files, ifos=ifos, **kwargs) # if ifos not already set, determine via background fit info self.ifos = self.ifos or self.bg_ifos # PhaseTD statistic single_dtype plus network sensitivity benchmark PhaseTDStatistic.__init__(self, sngl_ranking, files=files, ifos=self.ifos, **kwargs) self.single_dtype.append(('benchmark_logvol', numpy.float32)) for ifo in self.bg_ifos: self.assign_median_sigma(ifo) ref_ifos = reference_ifos.split(',') # benchmark_logvol is a benchmark sensitivity array over template id hl_net_med_sigma = numpy.amin([self.fits_by_tid[ifo]['median_sigma'] for ifo in ref_ifos], axis=0) self.benchmark_logvol = 3.0 * numpy.log(hl_net_med_sigma) self.single_increasing = False def assign_median_sigma(self, ifo): """ Read and sort the median_sigma values from input files. Parameters ---------- ifo: str The ifo to consider. """ coeff_file = self.files[ifo + '-fit_coeffs'] template_id = coeff_file['template_id'][:] tid_sort = numpy.argsort(template_id) self.fits_by_tid[ifo]['median_sigma'] = \ coeff_file['median_sigma'][:][tid_sort] def lognoiserate(self, trigs, alphabelow=6): """ Calculate the log noise rate density over single-ifo ranking Read in single trigger information, compute the newsnr statistic and rescale by the fitted coefficients alpha and rate Parameters ----------- trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) Dictionary-like object holding single detector trigger information. alphabelow: float, default=6 Use this slope to fit the noise triggers below the point at which fits are present in the input files. Returns --------- lognoisel: numpy.array Array of log noise rate density for each input trigger. """ alphai, ratei, thresh = self.find_fits(trigs) newsnr = self.get_sngl_ranking(trigs) # Above the threshold we use the usual fit coefficient (alpha) # below threshold use specified alphabelow bt = newsnr < thresh lognoisel = - alphai * (newsnr - thresh) + numpy.log(alphai) + \ numpy.log(ratei) lognoiselbt = - alphabelow * (newsnr - thresh) + \ numpy.log(alphabelow) + numpy.log(ratei) lognoisel[bt] = lognoiselbt[bt] return numpy.array(lognoisel, ndmin=1, dtype=numpy.float32) def single(self, trigs): """ Calculate the necessary single detector information In this case it is the rescaled ranking (see the lognoiserate method here) with the phase, end time, sigma, SNR, template_id and the benchmark_logvol values added in. Parameters ---------- trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) Dictionary-like object holding single detector trigger information.
Returns ------- numpy.ndarray The array of single detector values """ # single-ifo stat = log of noise rate sngl_stat = self.lognoiserate(trigs) # populate other fields to calculate phase/time/amp consistency # and sigma comparison singles = numpy.zeros(len(sngl_stat), dtype=self.single_dtype) singles['snglstat'] = sngl_stat singles['coa_phase'] = trigs['coa_phase'][:] singles['end_time'] = trigs['end_time'][:] singles['sigmasq'] = trigs['sigmasq'][:] singles['snr'] = trigs['snr'][:] try: tnum = trigs.template_num # exists if accessed via coinc_findtrigs except AttributeError: tnum = trigs['template_id'] # exists for SingleDetTriggers # Should only be one ifo fit file provided assert len(self.ifos) == 1 # Store benchmark log volume as single-ifo information since the coinc # method does not have access to template id singles['benchmark_logvol'] = self.benchmark_logvol[tnum] return numpy.array(singles, ndmin=1) def rank_stat_single(self, single_info): """ Calculate the statistic for single detector candidates Parameters ---------- single_info: tuple Tuple containing two values. The first is the ifo (str) and the second is the single detector triggers. Returns ------- numpy.ndarray The array of single detector statistics """ sngls = single_info[1] ln_noise_rate = sngls['snglstat'] ln_noise_rate -= self.benchmark_lograte network_sigmasq = sngls['sigmasq'] network_logvol = 1.5 * numpy.log(network_sigmasq) benchmark_logvol = sngls['benchmark_logvol'] network_logvol -= benchmark_logvol ln_s = -4 * numpy.log(sngls['snr'] / self.ref_snr) loglr = network_logvol - ln_noise_rate + ln_s # cut off underflowing and very small values loglr[loglr < -30.] = -30. return loglr def rank_stat_coinc(self, s, slide, step, to_shift, **kwargs): # pylint:disable=unused-argument """ Calculate the coincident detection statistic. 
Parameters ---------- sngls_list: list List of (ifo, single detector statistic) tuples slide: (unused in this statistic) step: (unused in this statistic) to_shift: list List of integers indicating what multiples of the time shift will be applied (unused in this statistic) Returns ------- numpy.ndarray Array of coincident ranking statistic values """ sngl_rates = {sngl[0]: sngl[1]['snglstat'] for sngl in s} ln_noise_rate = coinc_rate.combination_noise_lograte( sngl_rates, kwargs['time_addition']) ln_noise_rate -= self.benchmark_lograte # Network sensitivity for a given coinc type is approximately # determined by the least sensitive ifo network_sigmasq = numpy.amin([sngl[1]['sigmasq'] for sngl in s], axis=0) # Volume \propto sigma^3 or sigmasq^1.5 network_logvol = 1.5 * numpy.log(network_sigmasq) # Get benchmark log volume as single-ifo information : # benchmark_logvol for a given template is not ifo-dependent, so # choose the first ifo for convenience benchmark_logvol = s[0][1]['benchmark_logvol'] network_logvol -= benchmark_logvol # Use prior histogram to get Bayes factor for signal vs noise # given the time, phase and SNR differences between IFOs # First get signal PDF logr_s stat = {ifo: st for ifo, st in s} logr_s = self.logsignalrate(stat, slide * step, to_shift) # Find total volume of phase-time-amplitude space occupied by noise # coincs # Extent of time-difference space occupied noise_twindow = coinc_rate.multiifo_noise_coincident_area( self.hist_ifos, kwargs['time_addition']) # Volume is the allowed time difference window, multiplied by 2pi for # each phase difference dimension and by allowed range of SNR ratio # for each SNR ratio dimension : there are (n_ifos - 1) dimensions # for both phase and SNR n_ifos = len(self.hist_ifos) hist_vol = noise_twindow * \ (2 * numpy.pi * (self.srbmax - self.srbmin) * self.swidth) ** \ (n_ifos - 1) # Noise PDF is 1/volume, assuming a uniform distribution of noise # coincs logr_n = - numpy.log(hist_vol) # Combine to get final statistic: log of # ((rate of signals / rate of noise) * PTA Bayes factor) loglr = network_logvol - ln_noise_rate + logr_s - logr_n # cut off underflowing and very small values loglr[loglr < -30.] = -30. return loglr def coinc_lim_for_thresh(self, s, thresh, limifo, **kwargs): # pylint:disable=unused-argument """ Optimization function to identify coincs too quiet to be of interest Calculate the required single detector statistic to exceed the threshold for each of the input triggers. Parameters ---------- s: list List of (ifo, single detector statistic) tuples for all detectors except limifo. thresh: float The threshold on the coincident statistic. limifo: string The ifo for which the limit is to be found. Returns ------- numpy.ndarray Array of limits on the limifo single statistic to exceed thresh. 
""" # Safety against subclassing and not rethinking this allowed_names = ['ExpFitFgBgNormStatistic', 'ExpFitFgBgNormBBHStatistic', 'DQExpFitFgBgNormStatistic'] self._check_coinc_lim_subclass(allowed_names) if not self.has_hist: self.get_hist() # if the threshold is below this value all triggers will # pass because of rounding in the coinc method if thresh <= -30: return numpy.ones(len(s[0][1]['snglstat'])) * numpy.inf sngl_rates = {sngl[0]: sngl[1]['snglstat'] for sngl in s} # Add limifo to singles dict so that overlap time is calculated correctly sngl_rates[limifo] = numpy.zeros(len(s[0][1])) ln_noise_rate = coinc_rate.combination_noise_lograte( sngl_rates, kwargs['time_addition']) ln_noise_rate -= self.benchmark_lograte # Assume best case and use the maximum sigma squared from all triggers network_sigmasq = numpy.ones(len(s[0][1])) * kwargs['max_sigmasq'] # Volume \propto sigma^3 or sigmasq^1.5 network_logvol = 1.5 * numpy.log(network_sigmasq) # Get benchmark log volume as single-ifo information : # benchmark_logvol for a given template is not ifo-dependent, so # choose the first ifo for convenience benchmark_logvol = s[0][1]['benchmark_logvol'] network_logvol -= benchmark_logvol # Assume best case scenario and use maximum signal rate logr_s = numpy.log(self.hist_max * (kwargs['min_snr'] / self.ref_snr) ** -4.0) # Find total volume of phase-time-amplitude space occupied by noise # coincs # Extent of time-difference space occupied noise_twindow = coinc_rate.multiifo_noise_coincident_area( self.hist_ifos, kwargs['time_addition']) # Volume is the allowed time difference window, multiplied by 2pi for # each phase difference dimension and by allowed range of SNR ratio # for each SNR ratio dimension : there are (n_ifos - 1) dimensions # for both phase and SNR n_ifos = len(self.hist_ifos) hist_vol = noise_twindow * \ (2 * numpy.pi * (self.srbmax - self.srbmin) * self.swidth) ** \ (n_ifos - 1) # Noise PDF is 1/volume, assuming a uniform distribution of noise # coincs logr_n = - numpy.log(hist_vol) loglr = - thresh + network_logvol - ln_noise_rate + logr_s - logr_n return loglr class ExpFitFgBgNormBBHStatistic(ExpFitFgBgNormStatistic): """ The ExpFitFgBgNormStatistic with a mass weighting factor. This is the same as the ExpFitFgBgNormStatistic except the likelihood is multiplied by a signal rate prior modelled as uniform over chirp mass. As templates are distributed roughly according to mchirp^(-11/3) we weight by the inverse of this. This ensures that loud events at high mass where template density is sparse are not swamped by events at lower masses where template density is high. """ def __init__(self, sngl_ranking, files=None, ifos=None, max_chirp_mass=None, **kwargs): """ Create a statistic class instance Parameters ---------- sngl_ranking: str The name of the ranking to use for the single-detector triggers. files: list of strs, needed here A list containing the filenames of hdf format files used to help construct the coincident statistics. The files must have a 'stat' attribute which is used to associate them with the appropriate statistic class. ifos: list of strs, not used here The list of detector names max_chirp_mass: float, default=None If given, if a template's chirp mass is above this value it will be reweighted as if it had this chirp mass. This is to avoid the problem where the distribution fails to be accurate at high mass and we can have a case where a single highest-mass template might produce *all* the loudest background (and foreground) events. 
""" ExpFitFgBgNormStatistic.__init__(self, sngl_ranking, files=files, ifos=ifos, **kwargs) self.mcm = max_chirp_mass self.curr_mchirp = None def logsignalrate(self, stats, shift, to_shift): """ Calculate the normalized log rate density of signals via lookup This calls back to the Parent class and then applies the chirp mass weighting factor. Parameters ---------- stats: list of dicts giving single-ifo quantities, ordered as self.ifos shift: numpy array of float, size of the time shift vector for each coinc to be ranked to_shift: list of int, multiple of the time shift to apply ordered as self.ifos Returns ------- value: log of coinc signal rate density for the given single-ifo triggers and time shifts """ # model signal rate as uniform over chirp mass, background rate is # proportional to mchirp^(-11/3) due to density of templates logr_s = ExpFitFgBgNormStatistic.logsignalrate( self, stats, shift, to_shift ) logr_s += numpy.log((self.curr_mchirp / 20.0) ** (11./3.0)) return logr_s def single(self, trigs): """ Calculate the necessary single detector information In this case the ranking rescaled (see the lognoiserate method here) with the phase, end time, sigma, SNR, template_id and the benchmark_logvol values added in. This also stored the current chirp mass for use when computing the coinc statistic values. Parameters ---------- trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) Dictionary-like object holding single detector trigger information. Returns ------- numpy.ndarray The array of single detector values """ from pycbc.conversions import mchirp_from_mass1_mass2 self.curr_mchirp = mchirp_from_mass1_mass2(trigs.param['mass1'], trigs.param['mass2']) if self.mcm is not None: # Careful - input might be a str, so cast to float self.curr_mchirp = min(self.curr_mchirp, float(self.mcm)) return ExpFitFgBgNormStatistic.single(self, trigs) def coinc_lim_for_thresh(self, s, thresh, limifo, **kwargs): # pylint:disable=unused-argument """ Optimization function to identify coincs too quiet to be of interest Calculate the required single detector statistic to exceed the threshold for each of the input triggers. Parameters ---------- s: list List of (ifo, single detector statistic) tuples for all detectors except limifo. thresh: float The threshold on the coincident statistic. limifo: string The ifo for which the limit is to be found. Returns ------- numpy.ndarray Array of limits on the limifo single statistic to exceed thresh. """ loglr = ExpFitFgBgNormStatistic.coinc_lim_for_thresh( self, s, thresh, limifo, **kwargs) loglr += numpy.log((self.curr_mchirp / 20.0) ** (11./3.0)) return loglr class DQExpFitFgBgNormStatistic(ExpFitFgBgNormStatistic): """ The ExpFitFgBgNormStatistic with DQ-based reranking. This is the same as the ExpFitFgBgNormStatistic except the likelihood is multiplied by the relative signal rate based on the relevant DQ likelihood value. """ def __init__(self, sngl_ranking, files=None, ifos=None, **kwargs): """ Create a statistic class instance Parameters ---------- sngl_ranking: str The name of the ranking to use for the single-detector triggers. files: list of strs, needed here A list containing the filenames of hdf format files used to help construct the coincident statistics. The files must have a 'stat' attribute which is used to associate them with the appropriate statistic class. 
ifos: list of strs, not used here The list of detector names """ ExpFitFgBgNormStatistic.__init__(self, sngl_ranking, files=files, ifos=ifos, **kwargs) self.dq_val_by_time = {} self.dq_bin_by_id = {} for k in self.files.keys(): parsed_attrs = k.split('-') if len(parsed_attrs) < 3: continue if parsed_attrs[2] == 'dq_ts_reference': ifo = parsed_attrs[0] dq_type = parsed_attrs[1] dq_vals = self.assign_dq_val(k) dq_bins = self.assign_bin_id(k) if ifo not in self.dq_val_by_time: self.dq_val_by_time[ifo] = {} self.dq_bin_by_id[ifo] = {} self.dq_val_by_time[ifo][dq_type] = dq_vals self.dq_bin_by_id[ifo][dq_type] = dq_bins def assign_bin_id(self, key): """ Assign bin ID values Assign each template id to a bin name based on a referenced statistic file. Parameters ---------- key: str statistic file key string Returns --------- bin_dict: dict of strs Dictionary containing the bin name for each template id """ ifo = key.split('-')[0] dq_file = self.files[key] bin_names = dq_file.attrs['names'][:] locs = [] names = [] for bin_name in bin_names: bin_locs = dq_file[ifo + '/locs/' + bin_name][:] locs = list(locs)+list(bin_locs.astype(int)) names = list(names)+list([bin_name]*len(bin_locs)) bin_dict = dict(zip(locs, names)) return bin_dict def assign_dq_val(self, key): """ Assign dq values to each time for every bin based on a referenced statistic file. Parameters ---------- key: str statistic file key string Returns --------- dq_dict: dict of {time: dq_value} dicts for each bin Dictionary containing the mapping between the time and the dq value for each individual bin. """ ifo = key.split('-')[0] dq_file = self.files[key] times = dq_file[ifo+'/times'][:] bin_names = dq_file.attrs['names'][:] dq_dict = {} for bin_name in bin_names: dq_vals = dq_file[ifo+'/dq_vals/'+bin_name][:] dq_dict[bin_name] = dict(zip(times, dq_vals)) return dq_dict def find_dq_val(self, trigs): """Get dq values for a specific ifo and times""" time = trigs['end_time'].astype(int) try: tnum = trigs.template_num ifo = trigs.ifo except AttributeError: tnum = trigs['template_id'] assert len(self.ifos) == 1 # Should be exactly one ifo provided ifo = self.ifos[0] dq_val = numpy.zeros(len(time)) if ifo in self.dq_val_by_time: for (i, t) in enumerate(time): for k in self.dq_val_by_time[ifo].keys(): if isinstance(tnum, numpy.ndarray): bin_name = self.dq_bin_by_id[ifo][k][tnum[i]] else: bin_name = self.dq_bin_by_id[ifo][k][tnum] val = self.dq_val_by_time[ifo][k][bin_name][int(t)] dq_val[i] = max(dq_val[i], val) return dq_val def lognoiserate(self, trigs): """ Calculate the log noise rate density over single-ifo ranking Read in single trigger information, compute the ranking and rescale by the fitted coefficients alpha and rate Parameters ----------- trigs: dict of numpy.ndarrays, h5py group (or similar dict-like object) Dictionary-like object holding single detector trigger information. Returns --------- lognoisel: numpy.array Array of log noise rate density for each input trigger. 
""" logr_n = ExpFitFgBgNormStatistic.lognoiserate( self, trigs) logr_n += self.find_dq_val(trigs) return logr_n statistic_dict = { 'quadsum': QuadratureSumStatistic, 'single_ranking_only': QuadratureSumStatistic, 'phasetd': PhaseTDStatistic, 'exp_fit_stat': ExpFitStatistic, 'exp_fit_csnr': ExpFitCombinedSNR, 'phasetd_exp_fit_stat': PhaseTDExpFitStatistic, 'dq_phasetd_exp_fit_fgbg_norm': DQExpFitFgBgNormStatistic, 'exp_fit_bg_rate': ExpFitBgRateStatistic, 'phasetd_exp_fit_fgbg_norm': ExpFitFgBgNormStatistic, 'phasetd_exp_fit_fgbg_bbh_norm': ExpFitFgBgNormBBHStatistic, } def get_statistic(stat): """ Error-handling sugar around dict lookup for coincident statistics Parameters ---------- stat : string Name of the coincident statistic Returns ------- class Subclass of Stat base class Raises ------ RuntimeError If the string is not recognized as corresponding to a Stat subclass """ try: return statistic_dict[stat] except KeyError: raise RuntimeError('%s is not an available detection statistic' % stat) def insert_statistic_option_group(parser, default_ranking_statistic=None): """ Add ranking statistic options to the optparser object. Adds the options used to initialize a PyCBC Stat class. Parameters ----------- parser : object OptionParser instance. default_ranking_statisic : str Allows setting a default statistic for the '--ranking-statistic' option. The option is no longer required if a default is provided. Returns -------- strain_opt_group : optparser.argument_group The argument group that is added to the parser. """ statistic_opt_group = parser.add_argument_group( "Options needed to initialize a PyCBC Stat class for computing the " "ranking of events from a PyCBC search." ) statistic_opt_group.add_argument( "--ranking-statistic", default=default_ranking_statistic, choices=statistic_dict.keys(), required=True if default_ranking_statistic is None else False, help="The coinc ranking statistic to calculate" ) statistic_opt_group.add_argument( "--sngl-ranking", choices=ranking.sngls_ranking_function_dict.keys(), required=True, help="The single-detector trigger ranking to use." ) statistic_opt_group.add_argument( "--statistic-files", nargs='*', action='append', default=[], help="Files containing ranking statistic info" ) statistic_opt_group.add_argument( "--statistic-keywords", nargs='*', default=[], help="Provide additional key-word arguments to be sent to " "the statistic class when it is initialized. Should " "be given in format --statistic-keywords " "KWARG1:VALUE1 KWARG2:VALUE2 KWARG3:VALUE3 ..." ) return statistic_opt_group def parse_statistic_keywords_opt(stat_kwarg_list): """ Parse the list of statistic keywords into an appropriate dictionary. Take input from the input argument ["KWARG1:VALUE1", "KWARG2:VALUE2", "KWARG3:VALUE3"] and convert into a dictionary. Parameters ---------- stat_kwarg_list : list Statistic keywords in list format Returns ------- stat_kwarg_dict : dict Statistic keywords in dict format """ stat_kwarg_dict = {} for inputstr in stat_kwarg_list: try: key, value = inputstr.split(':') stat_kwarg_dict[key] = value except ValueError: err_txt = "--statistic-keywords must take input in the " \ "form KWARG1:VALUE1 KWARG2:VALUE2 KWARG3:VALUE3 ... " \ "Received {}".format(' '.join(stat_kwarg_list)) raise ValueError(err_txt) return stat_kwarg_dict def get_statistic_from_opts(opts, ifos): """ Return a Stat class from an optparser object. 
This will assume that the options in the statistic_opt_group are present and will use these options to call stat.get_statistic and initialize the appropriate Stat subclass with appropriate kwargs. Parameters ---------- opts : argparse.Namespace instance The parsed command line options ifos : list The list of detector names Returns ------- Stat An initialized instance of the appropriate Stat subclass """ # Allow None inputs if opts.statistic_files is None: opts.statistic_files = [] if opts.statistic_keywords is None: opts.statistic_keywords = [] # flatten the list of lists of filenames to a single list (may be empty) opts.statistic_files = sum(opts.statistic_files, []) extra_kwargs = parse_statistic_keywords_opt(opts.statistic_keywords) stat_class = get_statistic(opts.ranking_statistic)( opts.sngl_ranking, opts.statistic_files, ifos=ifos, **extra_kwargs ) return stat_class
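# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): a minimal
# end-to-end use of the option helpers above. The statistic name 'quadsum'
# comes from statistic_dict above; the sngl-ranking name 'newsnr' is assumed
# to be a valid key of ranking.sngls_ranking_function_dict and is an
# assumption about the wider codebase rather than something guaranteed here.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import argparse

    example_parser = argparse.ArgumentParser()
    insert_statistic_option_group(example_parser)
    # Equivalent to passing these flags on a real command line
    example_opts = example_parser.parse_args(
        ['--ranking-statistic', 'quadsum', '--sngl-ranking', 'newsnr']
    )
    example_stat = get_statistic_from_opts(example_opts, ['H1', 'L1'])
    print('Initialized statistic:', type(example_stat).__name__)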
78,377
36.62746
81
py
pycbc
pycbc-master/test/validation_code/__init__.py
0
0
0
py
pycbc
pycbc-master/test/validation_code/old_coinc.py
# Copyright (C) 2015 Alex Nitz # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. #### NOTE ##### # This code is a verbatim copy of the coinc.py code as of 20th August 2022. # It's here to verify that any changes being made to this code are not changing # the physical output ############### # # ============================================================================= # # Preamble # # ============================================================================= # """ This modules contains functions for calculating and manipulating coincident triggers. """ import numpy, logging, pycbc.pnutils, pycbc.conversions, copy, lal from pycbc.detector import Detector, ppdets def background_bin_from_string(background_bins, data): """ Return template ids for each bin as defined by the format string Parameters ---------- bins: list of strings List of strings which define how a background bin is taken from the list of templates. data: dict of numpy.ndarrays Dict with parameter key values and numpy.ndarray values which define the parameters of the template bank to bin up. Returns ------- bins: dict Dictionary of location indices indexed by a bin name """ used = numpy.array([], dtype=numpy.uint32) bins = {} for mbin in background_bins: locs = None name, bin_type_list, boundary_list = tuple(mbin.split(':')) bin_type_list = bin_type_list.split(',') boundary_list = boundary_list.split(',') for bin_type, boundary in zip(bin_type_list, boundary_list): if boundary[0:2] == 'lt': member_func = lambda vals, bd=boundary : vals < float(bd[2:]) elif boundary[0:2] == 'gt': member_func = lambda vals, bd=boundary : vals > float(bd[2:]) else: raise RuntimeError("Can't parse boundary condition! 
Must begin " "with 'lt' or 'gt'") if bin_type == 'component' and boundary[0:2] == 'lt': # maximum component mass is less than boundary value vals = numpy.maximum(data['mass1'], data['mass2']) elif bin_type == 'component' and boundary[0:2] == 'gt': # minimum component mass is greater than bdary vals = numpy.minimum(data['mass1'], data['mass2']) elif bin_type == 'total': vals = data['mass1'] + data['mass2'] elif bin_type == 'chirp': vals = pycbc.pnutils.mass1_mass2_to_mchirp_eta( data['mass1'], data['mass2'])[0] elif bin_type == 'ratio': vals = pycbc.conversions.q_from_mass1_mass2( data['mass1'], data['mass2']) elif bin_type == 'eta': vals = pycbc.pnutils.mass1_mass2_to_mchirp_eta( data['mass1'], data['mass2'])[1] elif bin_type == 'chi_eff': vals = pycbc.conversions.chi_eff(data['mass1'], data['mass2'], data['spin1z'], data['spin2z']) elif bin_type == 'SEOBNRv2Peak': vals = pycbc.pnutils.get_freq('fSEOBNRv2Peak', data['mass1'], data['mass2'], data['spin1z'], data['spin2z']) elif bin_type == 'SEOBNRv4Peak': vals = pycbc.pnutils.get_freq('fSEOBNRv4Peak', data['mass1'], data['mass2'], data['spin1z'], data['spin2z']) elif bin_type == 'SEOBNRv2duration': vals = pycbc.pnutils.get_imr_duration( data['mass1'], data['mass2'], data['spin1z'], data['spin2z'], data['f_lower'], approximant='SEOBNRv2') elif bin_type == 'SEOBNRv4duration': vals = pycbc.pnutils.get_imr_duration( data['mass1'][:], data['mass2'][:], data['spin1z'][:], data['spin2z'][:], data['f_lower'][:], approximant='SEOBNRv4') else: raise ValueError('Invalid bin type %s' % bin_type) sub_locs = member_func(vals) del vals sub_locs = numpy.where(sub_locs)[0] if locs is not None: # find intersection of boundary conditions locs = numpy.intersect1d(locs, sub_locs) else: locs = sub_locs # make sure we don't reuse anything from an earlier bin locs = numpy.delete(locs, numpy.where(numpy.in1d(locs, used))[0]) used = numpy.concatenate([used, locs]) bins[name] = locs return bins def timeslide_durations(start1, start2, end1, end2, timeslide_offsets): """ Find the coincident time for each timeslide. Find the coincident time for each timeslide, where the first time vector is slid to the right by the offset in the given timeslide_offsets vector. 
Parameters ---------- start1: numpy.ndarray Array of the start of valid analyzed times for detector 1 start2: numpy.ndarray Array of the start of valid analyzed times for detector 2 end1: numpy.ndarray Array of the end of valid analyzed times for detector 1 end2: numpy.ndarray Array of the end of valid analyzed times for detector 2 timeslide_offsets: numpy.ndarray Array of offsets (in seconds) for each timeslide Returns -------- durations: numpy.ndarray Array of coincident time for each timeslide in the offset array """ from pycbc.events import veto durations = [] seg2 = veto.start_end_to_segments(start2, end2) for offset in timeslide_offsets: seg1 = veto.start_end_to_segments(start1 + offset, end1 + offset) durations.append(abs((seg1 & seg2).coalesce())) return numpy.array(durations) def time_coincidence(t1, t2, window, slide_step=0): """ Find coincidences by time window Parameters ---------- t1 : numpy.ndarray Array of trigger times from the first detector t2 : numpy.ndarray Array of trigger times from the second detector window : float Coincidence window maximum time difference, arbitrary units (usually s) slide_step : float (default 0) If calculating background coincidences, the interval between background slides, arbitrary units (usually s) Returns ------- idx1 : numpy.ndarray Array of indices into the t1 array for coincident triggers idx2 : numpy.ndarray Array of indices into the t2 array slide : numpy.ndarray Array of slide ids """ if slide_step: fold1 = t1 % slide_step fold2 = t2 % slide_step else: fold1 = t1 fold2 = t2 sort1 = fold1.argsort() sort2 = fold2.argsort() fold1 = fold1[sort1] fold2 = fold2[sort2] if slide_step: # Duplicate the folded t2 times shifted down and up by one slide # interval so that pairs which wrap around the interval boundary # are still found by the searchsorted window below fold2 = numpy.concatenate([fold2 - slide_step, fold2, fold2 + slide_step]) sort2 = numpy.concatenate([sort2, sort2, sort2]) left = numpy.searchsorted(fold2, fold1 - window) right = numpy.searchsorted(fold2, fold1 + window) idx1 = numpy.repeat(sort1, right - left) idx2 = [sort2[l:r] for l, r in zip(left, right)] if len(idx2) > 0: idx2 = numpy.concatenate(idx2) else: idx2 = numpy.array([], dtype=numpy.int64) if slide_step: diff = ((t1 / slide_step)[idx1] - (t2 / slide_step)[idx2]) slide = numpy.rint(diff) else: slide = numpy.zeros(len(idx1)) return idx1.astype(numpy.uint32), idx2.astype(numpy.uint32), slide.astype(numpy.int32) def time_multi_coincidence(times, slide_step=0, slop=.003, pivot='H1', fixed='L1'): """ Find multi detector coincidences. Parameters ---------- times: dict of numpy.ndarrays Dictionary keyed by ifo of single ifo trigger times slide_step: float Interval between time slides slop: float The amount of time to add to the TOF between detectors for coincidence pivot: str The ifo to which time shifts are applied in first stage coincidence fixed: str The other ifo used in first stage coincidence, subsequently used as a time reference for additional ifos. All other ifos are not time shifted relative to this ifo Returns ------- ids: dict of arrays of int Dictionary keyed by ifo with ids of trigger times forming coincidences.
Coincidence is tested for every pair of ifos that can be formed from the input dict: only those tuples of times passing all tests are recorded slide: array of int Slide ids of coincident triggers in pivot ifo """ def win(ifo1, ifo2): d1 = Detector(ifo1) d2 = Detector(ifo2) return d1.light_travel_time_to_detector(d2) + slop # Find coincs between the 'pivot' and 'fixed' detectors as in 2-ifo case pivot_id, fix_id, slide = time_coincidence(times[pivot], times[fixed], win(pivot, fixed), slide_step=slide_step) # Additional detectors do not slide independently of the 'fixed' one # Each trigger in an additional detector must be coincident with both # triggers in an existing coincidence # Slide 'pivot' trigger times to be coincident with trigger times in # 'fixed' detector fixed_time = times[fixed][fix_id] pivot_time = times[pivot][pivot_id] - slide_step * slide ctimes = {fixed: fixed_time, pivot: pivot_time} ids = {fixed: fix_id, pivot: pivot_id} dep_ifos = [ifo for ifo in times.keys() if ifo != fixed and ifo != pivot] for ifo1 in dep_ifos: # FIXME - make this loop into a function? # otime is extra ifo time in original trigger order otime = times[ifo1] # tsort gives ordering from original order to time sorted order tsort = otime.argsort() time1 = otime[tsort] # Find coincidences between dependent ifo triggers and existing coincs # - Cycle over fixed and pivot # - At the 1st iteration, the fixed and pivot triggers are reduced to # those for which the first out of fixed/pivot forms a coinc with ifo1 # - At the 2nd iteration, we are left with triggers for which both # fixed and pivot are coincident with ifo1 # - If there is more than 1 dependent ifo, ones that were previously # tested against fixed and pivot are now present for testing with new # dependent ifos for ifo2 in ids: logging.info('added ifo %s, testing against %s' % (ifo1, ifo2)) w = win(ifo1, ifo2) left = numpy.searchsorted(time1, ctimes[ifo2] - w) right = numpy.searchsorted(time1, ctimes[ifo2] + w) # Any times within time1 coincident with the time in ifo2 have # indices between 'left' and 'right' # 'nz' indexes into times in ifo2 which have coincidences with ifo1 # times nz = (right - left).nonzero() if len(right - left): rlmax = (right - left).max() if len(nz[0]) and rlmax > 1: # We expect at most one coincident time in ifo1, assuming # trigger spacing in ifo1 > time window. # However there are rare corner cases at starts/ends of inspiral # jobs. For these, arbitrarily keep the first trigger and # discard the second (and any subsequent ones). where = right - left == rlmax logging.warning('Triggers in %s are closer than coincidence ' 'window, 1 or more coincs will be discarded. ' 'This is a warning, not an error.'
% ifo1) print([float(ti) for ti in time1[left[where][0]:right[where][0]]]) # identify indices of times in ifo1 that form coincs with ifo2 dep_ids = left[nz] # slide is array of slide ids attached to pivot ifo slide = slide[nz] for ifo in ctimes: # cycle over fixed and pivot & any previous additional ifos # reduce times and IDs to just those forming a coinc with ifo1 ctimes[ifo] = ctimes[ifo][nz] ids[ifo] = ids[ifo][nz] # undo time sorting on indices of ifo1 triggers, add ifo1 ids and times # to dicts for testing against any additional detectors ids[ifo1] = tsort[dep_ids] ctimes[ifo1] = otime[ids[ifo1]] return ids, slide def cluster_coincs(stat, time1, time2, timeslide_id, slide, window, argmax=numpy.argmax): """Cluster coincident events for each timeslide separately, across templates, based on the ranking statistic Parameters ---------- stat: numpy.ndarray vector of ranking values to maximize time1: numpy.ndarray first time vector time2: numpy.ndarray second time vector timeslide_id: numpy.ndarray vector that determines the timeslide offset slide: float length of the timeslide offset interval window: float length to cluster over Returns ------- cindex: numpy.ndarray The set of indices corresponding to the surviving coincidences. """ logging.info('clustering coinc triggers over %ss window' % window) if len(time1) == 0 or len(time2) == 0: logging.info('No coinc triggers in one, or both, ifos.') return numpy.array([]) if numpy.isfinite(slide): # for a time shifted coinc, time1 is greater than time2 by approximately timeslide_id*slide # adding this quantity gives a mean coinc time located around time1 time = (time1 + time2 + timeslide_id * slide) / 2 else: time = 0.5 * (time2 + time1) tslide = timeslide_id.astype(numpy.float128) time = time.astype(numpy.float128) span = (time.max() - time.min()) + window * 10 time = time + span * tslide cidx = cluster_over_time(stat, time, window, argmax) return cidx def cluster_coincs_multiifo(stat, time_coincs, timeslide_id, slide, window, argmax=numpy.argmax): """Cluster coincident events for each timeslide separately, across templates, based on the ranking statistic Parameters ---------- stat: numpy.ndarray vector of ranking values to maximize time_coincs: tuple of numpy.ndarrays trigger times for each ifo, or -1 if an ifo does not participate in a coinc timeslide_id: numpy.ndarray vector that determines the timeslide offset slide: float length of the timeslide offset interval window: float duration of clustering window in seconds Returns ------- cindex: numpy.ndarray The set of indices corresponding to the surviving coincidences """ time_coinc_zip = list(zip(*time_coincs)) if len(time_coinc_zip) == 0: logging.info('No coincident triggers.') return numpy.array([]) time_avg_num = [] # find number of ifos and mean time over participating ifos for each coinc for tc in time_coinc_zip: time_avg_num.append(mean_if_greater_than_zero(tc)) time_avg, num_ifos = zip(*time_avg_num) time_avg = numpy.array(time_avg) num_ifos = numpy.array(num_ifos) # shift all but the pivot ifo by (num_ifos-1) * timeslide_id * slide # this leads to a mean coinc time located around pivot time if numpy.isfinite(slide): nifos_minusone = (num_ifos - numpy.ones_like(num_ifos)) time_avg = time_avg + (nifos_minusone * timeslide_id * slide)/num_ifos tslide = timeslide_id.astype(numpy.float128) time_avg = time_avg.astype(numpy.float128) span = (time_avg.max() - time_avg.min()) + window * 10 time_avg = time_avg + span * tslide cidx = cluster_over_time(stat, time_avg, window, argmax) return cidx
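# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the verbatim copy above): a toy
# zero-lag run of time_coincidence on synthetic trigger times. With
# slide_step=0 the triggers are compared directly, so only pairs closer than
# the window survive; the expected output below was worked through by hand.
# The guard keeps this from affecting the module's behavior on import.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    _t1 = numpy.array([10.0, 50.0, 90.0])
    _t2 = numpy.array([10.005, 70.0])
    _i1, _i2, _slide = time_coincidence(_t1, _t2, window=0.01)
    # Expect exactly one coincident pair: t1[0] with t2[0] at slide id 0,
    # i.e. this prints [10.] [10.005] [0]
    print(_t1[_i1], _t2[_i2], _slide)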
def mean_if_greater_than_zero(vals): """ Calculate mean over numerical values, ignoring values less than zero. E.g. used for mean time over coincident triggers when timestamps are set to -1 for ifos not included in the coincidence. Parameters ---------- vals: iterator of numerical values values to be mean averaged Returns ------- mean: float The mean of the values in the original vector which are greater than zero num_above_zero: int The number of entries in the vector which are above zero """ vals = numpy.array(vals) above_zero = vals > 0 return vals[above_zero].mean(), above_zero.sum() def cluster_over_time(stat, time, window, argmax=numpy.argmax): """Cluster generalized transient events over time via maximum stat over a symmetric sliding window Parameters ---------- stat: numpy.ndarray vector of ranking values to maximize time: numpy.ndarray time to use for clustering window: float length to cluster over argmax: function the function used to calculate the maximum value Returns ------- cindex: numpy.ndarray The set of indices corresponding to the surviving coincidences. """ logging.info('Clustering events over %s s window', window) indices = [] time_sorting = time.argsort() stat = stat[time_sorting] time = time[time_sorting] left = numpy.searchsorted(time, time - window) right = numpy.searchsorted(time, time + window) indices = numpy.zeros(len(left), dtype=numpy.uint32) # i is the index we are inspecting, j is the next one to save i = 0 j = 0 while i < len(left): l = left[i] r = right[i] # If there are no other points to compare it is obviously the max if (r - l) == 1: indices[j] = i j += 1 i += 1 continue # Find the location of the maximum within the time interval around i max_loc = argmax(stat[l:r]) + l # If this point is the max, we can skip to the right boundary if max_loc == i: indices[j] = i i = r j += 1 # If the max is later than i, we can skip to it elif max_loc > i: i = max_loc elif max_loc < i: i += 1 indices = indices[:j] logging.info('%d triggers remaining', len(indices)) return time_sorting[indices] class MultiRingBuffer(object): """Dynamic size n-dimensional ring buffer that can expire elements.""" def __init__(self, num_rings, max_time, dtype): """ Parameters ---------- num_rings: int The number of ring buffers to create. They all will have the same intrinsic size and will expire at the same time. max_time: int The maximum "time" an element can exist in each ring. dtype: numpy.dtype The type of each element in the ring buffer. """ self.max_time = max_time self.buffer = [] self.buffer_expire = [] for _ in range(num_rings): self.buffer.append(numpy.zeros(0, dtype=dtype)) self.buffer_expire.append(numpy.zeros(0, dtype=int)) self.time = 0 @property def filled_time(self): return min(self.time, self.max_time) def num_elements(self): return sum([len(a) for a in self.buffer]) @property def nbytes(self): return sum([a.nbytes for a in self.buffer]) def discard_last(self, indices): """Discard the triggers added in the latest update""" for i in indices: self.buffer_expire[i] = self.buffer_expire[i][:-1] self.buffer[i] = self.buffer[i][:-1] def advance_time(self): """Advance the internal time increment by 1, expiring any triggers that are now too old. 
""" self.time += 1 def add(self, indices, values): """Add triggers in 'values' to the buffers indicated by the indices """ for i, v in zip(indices, values): self.buffer[i] = numpy.append(self.buffer[i], v) self.buffer_expire[i] = numpy.append(self.buffer_expire[i], self.time) self.advance_time() def expire_vector(self, buffer_index): """Return the expiration vector of a given ring buffer """ return self.buffer_expire[buffer_index] def data(self, buffer_index): """Return the data vector for a given ring buffer""" # Check for expired elements and discard if they exist expired = self.time - self.max_time exp = self.buffer_expire[buffer_index] j = 0 while j < len(exp): # Everything before this j must be expired if exp[j] >= expired: self.buffer_expire[buffer_index] = exp[j:].copy() self.buffer[buffer_index] = self.buffer[buffer_index][j:].copy() break j += 1 if j > 0 and j == len(exp): print("Shouldn't be here!") raise return self.buffer[buffer_index] class CoincExpireBuffer(object): """Unordered dynamic sized buffer that handles multiple expiration vectors. """ def __init__(self, expiration, ifos, initial_size=2**20, dtype=numpy.float32): """ Parameters ---------- expiration: int The 'time' in arbitrary integer units to allow to pass before removing an element. ifos: list of strs List of strings to identify the multiple data expiration times. initial_size: int, optional The initial size of the buffer. dtype: numpy.dtype The dtype of each element of the buffer. """ self.expiration = expiration self.buffer = numpy.zeros(initial_size, dtype=dtype) self.index = 0 self.ifos = ifos self.time = {} self.timer = {} for ifo in self.ifos: self.time[ifo] = 0 self.timer[ifo] = numpy.zeros(initial_size, dtype=numpy.int32) def __len__(self): return self.index @property def nbytes(self): """Returns the approximate memory usage of self. """ nbs = [self.timer[ifo].nbytes for ifo in self.ifos] nbs.append(self.buffer.nbytes) return sum(nbs) def increment(self, ifos): """Increment without adding triggers""" self.add([], [], ifos) def remove(self, num): """Remove the the last 'num' elements from the buffer""" self.index -= num def add(self, values, times, ifos): """Add values to the internal buffer Parameters ---------- values: numpy.ndarray Array of elements to add to the internal buffer. times: dict of arrays The current time to use for each element being added. ifos: list of strs The set of timers to be incremented. 
""" for ifo in ifos: self.time[ifo] += 1 # Resize the internal buffer if we need more space if self.index + len(values) >= len(self.buffer): newlen = len(self.buffer) * 2 for ifo in self.ifos: self.timer[ifo].resize(newlen) self.buffer.resize(newlen, refcheck=False) self.buffer[self.index:self.index+len(values)] = values if len(values) > 0: for ifo in self.ifos: self.timer[ifo][self.index:self.index+len(values)] = times[ifo] self.index += len(values) # Remove the expired old elements keep = None for ifo in ifos: kt = self.timer[ifo][:self.index] >= self.time[ifo] - self.expiration keep = numpy.logical_and(keep, kt) if keep is not None else kt self.buffer[:keep.sum()] = self.buffer[:self.index][keep] for ifo in self.ifos: self.timer[ifo][:keep.sum()] = self.timer[ifo][:self.index][keep] self.index = keep.sum() def num_greater(self, value): """Return the number of elements larger than 'value'""" return (self.buffer[:self.index] > value).sum() @property def data(self): """Return the array of elements""" return self.buffer[:self.index] class LiveCoincTimeslideBackgroundEstimator(object): """Rolling buffer background estimation.""" def __init__(self, num_templates, analysis_block, background_statistic, sngl_ranking, stat_files, ifos, ifar_limit=100, timeslide_interval=.035, coinc_threshold=.002, return_background=False, **kwargs): """ Parameters ---------- num_templates: int The size of the template bank analysis_block: int The number of seconds in each analysis segment background_statistic: str The name of the statistic to rank coincident events. sngl_ranking: str The single detector ranking to use with the background statistic stat_files: list of strs List of filenames that contain information used to construct various coincident statistics. ifos: list of strs List of ifo names that are being analyzed. At the moment this must be two items such as ['H1', 'L1']. ifar_limit: float The largest inverse false alarm rate in years that we would like to calculate. timeslide_interval: float The time in seconds between consecutive timeslide offsets. coinc_threshold: float Amount of time allowed to form a coincidence in addition to the time of flight in seconds. return_background: boolean If true, background triggers will also be included in the file output. kwargs: dict Additional options for the statistic to use. See stat.py for more details on statistic options. """ from . import old_stat as stat self.num_templates = num_templates self.analysis_block = analysis_block stat_class = stat.get_statistic(background_statistic) self.stat_calculator = stat_class( sngl_ranking, stat_files, ifos=ifos, **kwargs ) self.timeslide_interval = timeslide_interval self.return_background = return_background self.ifos = ifos if len(self.ifos) != 2: raise ValueError("Only a two ifo analysis is supported at this time") self.lookback_time = (ifar_limit * lal.YRJUL_SI * timeslide_interval) ** 0.5 self.buffer_size = int(numpy.ceil(self.lookback_time / analysis_block)) det0, det1 = Detector(ifos[0]), Detector(ifos[1]) self.time_window = det0.light_travel_time_to_detector(det1) + coinc_threshold self.coincs = CoincExpireBuffer(self.buffer_size, self.ifos) self.singles = {} @classmethod def pick_best_coinc(cls, coinc_results): """Choose the best two-ifo coinc by ifar first, then statistic if needed. This function picks which of the available double-ifo coincs to use. It chooses the best (highest) ifar. The ranking statistic is used as a tie-breaker. 
A trials factor is applied if multiple types of coincs are possible at this time given the active ifos. Parameters ---------- coinc_results: list of coinc result dicts Dictionary by detector pair of coinc result dicts. Returns ------- best: coinc results dict If there is a coinc, this will contain the 'best' one. Otherwise it will return the provided dict. """ mstat = 0 mifar = 0 mresult = None # record the trials factor from the possible coincs we could # maximize over trials = 0 for result in coinc_results: # Check that a coinc was possible. See the 'add_singles' method # to see where this flag was added into the results dict if 'coinc_possible' in result: trials += 1 # Check that a coinc exists if 'foreground/ifar' in result: ifar = result['foreground/ifar'] stat = result['foreground/stat'] if ifar > mifar or (ifar == mifar and stat > mstat): mifar = ifar mstat = stat mresult = result # apply trials factor for the best coinc if mresult: mresult['foreground/ifar'] = mifar / float(trials) logging.info('Found %s coinc with ifar %s', mresult['foreground/type'], mresult['foreground/ifar']) return mresult # If no coinc, just return one of the results dictionaries. They will # all contain the same results (i.e. single triggers) in this case. else: return coinc_results[0] @classmethod def from_cli(cls, args, num_templates, analysis_chunk, ifos): from . import old_stat as stat # Allow None inputs stat_files = args.statistic_files or [] stat_keywords = args.statistic_keywords or [] # flatten the list of lists of filenames to a single list (may be empty) stat_files = sum(stat_files, []) kwargs = stat.parse_statistic_keywords_opt(stat_keywords) return cls(num_templates, analysis_chunk, args.ranking_statistic, args.sngl_ranking, stat_files, return_background=args.store_background, ifar_limit=args.background_ifar_limit, timeslide_interval=args.timeslide_interval, ifos=ifos, **kwargs) @staticmethod def insert_args(parser): from . import old_stat as stat stat.insert_statistic_option_group(parser) group = parser.add_argument_group('Coincident Background Estimation') group.add_argument('--store-background', action='store_true', help="Return background triggers with zerolag coincidences") group.add_argument('--background-ifar-limit', type=float, help="The limit on inverse false alarm rate to calculate " "background in years", default=100.0) group.add_argument('--timeslide-interval', type=float, help="The interval between timeslides in seconds", default=0.1) group.add_argument('--ifar-remove-threshold', type=float, help="NOT YET IMPLEMENTED", default=100.0) @property def background_time(self): """Return the amount of background time that the buffers contain""" time = 1.0 / self.timeslide_interval for ifo in self.singles: time *= self.singles[ifo].filled_time * self.analysis_block return time def save_state(self, filename): """Save the current state of the background buffers""" import pickle pickle.dump(self, filename) @staticmethod def restore_state(filename): """Restore state of the background buffers from a file""" import pickle return pickle.load(filename) def ifar(self, coinc_stat): """Return the inverse false alarm rate in years associated with the given coincident statistic. """ n = self.coincs.num_greater(coinc_stat) return self.background_time / lal.YRJUL_SI / (n + 1) def set_singles_buffer(self, results): """Create the singles buffer This creates the singles buffer for each ifo. The dtype is determined by a representative sample of the single triggers in the results.
Parameters ---------- results: dict of dict Dict indexed by ifo and then trigger column. """ # Determine the dtype from a sample of the data. self.singles_dtype = [] data = False for ifo in self.ifos: if ifo in results and results[ifo] is not False \ and len(results[ifo]['snr']): data = results[ifo] break if data is False: return for key in data: self.singles_dtype.append((key, data[key].dtype)) if 'stat' not in data: self.singles_dtype.append(('stat', self.stat_calculator.single_dtype)) # Create a ring buffer for each template ifo combination for ifo in self.ifos: self.singles[ifo] = MultiRingBuffer(self.num_templates, self.buffer_size, self.singles_dtype) def _add_singles_to_buffer(self, results, ifos): """Add single detector triggers to the internal buffer Parameters ---------- results: dict of arrays Dictionary of dictionaries indexed by ifo and keys such as 'snr', 'chisq', etc. The specific format is determined by the LiveBatchMatchedFilter class. Returns ------- updated_singles: dict of numpy.ndarrays Array of indices that have been just updated in the internal buffers of single detector triggers. """ if len(self.singles.keys()) == 0: self.set_singles_buffer(results) # If this *still* didn't work, no triggers in first set, try next time if len(self.singles.keys()) == 0: return {} # convert to single detector trigger values # FIXME Currently configured to use pycbc live output # where chisq is the reduced chisq and chisq_dof is the actual DOF logging.info("adding singles to the background estimate...") updated_indices = {} for ifo in ifos: trigs = results[ifo] if len(trigs['snr']) > 0: trigsc = copy.copy(trigs) trigsc['chisq'] = trigs['chisq'] * trigs['chisq_dof'] trigsc['chisq_dof'] = (trigs['chisq_dof'] + 2) / 2 single_stat = self.stat_calculator.single(trigsc) else: single_stat = numpy.array([], ndmin=1, dtype=self.stat_calculator.single_dtype) trigs['stat'] = single_stat # add each single detector trigger to the buffer and advance it data = numpy.zeros(len(single_stat), dtype=self.singles_dtype) for key, value in trigs.items(): data[key] = value self.singles[ifo].add(trigs['template_id'], data) updated_indices[ifo] = trigs['template_id'] return updated_indices def _find_coincs(self, results, ifos): """Look for coincs within the set of single triggers Parameters ---------- results: dict of arrays Dictionary of dictionaries indexed by ifo and keys such as 'snr', 'chisq', etc. The specific format is determined by the LiveBatchMatchedFilter class. Returns ------- coinc_results: dict of arrays A dictionary of arrays containing the coincident results.
""" # for each single detector trigger find the allowed coincidences # Record which template and the index of the single trigger # that forms each coincident trigger cstat = [[]] offsets = [] ctimes = {self.ifos[0]:[], self.ifos[1]:[]} single_expire = {self.ifos[0]:[], self.ifos[1]:[]} template_ids = [[]] trigger_ids = {self.ifos[0]:[[]], self.ifos[1]:[[]]} # Calculate all the permutations of coincident triggers for each # new single detector trigger collected for ifo in ifos: trigs = results[ifo] oifo = self.ifos[1] if self.ifos[0] == ifo else self.ifos[0] for i in range(len(trigs['end_time'])): trig_stat = trigs['stat'][i] trig_time = trigs['end_time'][i] template = trigs['template_id'][i] times = self.singles[oifo].data(template)['end_time'] stats = self.singles[oifo].data(template)['stat'] i1, _, slide = time_coincidence(times, numpy.array(trig_time, ndmin=1, dtype=numpy.float64), self.time_window, self.timeslide_interval) trig_stat = numpy.resize(trig_stat, len(i1)) sngls_list = [[ifo, trig_stat], [oifo, stats[i1]]] if oifo == self.ifos[0]: to_shift = [-1, 0] else: to_shift = [0, -1] # This can only use 2-det coincs at present c = self.stat_calculator.rank_stat_coinc( sngls_list, slide, self.timeslide_interval, to_shift ) offsets.append(slide) cstat.append(c) ctimes[oifo].append(times[i1]) ctimes[ifo].append(numpy.zeros(len(c), dtype=numpy.float64)) ctimes[ifo][-1].fill(trig_time) single_expire[oifo].append(self.singles[oifo].expire_vector(template)[i1]) single_expire[ifo].append(numpy.zeros(len(c), dtype=numpy.int32)) single_expire[ifo][-1].fill(self.singles[ifo].time - 1) # save the template and trigger ids to keep association # to singles. The trigger was just added so it must be in # the last position we mark this with -1 so the # slicing picks the right point template_ids.append(numpy.zeros(len(c)) + template) trigger_ids[oifo].append(i1) trigger_ids[ifo].append(numpy.zeros(len(c)) - 1) cstat = numpy.concatenate(cstat) template_ids = numpy.concatenate(template_ids).astype(numpy.int32) for ifo in ifos: trigger_ids[ifo] = numpy.concatenate(trigger_ids[ifo]).astype(numpy.int32) # cluster the triggers we've found # (both zerolag and non handled together) num_zerolag = 0 num_background = 0 logging.info( "%s: %s background and zerolag coincs", ppdets(self.ifos, "-"), len(cstat) ) if len(cstat) > 0: offsets = numpy.concatenate(offsets) ctime0 = numpy.concatenate(ctimes[self.ifos[0]]).astype(numpy.float64) ctime1 = numpy.concatenate(ctimes[self.ifos[1]]).astype(numpy.float64) cidx = cluster_coincs(cstat, ctime0, ctime1, offsets, self.timeslide_interval, self.analysis_block) offsets = offsets[cidx] zerolag_idx = (offsets == 0) bkg_idx = (offsets != 0) for ifo in self.ifos: single_expire[ifo] = numpy.concatenate(single_expire[ifo]) single_expire[ifo] = single_expire[ifo][cidx][bkg_idx] self.coincs.add(cstat[cidx][bkg_idx], single_expire, ifos) num_zerolag = zerolag_idx.sum() num_background = bkg_idx.sum() elif len(ifos) > 0: self.coincs.increment(ifos) ####################################Collect coinc results for saving coinc_results = {} # Save information about zerolag triggers if num_zerolag > 0: zerolag_results = {} idx = cidx[zerolag_idx][0] zerolag_cstat = cstat[cidx][zerolag_idx] zerolag_results['foreground/ifar'] = self.ifar(zerolag_cstat) zerolag_results['foreground/stat'] = zerolag_cstat template = template_ids[idx] for ifo in self.ifos: trig_id = trigger_ids[ifo][idx] single_data = self.singles[ifo].data(template)[trig_id] for key in single_data.dtype.names: path = 
'foreground/%s/%s' % (ifo, key) zerolag_results[path] = single_data[key] zerolag_results['foreground/type'] = '-'.join(self.ifos) coinc_results.update(zerolag_results) # Save some summary statistics about the background coinc_results['background/time'] = numpy.array([self.background_time]) coinc_results['background/count'] = len(self.coincs.data) # Save all the background triggers if self.return_background: coinc_results['background/stat'] = self.coincs.data return num_background, coinc_results def backout_last(self, updated_singles, num_coincs): """Remove the recently added singles and coincs Parameters ---------- updated_singles: dict of numpy.ndarrays Array of indices that have been just updated in the internal buffers of single detector triggers. num_coincs: int The number of coincs that were just added to the internal buffer of coincident triggers """ for ifo in updated_singles: self.singles[ifo].discard_last(updated_singles[ifo]) self.coincs.remove(num_coincs) def add_singles(self, results): """Add singles to the background estimate and find candidates Parameters ---------- results: dict of arrays Dictionary of dictionaries indexed by ifo and keys such as 'snr', 'chisq', etc. The specific format is determined by the LiveBatchMatchedFilter class. Returns ------- coinc_results: dict of arrays A dictionary of arrays containing the coincident results. """ # Let's see how large everything is logging.info( "%s: %s coincs, %s bytes", ppdets(self.ifos, "-"), len(self.coincs), self.coincs.nbytes ) # If there are no results just return valid_ifos = [k for k in results.keys() if results[k] and k in self.ifos] if len(valid_ifos) == 0: return {} # Add single triggers to the internal buffer self._add_singles_to_buffer(results, ifos=valid_ifos) # Calculate zerolag and background coincidences _, coinc_results = self._find_coincs(results, ifos=valid_ifos) # record if a coinc is possible in this chunk if len(valid_ifos) == 2: coinc_results['coinc_possible'] = True return coinc_results
44,282
37.506957
99
py
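A note on the `ifar` method above: it converts a candidate's ranking statistic into an inverse false alarm rate by counting expired background coincidences that rank higher. A minimal self-contained sketch of that conversion, with plain numpy and invented numbers standing in for the `CoincExpireBuffer` contents and `lal.YRJUL_SI`:

import numpy

YRJUL_SI = 365.25 * 24 * 3600.0  # seconds in a Julian year, as in lal.YRJUL_SI

def ifar_from_background(coinc_stat, background_stats, background_time):
    # Count background coincs ranked above the candidate; the +1 keeps the
    # estimate conservative when nothing louder has been seen yet.
    n = (background_stats > coinc_stat).sum()
    return background_time / YRJUL_SI / (n + 1)

# Illustration only: a fake Gaussian background and 50 years of background time
bg = numpy.random.normal(6.0, 1.0, size=100_000)
print(ifar_from_background(9.0, bg, 50 * YRJUL_SI))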
pycbc
pycbc-master/docs/remove_non_standard_imports.py
#!/usr/bin/env python # Copyright (C) 2011 Ian W. Harry # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """ PLEASE FIX: This code is here because sphinx-apidoc does not have the capability to exclude specific files from the documentation. The GPU modules must be removed as we cannot import them in this environment. This feature has been requested in Sphinx; remove this code and use the built-in option if it gets added. We may need to write a patch for Sphinx to add this functionality. """ import glob def check_module_name_line(currLine): if (':mod:' in currLine) and ('Module' in currLine): return True elif 'module' in currLine: return True else: return False # Module name fragments whose documentation sections should be excluded excludes = ['cuda', 'cufft', 'cuda_pyfft', 'cl_pyfft', 'pycbc_phenomC_tmplt', 'TaylorF2', 'fotonfilter', 'cpnest'] fileList = glob.glob('pycbc.*.rst') for fname in fileList: output = [] fp = open(fname, 'r') addLine = True for line in fp: if check_module_name_line(line): for excludeNam in excludes: if excludeNam in line: addLine = False break else: addLine = True if addLine: output.append(line) fp.close() fp = open(fname, 'w') fp.writelines(output) fp.close()
2,021
30.59375
76
py
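The filter in remove_non_standard_imports.py is stateful: a module-heading line that names an excluded module flips addLine off, and every following line stays dropped until the next module heading flips it back on. A small sketch of that behaviour on invented apidoc lines:

def is_module_line(line):
    # condensed form of check_module_name_line above
    return (':mod:' in line and 'Module' in line) or 'module' in line

excludes = ['cuda']
lines = [
    ":mod:`pycbc.fft.cuda` Module",   # excluded -> flag goes False, line dropped
    "    cuda section body",          # still dropped while the flag is False
    ":mod:`pycbc.fft.fftw` Module",   # not excluded -> flag back to True
    "    fftw section body",          # kept
]
add_line = True
kept = []
for line in lines:
    if is_module_line(line):
        add_line = not any(name in line for name in excludes)
    if add_line:
        kept.append(line)
print(kept)  # only the fftw heading and its body survive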
pycbc
pycbc-master/docs/conf.py
# -*- coding: utf-8 -*- # # PyCBC documentation build configuration file, created by # sphinx-quickstart on Tue Jun 11 17:02:52 2013. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import time import pycbc.version import subprocess import logging import glob import pycbc # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) pycbc.init_logging(True) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinxcontrib.programoutput', 'sphinx.ext.napoleon', 'sphinx.ext.mathjax', 'matplotlib.sphinxext.plot_directive', 'sphinx.ext.autosummary', 'sphinx.ext.inheritance_diagram', 'sphinx_design', "sphinxcontrib.jquery", ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'PyCBC' copyright = u'2015, 2016, 2017, Alexander Nitz, Ian Harry, Christopher M. Biwer, Duncan A. Brown, Josh Willis, and Tito Dal Canton' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = pycbc.version.last_release # The full version, including alpha/beta/rc tags. release = pycbc.version.version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. 
modindex_common_prefix = ['pycbc.'] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = {'style_nav_header_background': 'linear-gradient(0deg, rgba(0,0,0,1) 0%, rgba(193,193,255,1) 85%)', 'logo_only':True, } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] html_context = { 'display_github': True, 'github_user': 'gwastro', 'github_repo': 'pycbc', 'github_version': 'master/docs/', } # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = 'https://raw.githubusercontent.com/gwastro/pycbc-logo/master/pycbc_logo_name.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. html_split_index = True # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'PyCBCdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'PyCBC.tex', u'PyCBC Documentation', u'Alexander Nitz', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'pycbc', u'PyCBC Documentation', [u'Alexander Nitz'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'PyCBC', u'PyCBC Documentation', u'Alexander Nitz', 'PyCBC', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { 'python': ('http://docs.python.org/', None), 'h5py': ('http://docs.h5py.org/en/stable/', None), } napoleon_use_ivar = False suppress_warnings = ['image.nonlocal_uri'] def build_includes(): """Creates rst files in the _include directory using the python scripts there. This will ignore any files in the _include directory that start with ``_``. 
""" logging.info("Running scripts in _include:") cwd = os.getcwd() os.chdir('_include') pyfiles = glob.glob('*.py') + glob.glob('*.sh') run_args = [] for fn in pyfiles: if not fn.startswith('_'): if fn.endswith('.py'): exe = 'python' elif fn.endswith('.sh'): exe = 'bash' args = [exe, fn] run_args.append(args) run_num = 2 # Number of scripts to run in parallel i = 0 running = [] still_running = True while still_running: time.sleep(0.01) # Sleep so this process doesn't eat CPU time if len(running) < run_num and i < len(run_args): args = run_args[i] proc = subprocess.Popen(args, stdout=None, stderr=None) logging.info('Running: {}'.format(' '.join(proc.args))) i += 1 running.append(proc) for proc in running: status = proc.poll() r = proc.returncode if status is not None: if r == 0: print('DONE with :{}'.format(' '.join(proc.args))) else: msg = "Failure to run {}".format(' '.join(proc.args)) for p in running: p.terminate() raise RuntimeError(msg) running.remove(proc) if len(running) == 0 and i == len(run_args): still_running = False os.chdir(cwd) if not 'SKIP_PYCBC_DOCS_INCLUDE' in os.environ: build_includes() def setup(app): app.add_js_file('typed.min.js') app.add_js_file('terminal.css') app.add_js_file("theme_overrides.css") # -- Options for inheritance graphs ------------------------------------------- # Makes the graphs be vertically aligned, with parents at the top inheritance_graph_attrs = {'rankdir': 'TB'}
11,336
31.484241
132
py
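The build_includes function in conf.py hand-rolls a two-wide process pool: it launches subprocesses, polls proc.poll() in a sleep loop, and refills slots as scripts finish. The standard library can express the same idea more compactly; a sketch under the assumption that the job list is assembled the same way (the script names here are placeholders):

import subprocess
from concurrent.futures import ThreadPoolExecutor

def run(args):
    # check=True raises CalledProcessError on failure, playing the role of
    # the RuntimeError raised in build_includes above
    subprocess.run(args, check=True)

jobs = [['python', 'make_table.py'], ['bash', 'make_plots.sh']]  # placeholders
with ThreadPoolExecutor(max_workers=2) as pool:  # run_num = 2 above
    list(pool.map(run, jobs))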
pycbc
pycbc-master/docs/_include/waveform-parameters.py
#!/usr/bin/env python # Copyright (C) 2018 Duncan Macleod, Collin Capano # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """Prints the usable waveform parameters as an RST table. """ # NOTE: the manual call to OrderedDict can be removed in favour of # `ParameterList.description_dict` when gwastro/pycbc#2125 is merged # and released from pycbc import waveform from _dict_to_rst import rst_dict_table allparams = (waveform.td_waveform_params + waveform.fd_waveform_params + waveform.location_params) tbl = rst_dict_table(allparams.description_dict, key_format='``\'{0}\'``'.format, header=('Parameter', 'Description'), sort=False) filename = 'waveform-parameters.rst' with open(filename, 'w') as fp: print(tbl, file=fp)
1,478
35.073171
75
py
pycbc
pycbc-master/docs/_include/inference_data_opts-table.py
#!/usr/bin/env python # Copyright (C) 2020 Collin Capano # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """Prints an RST table of data options for inference models. """ from io import StringIO import re import textwrap from pycbc.inference.models import data_utils # wrapper for long metavars metavar_txtwrap = textwrap.TextWrapper(width=34, break_long_words=False) # convenience class for storing row data class Row(object): def __init__(self, divider=None, lpad=0, wrap_option=True): if divider is None: divider = ' | ' self.divider = divider self.lborder = ' '*lpad + divider[1:] self.rborder = divider[:-1] self._groupmsg = '' self.wrap_option = wrap_option self._option = '' self._metavar = '' self._helpmsg = '' @property def option(self): return self._option @option.setter def option(self, option): if self.wrap_option: # add `` around option string option = '``'+option+'``' self._option = option @property def metavar(self): return self._metavar @metavar.setter def metavar(self, metavar): # text wrap if metavar is more than 34 characters wide self._metavar = '\n'.join(metavar_txtwrap.wrap(metavar)) @staticmethod def replace_doubledash(str_): """Replaces all instances of --arg with ``arg`` in a string.""" pattern = r"--\w+-?\w*" for s in re.findall(pattern, str_): rep = '``' + s.replace('--', '') + '``' str_ = re.sub(s, rep, str_) return str_ @property def helpmsg(self): return self._helpmsg @helpmsg.setter def helpmsg(self, msg): # replace all instances of --arg with ``arg`` self._helpmsg = self.replace_doubledash(msg) @property def groupmsg(self): return self._groupmsg @groupmsg.setter def groupmsg(self, msg): # replace all instances of --arg with ``arg`` self._groupmsg = self.replace_doubledash(msg) @property def isgroup(self): return self.groupmsg != '' @property def grouplen(self): return max(map(len, self.groupmsg.split('\n'))) @property def metavarlen(self): return max(map(len, self.metavar.split('\n'))) @property def helplen(self): return max(map(len, self.helpmsg.split('\n'))) def format(self, maxlen, optlen, metalen, helplen): if self.isgroup: out = ['{lbdr}{msg:<{width}}{rbdr}'.format(lbdr=self.lborder, msg=msg, width=maxlen, rbdr=self.rborder) for msg in self.groupmsg.split('\n')] else: tmplt = '{msg:<{rpad}}' out = [] metavar = self.metavar.split('\n') helpmsg = self.helpmsg.split('\n') nlines = max(len(metavar), len(helpmsg)) for ii in range(nlines): if ii == 0: optstr = self.option else: optstr = '' if ii < len(metavar): metastr = metavar[ii] else: metastr = '' if ii < len(helpmsg): helpstr = helpmsg[ii] else: helpstr = '' optstr = tmplt.format(msg=optstr, rpad=optlen) metastr = tmplt.format(msg=metastr, rpad=metalen) helpstr = tmplt.format(msg=helpstr, rpad=helplen) #rowstr = self.divider.join([optstr, helpstr, metastr]) rowstr = self.divider.join([optstr, metastr, helpstr]) # add borders rowstr = '{}{}{}'.format(self.lborder, 
rowstr, self.rborder) out.append(rowstr) return '\n'.join(out) def __len__(self): if self.isgroup: baselen = self.grouplen else: baselen = len(self.option) + self.metavarlen + self.helplen return baselen + 2*len(self.divider) # create a data parser that has the options parser = data_utils.create_data_parser() # dump the help message to a file buffer fp = StringIO() parser.print_help(file=fp) # Regular expressions to interpret the help message: # Lines with a "--option stuff" (i.e., a single space after the option) include # metadata. Lines with "--option msg" (i.e., multiple spaces after the # option) contain no metadata, and just go straight to the help message. regx_optmeta = re.compile( r'^\s+((-\S, )*)--(?P<option>\S+)\s(?P<metavar>\S.+)') regx_optmsg = re.compile(r'^\s+((-\S, )*)--(?P<option>\S+)\s+(?P<msg>.+)') # Note: optmsg will match optmeta lines, so need to test for optmeta first. # Lines that start with whitespace but do not match optmeta or optmsg will be # assumed to be the rest of the help message for either an option or a group. regx_helpmsg = re.compile(r'^\s+(?P<msg>.+)') # Note: this will match both optmeta and optmsg lines, so need to test for # for those before this. # Lines that do not start with whitespace will be considered to be the start of # a new option group. This is mutually exclusive of all the previous regxs, # since they all require whitespace at the start of a line. regx_newgroup = re.compile(r'^(?P<msg>\S.+)') # now format the string buffer into a rst table fp.seek(0) # we want to skip everything up to the "optional arguments:" skip = True while skip: line = fp.readline() m = regx_newgroup.match(line) if m is not None: skip = m.group('msg') not in ['optional arguments:', 'options:'] # advance past the 'optional arguments:' and the 'help' line line = fp.readline() line = fp.readline() # now read through the rest of the lines, converting options into a list of # tuples with order (option, meta data, help message), grouped by option groups # add a header row header = Row(wrap_option=False) header.option = 'Name' header.metavar = 'Syntax' header.helpmsg = 'Description' table = [header] while line: # determine if the line is a new group newgroup = regx_newgroup.match(line) if newgroup: groupmsg = [newgroup.group('msg')] # continue reading until we get a blank line or an option line = fp.readline() while not (line == '' or line == '\n' or regx_optmsg.match(line)): groupmsg.append(regx_helpmsg.match(line).group('msg')) line = fp.readline() # compile the group message row = Row() row.groupmsg = '\n'.join(groupmsg) table.append(row) # check if the line contains an option m = regx_optmsg.match(line) if m: row = Row() helpmsg = [] row.option = m.group('option') # check if the line is an option with metadata meta = regx_optmeta.match(line) if meta: row.metavar = meta.group('metavar') else: helpmsg.append(m.group('msg')) # continue reading to get the help message line = fp.readline() while not (line == '' or line == '\n' or regx_optmsg.match(line)): helpmsg.append(regx_helpmsg.match(line).group('msg')) line = fp.readline() # compile the help message row.helpmsg = '\n'.join(helpmsg) # remove the list of PSDs from the fake strain and psd model arguments, # referring instead to the psd table if (m.group('option') == 'fake-strain' or m.group('option') == 'psd-model'): # strip off everything after the "Choose from" helpmsg = row.helpmsg.replace('\n', ' ') idx = re.search(r"Choose +from", helpmsg).end() helpmsg = helpmsg[:idx] + " any available PSD model" # for fake strain, 
add zero Noise if m.group('option') == 'fake-strain': helpmsg += ', or ``zeroNoise``.' else: helpmsg += '.' row.helpmsg = textwrap.fill(helpmsg, width=54) # add the row to the table table.append(row) else: # read the next line for the loop line = fp.readline() # Now format the table # get the maximum length of each column optlen = max([len(row.option) for row in table]) metalen = max([row.metavarlen for row in table]) helplen = max([row.helplen for row in table]) maxlen = optlen + metalen + helplen + 6 # the 6 is for the 2 dividers # create row separators # "major" will have == lines majorline = Row(divider='=+=', wrap_option=False) majorline.option = '='*optlen majorline.metavar = '='*metalen majorline.helpmsg = '='*helplen # "minor" will have -- lines minorline = Row(divider='-+-', wrap_option=False) minorline.option = '-'*optlen minorline.metavar = '-'*metalen minorline.helpmsg = '-'*helplen formatargs = [maxlen, optlen, metalen, helplen] # Write the formatted table to file filename = 'inference_data_opts-table.rst' out = open(filename, 'w') print(minorline.format(*formatargs), file=out) # print the header print(header.format(*formatargs), file=out) print(majorline.format(*formatargs), file=out) # print everything else in the table for ii in range(1, len(table)): row = table[ii] print(row.format(*formatargs), file=out) print(minorline.format(*formatargs), file=out) out.close()
10,089
33.913495
79
py
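The help-text parser above leans on a subtle distinction: exactly one space after --option means the line carries a metavar, while a run of spaces means the option's help text starts on the same line. A quick check of the two regular expressions on hypothetical argparse output:

import re

regx_optmeta = re.compile(r'^\s+((-\S, )*)--(?P<option>\S+)\s(?P<metavar>\S.+)')
regx_optmsg = re.compile(r'^\s+((-\S, )*)--(?P<option>\S+)\s+(?P<msg>.+)')

meta_line = "  --psd-model PSD_MODEL"      # single space: option with metavar
plain_line = "  --verbose      be chatty"  # space run: option plus help text

print(bool(regx_optmeta.match(meta_line)))   # True
print(bool(regx_optmeta.match(plain_line)))  # False: \S fails on the space run
print(bool(regx_optmsg.match(plain_line)))   # True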
pycbc
pycbc-master/docs/_include/_dict_to_rst.py
# Copyright (C) 2018 Duncan Macleod # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """Utility for converting a python dictionary to an RST table. """ __author__ = 'Duncan Macleod <duncan.macleod@ligo.org>' def rst_dict_table(dict_, key_format=str, val_format=str, header=None, sort=True): """Returns an RST-formatted table of keys and values from a `dict` Parameters ---------- dict_ : dict data to display in table key_format : callable callable function with which to format keys val_format : callable callable function with which to format values header : None, tuple of str a 2-tuple of headers for the two columns, or `None` to exclude a header line (default) sort : bool, optional Sort the dictionary keys alphabetically when writing the table. Examples -------- >>> a = {'key1': 'value1', 'key2': 'value2'} >>> print(rst_dict_table(a)) ==== ====== key1 value1 key2 value2 ==== ====== >>> print(rst_dict_table(a, key_format='``{}``'.format, ... val_format=':class:`{}`'.format, ... header=('Key', 'Value'))) ======== =============== Key Value ======== =============== ``key1`` :class:`value1` ``key2`` :class:`value2` ======== =============== """ keys, values = zip(*dict_.items()) # apply formatting keys = list(map(key_format, keys)) values = list(map(val_format, values)) # work out longest elements in each column nckey = max(list(map(len, keys))) ncval = max(list(map(len, values))) if header: khead, vhead = header nckey = max(nckey, len(khead)) ncval = max(ncval, len(vhead)) # build table header line divider = "{} {}".format('='*nckey, '='*ncval) def row(key, val): fmt = '{{0:{0}s}} {{1}}'.format(nckey) return fmt.format(key, val) # build table of lines lines = [divider] if header: lines.extend((row(*header), divider)) params = zip(keys, values) if sort: params = sorted(params) for key, val in params: lines.append(row(key, val)) lines.append(divider) return '\n'.join(lines) def format_pyobj(obj, objtype): """Function for formatting python objects in the table. An object can be a module, class, or function. Parameters ---------- obj : python object The object to link to. objtype : str The type of the object, e.g., 'class', 'func', etc. For the full list of recognized domains, see the `sphinx docs <https://www.sphinx-doc.org/en/master/usage/restructuredtext/domains.html>`_. Returns ------- str : Formatted RST string that will cross reference to the object. """ return ':py:{0}:`{1}.{2}`'.format(objtype, obj.__module__, obj.__name__) def format_class(class_): """Function for formatting classes in the table. This can be passed to ``rst_dict_table``'s ``val_format`` argument. """ return format_pyobj(class_, 'class') def format_function(func_): """Function for formatting python functions in the table. This can be passed to ``rst_dict_table``'s ``val_format`` argument.
""" return format_pyobj(func_, 'func')
4,090
29.992424
85
py
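format_pyobj above only concatenates an object's __module__ and __name__ into a Sphinx cross-reference role. Applied to a standard-library class, for example:

from collections import OrderedDict

def format_pyobj(obj, objtype):
    return ':py:{0}:`{1}.{2}`'.format(objtype, obj.__module__, obj.__name__)

print(format_pyobj(OrderedDict, 'class'))
# -> :py:class:`collections.OrderedDict`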
pycbc
pycbc-master/docs/_include/distributions-table.py
#!/usr/bin/env python # Copyright (C) 2018 Duncan Macleod, Collin Capano # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """Prints an RST table of available distributions from the distributions module. """ from pycbc.distributions import distribs from _dict_to_rst import (rst_dict_table, format_class) tbl = rst_dict_table(distribs, key_format='``\'{0}\'``'.format, header=('Name', 'Class'), val_format=format_class) filename = 'distributions-table.rst' with open(filename, 'w') as fp: print(tbl, file=fp)
1,217
37.0625
75
py
pycbc
pycbc-master/docs/_include/transforms-table.py
#!/usr/bin/env python # Copyright (C) 2018 Duncan Macleod, Collin Capano # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """Prints an RST table of available distributions from the distributions module. """ from pycbc.transforms import transforms from _dict_to_rst import (rst_dict_table, format_class) tbl = rst_dict_table(transforms, key_format='``\'{0}\'``'.format, header=('Name', 'Class'), val_format=format_class) filename = 'transforms-table.rst' with open(filename, 'w') as fp: print(tbl, file=fp)
1,215
37
75
py
pycbc
pycbc-master/docs/_include/models-table.py
#!/usr/bin/env python # Copyright (C) 2018 Collin Capano # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """Prints an RST table of available models from the inference.models module. """ from pycbc.inference.models import get_models from _dict_to_rst import (rst_dict_table, format_class) tbl = rst_dict_table(get_models(), key_format='``\'{0}\'``'.format, header=('Name', 'Class'), val_format=format_class) filename = 'models-table.rst' with open(filename, 'w') as fp: print(tbl, file=fp)
1,199
36.5
75
py
pycbc
pycbc-master/docs/_include/sampler_inheritance_diagrams.py
# Creates RST for the sampler inheritance diagrams from pycbc.inference.sampler import samplers fname = 'sampler_inheritance_diagrams.rst' tmplt = """.. _inheritance-{name}: * ``{name}``: .. inheritance-diagram:: {module}.{clsname} :parts: 3 | """ fp = open(fname, 'w') for sampler, cls in sorted(samplers.items()): out = tmplt.format(name=sampler, clsname=cls.__name__, module=cls.__module__) print(out, file=fp) fp.close()
466
19.304348
58
py
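Each stanza that sampler_inheritance_diagrams.py emits is just the template above filled with a sampler's name, module, and class. Rendering it once with an invented sampler entry (template line breaks restored from the RST structure) shows the shape of the output:

tmplt = """.. _inheritance-{name}:

* ``{name}``:

.. inheritance-diagram:: {module}.{clsname}
   :parts: 3

|
"""

# hypothetical sampler entry, for illustration only
print(tmplt.format(name='toy_sampler',
                   module='mypkg.samplers',
                   clsname='ToySampler'))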
pycbc
pycbc-master/docs/_include/samplers-table.py
#!/usr/bin/env python # Copyright (C) 2018 Collin Capano # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """Prints an RST table of available models from the inference.models module. """ from pycbc.inference.sampler import samplers from _dict_to_rst import (rst_dict_table, format_class) tbl = rst_dict_table(samplers, key_format='``\'{0}\'``'.format, header=('Name', 'Class'), val_format=format_class) filename = 'samplers-table.rst' with open(filename, 'w') as fp: print(tbl, file=fp)
1,196
36.40625
75
py
pycbc
pycbc-master/docs/_include/inference_io_inheritance_diagrams.py
# Creates RST for the sampler inheritance diagrams import inspect from pycbc.inference.io import filetypes fname = 'inference_io_inheritance_diagrams.rst' def get_topclasses(cls): """Gets the base classes that are in pycbc.""" bases = [c for c in inspect.getmro(cls) if c.__module__.startswith('pycbc') and c != cls] return ', '.join(['{}.{}'.format(c.__module__, c.__name__) for c in bases]) tmplt = """.. _inheritance-io-{name}: * ``{name}``: .. inheritance-diagram:: {module}.{clsname} :parts: 3 :top-classes: pycbc.inference.io.base_hdf.BaseInferenceFile | """ fp = open(fname, 'w') for ftname, cls in sorted(filetypes.items()): # get the parents topclasses = get_topclasses(cls) out = tmplt.format(name=ftname, clsname=cls.__name__, module=cls.__module__) print(out, file=fp) fp.close()
871
24.647059
79
py
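get_topclasses above walks inspect.getmro and keeps only the pycbc ancestors of a filetype class. The same MRO filtering, demonstrated on a small invented hierarchy:

import inspect

class Base: pass
class Mid(Base): pass
class Leaf(Mid): pass

bases = [c for c in inspect.getmro(Leaf) if c not in (Leaf, object)]
print(', '.join('{}.{}'.format(c.__module__, c.__name__) for c in bases))
# -> __main__.Mid, __main__.Base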
pycbc
pycbc-master/docs/_include/psd_models-table.py
#!/usr/bin/env python # Copyright (C) 2020 Collin Capano # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """Prints an RST table of available psd models. """ from pycbc import psd from _dict_to_rst import (rst_dict_table, format_function) psds = {p: getattr(psd.analytical, p) for p in psd.analytical.get_psd_model_list()} tbl = rst_dict_table(psds, key_format='``{0}``'.format, header=('Name', 'Function'), val_format=format_function) filename = 'psd_models-table.rst' with open(filename, 'w') as fp: print(tbl, file=fp)
1,243
32.621622
75
py
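All of the *-table.py scripts above share one pattern: look up a registry dict inside pycbc and hand it to rst_dict_table with a key formatter and a header. With a toy registry standing in for the real one (names invented), the emitted RST looks roughly like this:

# stand-in for a pycbc registry such as pycbc.distributions.distribs
registry = {'uniform': 'UniformDist', 'gaussian': 'GaussianDist'}

from _dict_to_rst import rst_dict_table  # the helper defined above
print(rst_dict_table(registry, key_format="``'{0}'``".format,
                     header=('Name', 'Class')))
# ============== ============
# Name           Class
# ============== ============
# ``'gaussian'`` GaussianDist
# ``'uniform'``  UniformDist
# ============== ============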
MAPS-mt
MAPS-mt-main/interactive.py
import os import difflib import logging import argparse import warnings from typing import List from langcodes import Language from data.trigger_sents import SUPPORT_LANGS from comet import load_from_checkpoint, download_model from data import demo_ex_dict, kw_ex_dict, topic_ex_dict from model.openai.translate import api_key, model2max_context, num_tokens_from_string, batch_translate_with_backoff, translate_with_backoff from tabulate import tabulate from termcolor import colored import shutil warnings.filterwarnings("ignore", category=UserWarning, module="pytorch_lightning.trainer.setup") SUPPORTED_LANG_PAIRS = [f"{s}-{t}" for s in SUPPORT_LANGS for t in SUPPORT_LANGS if s != t] MODEL_NAME = "text-davinci-003" #TODO: support more models KNOW2COLOR = { "Keywords": 'light_red', "Topics": 'light_green', "Demo": 'light_yellow', } comet_model_mapping = { "wmt21-comet-qe-da": "wmt21-comet-qe-da/checkpoints/model.ckpt", } def parse_args(): parser = argparse.ArgumentParser("", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--lang-pair", "-lp", type=str, required=True, choices=SUPPORTED_LANG_PAIRS, help="Language pair") parser.add_argument("--comet-qe-model-name", type=str, default="wmt21-comet-qe-da", help="COMET QE model name") parser.add_argument("--comet-saving-dir", type=str, default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'eval_ckpt')) parser.add_argument("--only-final", action="store_true", help="Only output the final translation") parser.add_argument("--use-gpu", action="store_true", help="Use gpu for QE model") return parser.parse_args() def query(prompt): len_prompt = num_tokens_from_string(prompt, MODEL_NAME) return translate_with_backoff( prompt, MODEL_NAME, max_tokens=model2max_context[MODEL_NAME]-len_prompt, api_key=api_key, temperature=0.0 ) def batch_query(prompts): if len(prompts) == 0: return [] len_prompt = max([num_tokens_from_string(p, MODEL_NAME) for p in prompts]) return batch_translate_with_backoff( prompts, MODEL_NAME, max_tokens=model2max_context[MODEL_NAME]-len_prompt, api_key=api_key, temperature=0.0 ) def mine_keywords_prompt(source_sentence: str, src_lng: str, tgt_lng: str, src_full: str, tgt_full: str): ex = kw_ex_dict[(src_lng, tgt_lng)] all_items = ex + [(source_sentence, None)] prompt_lst = [] for it in all_items: it_src, it_kw = it s = f"Let's extract the keywords in the following {src_full} sentence, and then translate these keywords into {tgt_full}.\n" + \ f"{src_full}: {it_src}\n" + \ (f"Keyword Pairs: {it_kw}" if it_kw else "Keyword Pairs:") prompt_lst.append(s) prompt = "\n\n".join(prompt_lst) return prompt def mine_topics_prompt(source_sentence: str, src_lng: str, tgt_lng: str): ex = topic_ex_dict[(src_lng, tgt_lng)] all_items = ex + [(source_sentence, None)] prompt_lst = [] for it in all_items: it_src, it_topic = it s = f"Use a few words to describe the topics of the following input sentence.\n" + \ f"Input: {it_src}\n" + \ (f"Topics: {it_topic}" if it_topic else "Topics:") prompt_lst.append(s) prompt = "\n\n".join(prompt_lst) return prompt def mine_demo_prompt(source_sentence: str, src_lng: str, tgt_lng: str, src_full: str, tgt_full: str): ex = demo_ex_dict[(src_lng, tgt_lng)] all_items = ex + [(source_sentence, None, None)] prompt_lst = [] for it in all_items: it_src, it_demo_src, it_demo_tgt = it s = f"Let's write {'an' if src_full == 'English' else 'a'} {src_full} sentence related to but different from the input {src_full} sentence and translate it into {tgt_full}\n" + \ f"Input {src_full} sentence: 
{it_src}\n" + \ (f"Output {src_full}-{tgt_full} sentence pair: {it_demo_src}\t{it_demo_tgt}" if (it_demo_src and it_demo_tgt) else f"Output {src_full}-{tgt_full} sentence pair:") prompt_lst.append(s) prompt = "\n\n".join(prompt_lst) return prompt def mine_knowledge(source_sentence: str, src_lng: str, tgt_lng: str, src_full: str, tgt_full: str): prompts = [] prompts.append(mine_keywords_prompt(source_sentence, src_lng, tgt_lng, src_full, tgt_full)) prompts.append(mine_topics_prompt(source_sentence, src_lng, tgt_lng)) prompts.append(mine_demo_prompt(source_sentence, src_lng, tgt_lng, src_full, tgt_full)) return batch_query(prompts) def knowledge_integration(source_sentence: str, src_full: str, tgt_full: str, keywords: str, topics: str, demo: str): prompts = [] prompts.append(translate_prompt(source_sentence, src_full, tgt_full)) prompts.append(translate_with_knowledge_prompt("Keyword Pairs", keywords, source_sentence, src_full, tgt_full)) prompts.append(translate_with_knowledge_prompt("Topics", topics, source_sentence, src_full, tgt_full)) prompts.append(translate_with_knowledge_prompt(f"Related {src_full}-{tgt_full} sentence pairs", demo, source_sentence, src_full, tgt_full)) return batch_query(prompts) def translate_with_knowledge_prompt(knowledge_type: str, knowledge_content: str, source_sentence: str, src_full: str, tgt_full: str): prompt = f"{knowledge_type}: {knowledge_content}\n\n" + \ f"Instruction: Given the above knowledge, translate the following {src_full} text into {tgt_full}.\n" + \ f"{src_full}: {source_sentence}\n" + \ f"{tgt_full}:" return prompt def translate_prompt(source_sentence: str, src_full: str, tgt_full: str): prompt = f"Instruction: Translate the following {src_full} text into {tgt_full}.\n" + \ f"{src_full}: {source_sentence}\n" + \ (f"{tgt_full}:") return prompt def comet_qe(comet_model, source_sentence: str, translation_candidates: List[str], use_gpu: bool): data = [] for translation_candidate in translation_candidates: data.append({"mt": translation_candidate, "src": source_sentence, "ref": None}) model_output = comet_model.predict(data, batch_size=4, gpus=1 if use_gpu else 0, progress_bar=False) scores = model_output.scores return scores def argmax(lst): return lst.index(max(lst)) def find_diff_str(str1: str, str2: str, know_name: str, language: str) -> str: """Highlight the parts of `str2` that differ from `str1` Args: str1 (str): the reference string, i.e., the base candidate str2 (str): input string know_name (str): name of the knowledge type, must be a key of `KNOW2COLOR` language (str): the language full name Returns: str: highlighted str2 """ d = difflib.Differ() # helper function to process diffs def process_diff(diff): result = [] for fragment in diff: if fragment[0] == ' ': result.append(fragment[2:]) # Keep unchanged parts elif fragment[0] == '-': continue # Discard parts in str1 not in str2 elif fragment[0] == '+': # Highlight additions from str2 not in str1 result.append(colored(fragment[2:], KNOW2COLOR[know_name])) return result if language in ['English', 'German']: # split the input strings into word lists str1_list = str1.split() str2_list = str2.split() diff = d.compare(str1_list, str2_list) result = process_diff(diff) result = ' '.join(result) else: diff = d.compare(str1, str2) result = process_diff(diff) result = ''.join(result) return result def main(args): src_lng, tgt_lng = args.lang_pair.split('-') src_full = Language.make(language=src_lng).display_name() tgt_full = Language.make(language=tgt_lng).display_name() # Loading the comet model loggers = 
[logging.getLogger(name) for name in logging.root.manager.loggerDict] for logger in loggers: logger.setLevel(logging.ERROR) if args.comet_qe_model_name in comet_model_mapping: comet_model = load_from_checkpoint(os.path.join(args.comet_saving_dir, comet_model_mapping[args.comet_qe_model_name])) else: model_path = download_model(args.comet_qe_model_name, saving_directory=args.comet_saving_dir) comet_model = load_from_checkpoint(model_path) comet_model.eval() # Translate while True: source_sentence = "" while source_sentence == "": source_sentence = input(f"\nEnter source {src_full} sentence: ") # knowledge mining keywords, topics, demo = mine_knowledge(source_sentence, src_lng, tgt_lng, src_full, tgt_full) # knowledge integration candidate_base, candidate_kw, candidate_topic, candidate_demo = knowledge_integration(source_sentence, src_full, tgt_full, keywords, topics, demo) # knowledge selection candidates = [candidate_base, candidate_kw, candidate_topic, candidate_demo] scores = comet_qe(comet_model, source_sentence, candidates, args.use_gpu) final_translation = candidates[argmax(scores)] # output if args.only_final: print(final_translation) else: table = [ [colored("Keywords", KNOW2COLOR["Keywords"]), f"{keywords}"], [colored("Topics", KNOW2COLOR["Topics"]), f"{topics}"], [colored("Demo", KNOW2COLOR["Demo"]), f"{demo}"], ["----", "--"], [colored("Cand Kw", KNOW2COLOR["Keywords"]), f"{find_diff_str(candidate_base, candidate_kw, 'Keywords', tgt_full)}"], [colored("Cand Topic", KNOW2COLOR["Topics"]), f"{find_diff_str(candidate_base, candidate_topic, 'Topics', tgt_full)}"], [colored("Cand Demo", KNOW2COLOR["Demo"]), f"{find_diff_str(candidate_base, candidate_demo, 'Demo', tgt_full)}"], ["Cand Base", f"{candidate_base}"], ["----", "--"], ["Final", colored(f"{final_translation}", attrs=["bold"])], ] width = min(shutil.get_terminal_size().columns-18, 120) print(tabulate(table, tablefmt='fancy_grid', maxcolwidths=[None, width])) if __name__ == "__main__": args = parse_args() main(args)
10,309
41.780083
186
py
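The highlighting in find_diff_str above comes straight from difflib.Differ: fragments prefixed '+' exist only in the candidate and get coloured, '-' fragments are discarded, and ' ' fragments pass through unchanged. The core of that, stripped of colour and run on invented sentences:

import difflib

base = "The cat sat on the mat"
cand = "The black cat sat on a mat"

added = [frag[2:] for frag in difflib.Differ().compare(base.split(), cand.split())
         if frag[0] == '+']
print(added)  # ['black', 'a'] -- the words that would be highlighted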
MAPS-mt
MAPS-mt-main/scripts/score.py
import os import argparse from comet import load_from_checkpoint, download_model import json import threading from bleurt import score as bleurt_score import logging from sacrebleu.metrics import BLEU comet_model_mapping = { "wmt21-comet-qe-da": "wmt21-comet-qe-da/checkpoints/model.ckpt", } def wait_until_path_exist(path): while not os.path.isdir(path): pass return def comet(**kwargs): sys_lines = kwargs["sys_lines"] src_lines = kwargs["src_lines"] ref_lines = kwargs["ref_lines"] comet_model_name = kwargs["comet_model_name"] comet_saving_dir = kwargs["comet_saving_dir"] comet_cache_dir = kwargs["comet_cache_dir"] batch_size = kwargs["batch_size"] cpu = kwargs["cpu"] cache_file = os.path.join(comet_cache_dir, 'comet_cache.json') wait_until_path_exist(comet_saving_dir) cache_lock = threading.Lock() with cache_lock: if os.path.exists(cache_file): with open(cache_file, 'r') as f: cache = json.load(f) else: cache = {} data = [] new_sys_lines, new_src_lines, new_ref_lines = [], [], [] for sys, src, ref in zip(sys_lines, src_lines, ref_lines): cache_key = json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False) if cache_key not in cache: new_sys_lines.append(sys) new_src_lines.append(src) new_ref_lines.append(ref) data.append({"mt": sys, "src": src, "ref": ref}) logging.info(f"COMET cache info: {len(sys_lines)-len(data)}/{len(sys_lines)}") if data: if comet_model_name in comet_model_mapping: comet_model = load_from_checkpoint(os.path.join(comet_saving_dir, comet_model_mapping[comet_model_name])) else: model_path = download_model(comet_model_name, saving_directory=comet_saving_dir) comet_model = load_from_checkpoint(model_path) comet_model.eval() model_output = comet_model.predict(data, batch_size=batch_size, gpus=0 if cpu else 1) scores = model_output.scores with cache_lock: if os.path.exists(cache_file): with open(cache_file, 'r') as f: cache = json.load(f) else: cache = {} for (sys, src, ref), score in zip(zip(new_sys_lines, new_src_lines, new_ref_lines), scores): cache_key = json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False) cache[cache_key] = score with open(cache_file, 'w') as f: json.dump(cache, f, indent=2, ensure_ascii=False) with cache_lock: if os.path.exists(cache_file): with open(cache_file, 'r') as f: cache = json.load(f) final_scores = [cache[json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False)] for sys, src, ref in zip(sys_lines, src_lines, ref_lines)] return sum(final_scores)/len(final_scores) def comet_qe(**kwargs): sys_lines = kwargs["sys_lines"] src_lines = kwargs["src_lines"] comet_qe_model_name = kwargs["comet_qe_model_name"] comet_saving_dir = kwargs["comet_saving_dir"] comet_cache_dir = kwargs["comet_cache_dir"] batch_size = kwargs["batch_size"] cache_file = os.path.join(comet_cache_dir, 'comet_qe_cache.json') wait_until_path_exist(comet_saving_dir) cache_lock = threading.Lock() with cache_lock: if os.path.exists(cache_file): with open(cache_file, 'r') as f: cache = json.load(f) else: cache = {} data = [] new_sys_lines, new_src_lines = [], [] for sys, src in zip(sys_lines, src_lines): cache_key = json.dumps((comet_qe_model_name, sys, src), ensure_ascii=False) if cache_key not in cache: new_sys_lines.append(sys) new_src_lines.append(src) data.append({"mt": sys, "src": src, "ref": None}) logging.info(f"COMET-QE cache info: {len(sys_lines)-len(data)}/{len(sys_lines)}") if data: if comet_qe_model_name in comet_model_mapping: comet_model = load_from_checkpoint(os.path.join(comet_saving_dir, comet_model_mapping[comet_qe_model_name])) else:
model_path = download_model(comet_qe_model_name, saving_directory=comet_saving_dir) comet_model = load_from_checkpoint(model_path) comet_model.eval() model_output = comet_model.predict(data, batch_size=batch_size, gpus=1) scores = model_output.scores with cache_lock: if os.path.exists(cache_file): with open(cache_file, 'r') as f: cache = json.load(f) else: cache = {} for (sys, src), score in zip(zip(new_sys_lines, new_src_lines), scores): cache_key = json.dumps((comet_qe_model_name, sys, src), ensure_ascii=False) cache[cache_key] = score with open(cache_file, 'w') as f: json.dump(cache, f, indent=2, ensure_ascii=False) with cache_lock: if os.path.exists(cache_file): with open(cache_file, 'r') as f: cache = json.load(f) final_scores = [cache[json.dumps((comet_qe_model_name, sys, src), ensure_ascii=False)] for sys, src in zip(sys_lines, src_lines)] return sum(final_scores)/len(final_scores) def bleurt(**kwargs): sys_lines = kwargs["sys_lines"] ref_lines = kwargs["ref_lines"] bleurt_cache_dir = kwargs["bleurt_cache_dir"] bleurt_ckpt = kwargs["bleurt_ckpt"] batch_size = kwargs["batch_size"] cache_file = os.path.join(bleurt_cache_dir, 'bleurt_cache.json') cache_lock = threading.Lock() with cache_lock: if os.path.exists(cache_file): with open(cache_file, 'r') as f: cache = json.load(f) else: cache = {} new_sys_lines, new_ref_lines = [], [] for sys, ref in zip(sys_lines, ref_lines): cache_key = json.dumps((sys, ref), ensure_ascii=False) if cache_key not in cache: new_sys_lines.append(sys) new_ref_lines.append(ref) logging.info(f"BLEURT cache info: {len(sys_lines)-len(new_sys_lines)}/{len(sys_lines)}") assert len(new_sys_lines) == len(new_ref_lines) if len(new_sys_lines) > 0: bleurt_model = bleurt_score.LengthBatchingBleurtScorer(bleurt_ckpt) scores = bleurt_model.score(references=new_ref_lines, candidates=new_sys_lines, batch_size=batch_size) with cache_lock: if os.path.exists(cache_file): with open(cache_file, 'r') as f: cache = json.load(f) else: cache = {} for (sys, ref), score in zip(zip(new_sys_lines, new_ref_lines), scores): cache_key = json.dumps((sys, ref), ensure_ascii=False) cache[cache_key] = score with open(cache_file, 'w') as f: json.dump(cache, f, indent=2, ensure_ascii=False) with cache_lock: if os.path.exists(cache_file): with open(cache_file, 'r') as f: cache = json.load(f) final_scores = [cache[json.dumps((sys, ref), ensure_ascii=False)] for sys, ref in zip(sys_lines, ref_lines)] return sum(final_scores)/len(final_scores) def bleu(**kwargs): sys_lines = kwargs["sys_lines"] ref_lines = kwargs["ref_lines"] tgt_lang = kwargs["tgt_lang"] bleu = BLEU(tokenize="flores200") assert len(sys_lines) == len(ref_lines) return bleu.corpus_score(sys_lines, [ref_lines]).score def readlines(file_path): if not file_path: return [] with open(file_path, 'r') as f: lines = f.readlines() return [l.strip() for l in lines] def check_equal_num_lines(paths): if len(paths) <= 1: return True else: return all([len(readlines(p)) == len(readlines(paths[0])) for p in paths[1:]]) def parse_args(): parser = argparse.ArgumentParser("", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--sys", nargs='+', required=True) parser.add_argument("--src", type=str, required=True) parser.add_argument("--ref", type=str) parser.add_argument("--tgt-lang", type=str) parser.add_argument("--comet-qe-model-name", type=str, default="wmt21-comet-qe-da") parser.add_argument("--comet-model-name", type=str, default="Unbabel/wmt22-comet-da") parser.add_argument("--comet-cache-dir", type=str, 
default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'cache', 'comet')) parser.add_argument("--comet-saving-dir", type=str, default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'eval_ckpt')) parser.add_argument("--bleurt-ckpt", type=str, default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'eval_ckpt', 'BLEURT-20')) parser.add_argument("--bleurt-cache-dir", type=str, default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'cache', 'bleurt')) parser.add_argument("--metric", type=str, choices=["comet", "comet_qe", "bleurt", "bleu"], required=True) parser.add_argument("--batch-size", type=int, default=16) parser.add_argument("--cpu", action="store_true") return parser.parse_args() if __name__ == "__main__": args = parse_args() sys_file_paths = args.sys src_file_path = args.src ref_file_path = args.ref tgt_lang = args.tgt_lang comet_qe_model_name = args.comet_qe_model_name comet_model_name = args.comet_model_name comet_cache_dir = args.comet_cache_dir comet_saving_dir = args.comet_saving_dir bleurt_ckpt = args.bleurt_ckpt bleurt_cache_dir = args.bleurt_cache_dir metric = args.metric batch_size = args.batch_size cpu = args.cpu assert tgt_lang or metric != "bleu", "BLEU need to specify target language. (--tgt-lang xx)" sys_lines_lst = [ readlines(v) for v in sys_file_paths ] src_lines = readlines(src_file_path) ref_lines = readlines(ref_file_path) if metric != "comet_qe": assert all([len(src_lines) == len(ref_lines)] + [len(sys_lines) == len(src_lines) for sys_lines in sys_lines_lst]) else: assert all([len(sys_lines) == len(src_lines) for sys_lines in sys_lines_lst]) scorer = eval(metric) scores = [ scorer(**{ "sys_lines": sys_lines, "src_lines": src_lines, "ref_lines": ref_lines, "comet_qe_model_name": comet_qe_model_name, "comet_model_name": comet_model_name, "comet_cache_dir": comet_cache_dir, "comet_saving_dir": comet_saving_dir, "bleurt_ckpt": bleurt_ckpt, "bleurt_cache_dir": bleurt_cache_dir, "batch_size": batch_size, "cpu": cpu, "tgt_lang": tgt_lang }) for sys_lines in sys_lines_lst ] for n, s in zip(sys_file_paths, scores): if metric == "bleu": print(f"{os.path.basename(n)}: {s:.1f}") else: print(f"{os.path.basename(n)}: {s*100:.1f}")
11,174
37.402062
155
py
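The four scorers in the evaluation script above share one caching pattern: each scored tuple is serialized with json.dumps and used as a key into an on-disk JSON dict. A minimal, self-contained sketch of that scheme, with made-up strings and a made-up score:

import json

cache = {}
key = json.dumps(("Unbabel/wmt22-comet-da", "Hallo, Welt!", "Hello, world!", "Hallo Welt!"), ensure_ascii=False)
cache[key] = 0.8123  # hypothetical score, not a real COMET output
assert key in cache and json.loads(key)[0] == "Unbabel/wmt22-comet-da"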
MAPS-mt
MAPS-mt-main/scripts/knowledge-selection.py
import os
import torch
import json
import random
import logging
import argparse
import threading
import numpy as np
from sacrebleu.metrics import BLEU
from comet import load_from_checkpoint, download_model

comet_model_mapping = {
    "wmt21-comet-qe-da": "wmt21-comet-qe-da/checkpoints/model.ckpt",
}

def seed_everything(TORCH_SEED):
    random.seed(TORCH_SEED)
    os.environ['PYTHONHASHSEED'] = str(TORCH_SEED)
    np.random.seed(TORCH_SEED)
    torch.manual_seed(TORCH_SEED)
    torch.cuda.manual_seed_all(TORCH_SEED)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

def bleu(**kwargs):
    sys_lines = kwargs["sys_lines"]
    ref_lines = kwargs["ref_lines"]
    tgt_lang = kwargs["tgt_lang"]
    # Sentence-level BLEU: score each candidate line against its own reference.
    if tgt_lang == "zh":
        return [BLEU(tokenize="zh").corpus_score([sys_line], [[ref_line]]).score
                for sys_line, ref_line in zip(sys_lines, ref_lines)]
    else:
        return [BLEU().corpus_score([sys_line], [[ref_line]]).score
                for sys_line, ref_line in zip(sys_lines, ref_lines)]

def randscore(**kwargs):
    sys_lines = kwargs["sys_lines"]
    n_line = len(sys_lines)
    return [random.uniform(0, 100) for _ in range(n_line)]

def comet(**kwargs):
    sys_lines = kwargs["sys_lines"]
    src_lines = kwargs["src_lines"]
    ref_lines = kwargs["ref_lines"]
    comet_model_name = kwargs["comet_model_name"]
    comet_saving_dir = kwargs["comet_saving_dir"]
    comet_cache_dir = kwargs["comet_cache_dir"]
    batch_size = kwargs["batch_size"]
    cache_file = os.path.join(comet_cache_dir, 'comet_cache.json')
    cache_lock = threading.Lock()
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
        else:
            cache = {}
    data = []
    new_sys_lines, new_src_lines, new_ref_lines = [], [], []
    for sys, src, ref in zip(sys_lines, src_lines, ref_lines):
        cache_key = json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False)
        if cache_key not in cache:
            new_sys_lines.append(sys)
            new_src_lines.append(src)
            new_ref_lines.append(ref)
            data.append({"mt": sys, "src": src, "ref": ref})
    logging.info(f"COMET cache info: {len(sys_lines)-len(data)}/{len(sys_lines)}")
    if data:
        if comet_model_name in comet_model_mapping:
            comet_model = load_from_checkpoint(os.path.join(comet_saving_dir, comet_model_mapping[comet_model_name]))
        else:
            model_path = download_model(comet_model_name, saving_directory=comet_saving_dir)
            comet_model = load_from_checkpoint(model_path)
        comet_model.eval()
        model_output = comet_model.predict(data, batch_size=batch_size, gpus=1)
        scores = model_output.scores
        with cache_lock:
            if os.path.exists(cache_file):
                with open(cache_file, 'r') as f:
                    cache = json.load(f)
            else:
                cache = {}
            for (sys, src, ref), score in zip(zip(new_sys_lines, new_src_lines, new_ref_lines), scores):
                cache_key = json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False)
                cache[cache_key] = score
            with open(cache_file, 'w') as f:
                json.dump(cache, f, indent=2, ensure_ascii=False)
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
    final_scores = [cache[json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False)]
                    for sys, src, ref in zip(sys_lines, src_lines, ref_lines)]
    return final_scores

def comet_qe(**kwargs):
    sys_lines = kwargs["sys_lines"]
    src_lines = kwargs["src_lines"]
    comet_qe_model_name = kwargs["comet_qe_model_name"]
    comet_saving_dir = kwargs["comet_saving_dir"]
    comet_cache_dir = kwargs["comet_cache_dir"]
    batch_size = kwargs["batch_size"]
    cache_file = os.path.join(comet_cache_dir, 'comet_qe_cache.json')
    cache_lock = threading.Lock()
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
        else:
            cache = {}
    data = []
    new_sys_lines, new_src_lines = [], []
    for sys, src in zip(sys_lines, src_lines):
        cache_key = json.dumps((comet_qe_model_name, sys, src), ensure_ascii=False)
        if cache_key not in cache:
            new_sys_lines.append(sys)
            new_src_lines.append(src)
            data.append({"mt": sys, "src": src, "ref": None})
    logging.info(f"COMET-QE cache info: {len(sys_lines)-len(data)}/{len(sys_lines)}")
    if data:
        if comet_qe_model_name in comet_model_mapping:
            comet_model = load_from_checkpoint(os.path.join(comet_saving_dir, comet_model_mapping[comet_qe_model_name]))
        else:
            model_path = download_model(comet_qe_model_name, saving_directory=comet_saving_dir)
            comet_model = load_from_checkpoint(model_path)
        comet_model.eval()
        model_output = comet_model.predict(data, batch_size=batch_size, gpus=1)
        scores = model_output.scores
        with cache_lock:
            if os.path.exists(cache_file):
                with open(cache_file, 'r') as f:
                    cache = json.load(f)
            else:
                cache = {}
            for (sys, src), score in zip(zip(new_sys_lines, new_src_lines), scores):
                cache_key = json.dumps((comet_qe_model_name, sys, src), ensure_ascii=False)
                cache[cache_key] = score
            with open(cache_file, 'w') as f:
                json.dump(cache, f, indent=2, ensure_ascii=False)
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
    final_scores = [cache[json.dumps((comet_qe_model_name, sys, src), ensure_ascii=False)]
                    for sys, src in zip(sys_lines, src_lines)]
    return final_scores

def readlines(file_path):
    if not file_path:
        return []
    with open(file_path, 'r') as f:
        lines = f.readlines()
    return [l.strip() for l in lines]

def argmax(lst):
    return lst.index(max(lst))

def parse_args():
    parser = argparse.ArgumentParser("", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--seed", type=int)
    parser.add_argument("--sys", nargs='+', required=True, help="candidates")
    parser.add_argument("--src", type=str, required=True, help="source")
    parser.add_argument("--ref", type=str, default=None, help="reference")
    parser.add_argument("--out", type=str, required=True, help="output path")
    parser.add_argument("--src-lang", type=str, required=True, help="source language code")
    parser.add_argument("--tgt-lang", type=str, required=True, help="target language code")
    parser.add_argument("--comet-qe-model-name", type=str, default="wmt21-comet-qe-da")
    parser.add_argument("--comet-model-name", type=str, default="Unbabel/wmt22-comet-da")
    parser.add_argument("--comet-saving-dir", type=str,
                        default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'eval_ckpt'))
    parser.add_argument("--comet-cache-dir", type=str,
                        default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'cache', 'comet'))
    parser.add_argument("--metric", type=str, choices=["bleu", "comet", "comet_qe", "randscore"], required=True)
    parser.add_argument("-bs", "--batch-size", type=int, default=32)
    return parser.parse_args()

def main(args):
    seed = args.seed
    sys_file_paths = args.sys
    src_file_path = args.src
    ref_file_path = args.ref
    out_file_path = args.out
    src_lang = args.src_lang
    tgt_lang = args.tgt_lang
    comet_qe_model_name = args.comet_qe_model_name
    comet_model_name = args.comet_model_name
    comet_saving_dir = args.comet_saving_dir
    comet_cache_dir = args.comet_cache_dir
    metric = args.metric
    batch_size = args.batch_size
    if seed:
        seed_everything(seed)
    scorer = eval(metric)
    sys_lines_lst = [readlines(v) for v in sys_file_paths]
    src_lines = readlines(src_file_path)
    ref_lines = readlines(ref_file_path)
    assert metric in ["comet_qe", "randscore"] or len(ref_lines) > 0
    assert all([len(sys_lines) == len(src_lines) for sys_lines in sys_lines_lst])
    combine_sys_lines = None
    metrics_lst = [
        scorer(**{
            "sys_lines": sys_lines,
            "src_lines": src_lines,
            "ref_lines": ref_lines,
            "src_lang": src_lang,
            "tgt_lang": tgt_lang,
            "comet_qe_model_name": comet_qe_model_name,
            "comet_model_name": comet_model_name,
            "comet_cache_dir": comet_cache_dir,
            "comet_saving_dir": comet_saving_dir,
            "batch_size": batch_size,
        })
        for sys_lines in sys_lines_lst
    ]
    if metrics_lst and (not combine_sys_lines):
        # For each source sentence, keep the candidate with the highest score.
        combine_sys_lines = []
        for i in range(len(src_lines)):
            metrics = [metrics[i] for metrics in metrics_lst]
            sys_lines = [sys_lines[i] for sys_lines in sys_lines_lst]
            max_idx = argmax(metrics)
            combine_sys_lines.append(sys_lines[max_idx])
    with open(out_file_path, 'w') as out_f:
        out_f.write("\n".join(combine_sys_lines) + '\n')

if __name__ == "__main__":
    args = parse_args()
    main(args)
9,515
36.027237
155
py
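The selection step in knowledge-selection.py above picks, per source sentence, the candidate with the highest metric score. A toy sketch with invented candidates and scores (no real metric is run):

def argmax(lst):
    return lst.index(max(lst))

cand_a = ["guten Morgen", "hallo Welt"]      # candidate file A
cand_b = ["guter Morgen", "Hallo, Welt!"]    # candidate file B
scores = [[0.71, 0.64], [0.69, 0.70]]        # hypothetical per-sentence scores for A and B
combined = [[cand_a, cand_b][argmax([s[i] for s in scores])][i] for i in range(2)]
assert combined == ["guten Morgen", "Hallo, Welt!"]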
MAPS-mt
MAPS-mt-main/scripts/alpaca-post-process.py
#! /usr/bin/python
import sys
import re
import os

# Collect every language name (plus the bare name without a parenthesized
# qualifier) from lang.list, then add variants observed in model outputs.
all_langs = []
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "lang.list"), 'r') as reader:
    for sample in reader:
        lang, code = sample.strip().split('\t')
        all_langs.append(lang)
        if '(' in lang:
            lang = lang[:lang.find('(')].strip()
            all_langs.append(lang)
all_langs = list(set(all_langs))
all_langs.extend(["Englisch", "Deutsch", "中文", "德文", "Chinesisch", "Français", "日本语", "英文", "中文(简体)", "Italiano", "英语(美国)", "Spanisch", "Arabic"])

def rep(s):
    return s.replace('(', r'\(').replace(')', r'\)')

all_langs = [rep(lang) for lang in all_langs]
pattern = re.compile('(' + '|'.join(all_langs) + '):.*')

def filter(sample):
    # Truncate hallucinated continuations at the first prompt-like prefix.
    sample = re.sub('A:.*', '', sample)
    sample = re.sub('Output:.*', '', sample)
    sample = re.sub('Input:.*', '', sample)
    sample = re.sub('输出:.*', '', sample)
    sample = re.sub('输入:.*', '', sample)
    sample = re.sub('Correct Translate:.*', '', sample)
    sample = re.sub('Incorrect Translate:.*', '', sample)
    sample = re.sub('Instruction:.*', '', sample)
    sample = re.sub("Let's extract the keywords in the following.*", '', sample)
    sample = re.sub("Use a few words to describe the topics of the following input sentence.*", '', sample)
    sample = re.sub("Let's write an (.*) sentence related to but different from the input (.*) sentence and translate it into.*", '', sample)
    sample = pattern.sub('', sample)
    return sample

for sample in sys.stdin:
    print(filter(sample.strip()))
1,574
31.8125
146
py
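The filter in alpaca-post-process.py above cuts a model response off at the first hallucinated language prefix. A toy check of that behaviour with a hand-built two-language pattern (the real pattern is assembled from lang.list):

import re

pattern = re.compile('(English|German):.*')
sample = "Guten Morgen. English: Good morning."
assert pattern.sub('', sample).strip() == "Guten Morgen."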
MAPS-mt
MAPS-mt-main/scripts/compare.py
import os
from comet.cli.compare import *
import threading
import logging
from bleurt import score as bleurt_score
from sacrebleu.metrics import BLEU

comet_model_mapping = {
    "wmt21-comet-qe-da": "wmt21-comet-qe-da/checkpoints/model.ckpt",
}

def wait_until_path_exist(path):
    while not os.path.isdir(path):
        pass
    return

def bleurt(**kwargs):
    sys_lines = kwargs["sys_lines"]
    ref_lines = kwargs["ref_lines"]
    bleurt_cache_dir = kwargs["bleurt_cache_dir"]
    bleurt_ckpt = kwargs["bleurt_ckpt"]
    batch_size = kwargs["batch_size"]
    cache_file = os.path.join(bleurt_cache_dir, 'bleurt_cache.json')
    cache_lock = threading.Lock()
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
        else:
            cache = {}
    new_sys_lines, new_ref_lines = [], []
    for sys, ref in zip(sys_lines, ref_lines):
        cache_key = json.dumps((sys, ref), ensure_ascii=False)
        if cache_key not in cache:
            new_sys_lines.append(sys)
            new_ref_lines.append(ref)
    logging.info(f"BLEURT cache info: {len(sys_lines)-len(new_sys_lines)}/{len(sys_lines)}")
    assert len(new_sys_lines) == len(new_ref_lines)
    if len(new_sys_lines) > 0:
        bleurt_model = bleurt_score.LengthBatchingBleurtScorer(bleurt_ckpt)
        scores = bleurt_model.score(references=new_ref_lines, candidates=new_sys_lines, batch_size=batch_size)
        with cache_lock:
            if os.path.exists(cache_file):
                with open(cache_file, 'r') as f:
                    cache = json.load(f)
            else:
                cache = {}
            for (sys, ref), score in zip(zip(new_sys_lines, new_ref_lines), scores):
                cache_key = json.dumps((sys, ref), ensure_ascii=False)
                cache[cache_key] = score
            with open(cache_file, 'w') as f:
                json.dump(cache, f, indent=2, ensure_ascii=False)
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
    final_scores = [cache[json.dumps((sys, ref), ensure_ascii=False)]
                    for sys, ref in zip(sys_lines, ref_lines)]
    return final_scores

def comet(**kwargs):
    sys_lines = kwargs["sys_lines"]
    src_lines = kwargs["src_lines"]
    ref_lines = kwargs["ref_lines"]
    comet_model_name = kwargs["comet_model_name"]
    comet_saving_dir = kwargs["comet_saving_dir"]
    comet_cache_dir = kwargs["comet_cache_dir"]
    batch_size = kwargs["batch_size"]
    cache_file = os.path.join(comet_cache_dir, 'comet_cache.json')
    wait_until_path_exist(comet_saving_dir)
    cache_lock = threading.Lock()
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
        else:
            cache = {}
    data = []
    new_sys_lines, new_src_lines, new_ref_lines = [], [], []
    for sys, src, ref in zip(sys_lines, src_lines, ref_lines):
        cache_key = json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False)
        if cache_key not in cache:
            new_sys_lines.append(sys)
            new_src_lines.append(src)
            new_ref_lines.append(ref)
            data.append({"mt": sys, "src": src, "ref": ref})
    logging.info(f"COMET cache info: {len(sys_lines)-len(data)}/{len(sys_lines)}")
    if data:
        if comet_model_name in comet_model_mapping:
            comet_model = load_from_checkpoint(os.path.join(comet_saving_dir, comet_model_mapping[comet_model_name]))
        else:
            model_path = download_model(comet_model_name, saving_directory=comet_saving_dir)
            comet_model = load_from_checkpoint(model_path)
        comet_model.eval()
        model_output = comet_model.predict(data, batch_size=batch_size, gpus=1)
        scores = model_output.scores
        with cache_lock:
            if os.path.exists(cache_file):
                with open(cache_file, 'r') as f:
                    cache = json.load(f)
            else:
                cache = {}
            for (sys, src, ref), score in zip(zip(new_sys_lines, new_src_lines, new_ref_lines), scores):
                cache_key = json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False)
                cache[cache_key] = score
            with open(cache_file, 'w') as f:
                json.dump(cache, f, indent=2, ensure_ascii=False)
    with cache_lock:
        if os.path.exists(cache_file):
            with open(cache_file, 'r') as f:
                cache = json.load(f)
    final_scores = [cache[json.dumps((comet_model_name, sys, src, ref), ensure_ascii=False)]
                    for sys, src, ref in zip(sys_lines, src_lines, ref_lines)]
    return final_scores

def bleu(**kwargs):
    sys_lines = kwargs["sys_lines"]
    ref_lines = kwargs["ref_lines"]
    tgt_lang = kwargs["tgt_lang"]
    assert len(sys_lines) == len(ref_lines)
    res = []
    scorer = BLEU(tokenize="flores200")
    for sys, ref in zip(sys_lines, ref_lines):
        res.append(scorer.corpus_score([sys], [[ref]]).score)
    return res

def display_statistical_results(data: Statistical_test_info) -> None:
    """Print out the T-test results for a system pair.

    Args:
        data (Statistical_test_info): Stats to be printed out.
    """
    print("==========================")
    print("x_name:", data["x_name"])
    print("y_name:", data["y_name"])
    print("\nBootstrap Resampling Results:")
    for k, v in data["bootstrap_resampling"].items():
        print("{}:\t{:.4f}".format(k, v))
    print("\nPaired T-Test Results:")
    for k, v in data["paired_t-test"].items():
        print("{}:\t{:.4f}".format(k, v))
    x_seg_scores = data["bootstrap_resampling"]["x-mean"]
    y_seg_scores = data["bootstrap_resampling"]["y-mean"]
    best_system = data["x_name"] if x_seg_scores > y_seg_scores else data["y_name"]
    worse_system = data["x_name"] if x_seg_scores < y_seg_scores else data["y_name"]
    if data["paired_t-test"]["p_value"] <= 0.05:
        print("Null hypothesis rejected according to t-test.")
        print("Scores differ significantly across samples.")
        print(f"{best_system} outperforms {worse_system}.")
    else:
        print("Null hypothesis can't be rejected.\nBoth systems have equal averages.")

def t_tests_summary(
    t_test_results: List[Statistical_test_info],
    translations: Tuple[Path_fr],
    threshold_p_value: float = 0.05,
) -> None:
    """Prints T-tests Summary

    Args:
        t_test_results (List[Statistical_test_info]): List of stats between systems.
        translations (Tuple[Path_fr]): Path to each system.
        threshold_p_value (float): Threshold for p_value. Defaults to 0.05.
    """
    n = len(translations)
    name2id = {os.path.basename(name): i for i, name in enumerate(translations)}
    grid = [[None] * n for name in translations]
    for t_test in t_test_results:
        p_value = t_test["paired_t-test"]["p_value"]
        x_id = name2id[t_test["x_name"]]
        y_id = name2id[t_test["y_name"]]
        grid[x_id][y_id] = False
        grid[y_id][x_id] = False
        if p_value < threshold_p_value:
            x_seg_scores = t_test["bootstrap_resampling"]["x-mean"]
            y_seg_scores = t_test["bootstrap_resampling"]["y-mean"]
            if x_seg_scores > y_seg_scores:
                grid[x_id][y_id] = True
            else:
                grid[y_id][x_id] = True
    # Add the row's name aka the system's name.
    grid = [(os.path.basename(name),) + tuple(row) for name, row in zip(translations, grid)]
    print("Summary")
    print("If system_x is better than system_y then:")
    print(f"Null hypothesis rejected according to t-test with p_value={threshold_p_value}.")
    print("Scores differ significantly across samples.")
    print(tabulate(grid, headers=("system_x \\ system_y",) + tuple([os.path.basename(t) for t in translations])))

def score(cfg: Namespace, systems: List[Dict[str, List[str]]]) -> np.ndarray:
    """Scores each system with a given model.

    Args:
        cfg (Namespace): comet-compare configs.
        systems (List[Dict[str, List[str]]]): List with translations for each system.

    Return:
        np.ndarray: segment-level scores flatten.
    """
    seg_scores = []
    for system in systems:
        samples = [dict(zip(system, t)) for t in zip(*system.values())]
        sys_lines = [s["mt"] for s in samples]
        src_lines = [s["src"] for s in samples]
        ref_lines = [s["ref"] for s in samples]
        comet_model_name = cfg.model
        comet_saving_dir = cfg.model_storage_path
        comet_cache_dir = cfg.comet_cache_dir
        bleurt_ckpt = cfg.bleurt_ckpt
        bleurt_cache_dir = cfg.bleurt_cache_dir
        batch_size = cfg.batch_size
        metric = cfg.metric
        tgt_lang = cfg.tgt_lang
        assert tgt_lang or metric != "bleu", "BLEU needs a target language (--tgt-lang xx)."
        seg_scores += eval(metric)(
            sys_lines=sys_lines,
            src_lines=src_lines,
            ref_lines=ref_lines,
            comet_model_name=comet_model_name,
            comet_saving_dir=comet_saving_dir,
            comet_cache_dir=comet_cache_dir,
            bleurt_ckpt=bleurt_ckpt,
            bleurt_cache_dir=bleurt_cache_dir,
            batch_size=batch_size,
            tgt_lang=tgt_lang,
        )
    n = len(systems[0]["src"])
    # [grouper](https://docs.python.org/3/library/itertools.html#itertools-recipes)
    seg_scores = list(zip(*[iter(seg_scores)] * n))
    seg_scores = np.array(seg_scores, dtype="float32")  # num_systems x num_translations
    return seg_scores

def get_cfg() -> Namespace:
    """Parse the CLI options and arguments.

    Return:
        Namespace: comet-compare configs.
    """
    parser = ArgumentParser(
        description="Command for comparing multiple MT systems' translations."
    )
    parser.add_argument("--tgt-lang", type=str)
    parser.add_argument("-s", "--sources", type=Path_fr)
    parser.add_argument("-r", "--references", type=Path_fr)
    parser.add_argument("-t", "--translations", nargs="*", type=Path_fr)
    parser.add_argument("-d", "--sacrebleu_dataset", type=str)
    parser.add_argument("--batch_size", type=int, default=8)
    parser.add_argument("--gpus", type=int, default=1)
    parser.add_argument(
        "--quiet", action="store_true", help="Sets all loggers to ERROR level."
    )
    parser.add_argument(
        "--only_system", action="store_true", help="Prints only the final system score."
    )
    parser.add_argument(
        "--num_splits",
        type=int,
        default=300,
        help="Number of random partitions used in Bootstrap resampling.",
    )
    parser.add_argument(
        "--sample_ratio",
        type=float,
        default=0.4,
        help="Percentage of the testset to use in each split.",
    )
    parser.add_argument(
        "--t_test_alternative",
        type=str,
        default="two-sided",
        help=(
            "Alternative hypothesis from scipy.stats.ttest_rel. The following options"
            + " are available: 'two-sided', 'less', 'greater'. Defaults to 'two-sided'"
        ),
    )
    parser.add_argument(
        "--to_json",
        type=str,
        default="",
        help="Exports results to a json file.",
    )
    parser.add_argument(
        "--model",
        type=str,
        default="Unbabel/wmt22-comet-da",
        help="COMET model to be used.",
    )
    parser.add_argument(
        "--model_storage_path",
        help=(
            "Path to the directory where models will be stored. "
            + "By default its saved in ~/.cache/torch/unbabel_comet/"
        ),
        default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'eval_ckpt'),
    )
    parser.add_argument(
        "--num_workers",
        help="Number of workers to use when loading data.",
        type=int,
        default=None,
    )
    parser.add_argument(
        "--disable_cache",
        action="store_true",
        help=(
            "Disables sentence embeddings caching."
            + " This makes inference slower but saves memory."
        ),
    )
    parser.add_argument(
        "--disable_length_batching",
        action="store_true",
        help="Disables length batching. This makes inference slower.",
    )
    parser.add_argument(
        "--print_cache_info",
        action="store_true",
        help="Print information about COMET cache.",
    )
    parser.add_argument(
        "--comet_cache_dir",
        type=str,
        default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'cache', 'comet')
    )
    parser.add_argument(
        "--bleurt_ckpt",
        type=str,
        default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'eval_ckpt', 'BLEURT-20')
    )
    parser.add_argument(
        "--bleurt_cache_dir",
        type=str,
        default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'cache', 'bleurt')
    )
    parser.add_argument(
        "--metric", type=str, choices=["comet", "bleurt", "bleu"], required=True
    )
    cfg = parser.parse_args()
    if cfg.sources is None and cfg.sacrebleu_dataset is None:
        parser.error("You must specify a source (-s) or a sacrebleu dataset (-d)")
    if cfg.sacrebleu_dataset is not None:
        if cfg.references is not None or cfg.sources is not None:
            parser.error(
                "Cannot use sacrebleu datasets (-d) with manually-specified datasets (-s and -r)"
            )
        try:
            testset, langpair = cfg.sacrebleu_dataset.rsplit(":", maxsplit=1)
            cfg.sources = Path_fr(get_source_file(testset, langpair))
            cfg.references = Path_fr(get_reference_files(testset, langpair)[0])
        except ValueError:
            parser.error(
                "SacreBLEU testset format must be TESTSET:LANGPAIR, e.g., wmt20:de-en"
            )
        except Exception as e:
            import sys
            print("SacreBLEU error:", e, file=sys.stderr)
            sys.exit(1)
    # if cfg.metric == "comet":
    #     if cfg.model.endswith(".ckpt") and os.path.exists(cfg.model):
    #         cfg.model_path = cfg.model
    #     else:
    #         cfg.model_path = os.path.join(cfg.model_storage_path, comet_model_mapping[cfg.model])
    return cfg, parser

def compare_command() -> None:
    """CLI that uses comet to compare multiple systems in a pairwise manner."""
    cfg, parser = get_cfg()
    seed_everything(1)
    assert len(cfg.translations) > 1, "You must provide at least 2 translation files"
    with open(cfg.sources(), encoding="utf-8") as fp:
        sources = [line.strip() for line in fp.readlines()]
    translations = []
    for system in cfg.translations:
        with open(system, mode="r", encoding="utf-8") as fp:
            translations.append([line.strip() for line in fp.readlines()])
    if cfg.references is not None:
        with open(cfg.references(), encoding="utf-8") as fp:
            references = [line.strip() for line in fp.readlines()]
        systems = [
            {"src": sources, "mt": system, "ref": references} for system in translations
        ]
    else:
        references = None
        systems = [{"src": sources, "mt": system} for system in translations]
    seg_scores = score(cfg, systems)
    population_size = seg_scores.shape[1]
    sys_scores = bootstrap_resampling(
        seg_scores,
        sample_size=max(int(population_size * cfg.sample_ratio), 1),
        num_splits=cfg.num_splits,
    )
    results = list(pairwise_bootstrap(sys_scores, cfg.translations))
    # Paired T_Test Results:
    pairs = combinations(zip(cfg.translations, seg_scores), 2)
    for (x_name, x_seg_scores), (y_name, y_seg_scores) in pairs:
        ttest_result = stats.ttest_rel(
            x_seg_scores, y_seg_scores, alternative=cfg.t_test_alternative
        )
        for res in results:
            if res["x_name"] == x_name and res["y_name"] == y_name:
                res["paired_t-test"] = {
                    "statistic": ttest_result.statistic,
                    "p_value": ttest_result.pvalue,
                }
    for res in results:
        res["x_name"] = os.path.basename(res["x_name"])
        res["y_name"] = os.path.basename(res["y_name"])
    info = {
        "statistical_results": results,
        "source": sources,
        "translations": [
            {
                "name": os.path.basename(name),
                "mt": trans,
                "scores": scores.tolist(),
            }
            for name, trans, scores in zip(cfg.translations, translations, seg_scores)
        ],
    }
    if references is not None:
        info["reference"] = references
    for data in results:
        display_statistical_results(data)
        print()
    t_tests_summary(results, tuple(cfg.translations))
    print()
    if cfg.to_json != "":
        with open(cfg.to_json, "w", encoding="utf-8") as outfile:
            json.dump(info, outfile, ensure_ascii=False, indent=4)
        print("Predictions saved in: {}.".format(cfg.to_json))

if __name__ == "__main__":
    compare_command()
17,345
34.4
155
py
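score() in compare.py above flattens every system's segment scores into one list and regroups them with the zip-over-repeated-iterator ("grouper") idiom. The idiom on toy numbers:

seg_scores = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6]  # two systems, three segments each
n = 3
per_system = list(zip(*[iter(seg_scores)] * n))
assert per_system == [(0.1, 0.2, 0.3), (0.4, 0.5, 0.6)]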
MAPS-mt
MAPS-mt-main/data/format_ask_demo.py
import random
import os
from langcodes import Language
import argparse
from .trigger_sents import SUPPORT_LANGS, TRIGGER_SENTS

DEMO_SENTS = {
    "en": [
        "Around the same time, Patrick Brown, a professor of biochemistry at Stanford University School of Medicine, became interested in developing new techniques for mapping genes.",
        "Libyan security officials say the Afriqiyah Airways plane was flying from Johannesburg, South Africa, Wednesday morning when it crashed short of the runway at the Tripoli airport.",
        "The victory completed a triumphant first season in charge for 38 - year - old Barca coach Pep Guardiola.",
        'On Friday night, protests continued in "an almost celebratory manner" near the QuikTrip until police arrived at around 11:00 p.m.',
        "You may access Bing-powered experiences when using other non-Microsoft services, such as those from Yahoo!",
    ],
    "zh": [
        '几乎是在同时,斯坦福医学院的生物化学教授Patrick Brown,对一种被称作"基因地图"的技术产生了很大兴趣。',
        "利比亚安全官员说,这架泛非航空公司的飞机从南非约翰内斯堡起飞,星期三早晨在的黎波里机场跑道上坠毁。",
        "这场胜利为38岁的巴萨主帅佩普·瓜迪奥拉执教巴萨的第一个赛季画上圆满的句号。",
        '周五晚上,抗议活动在QuikTrip附近以“几乎是庆祝的方式”继续进行,直到晚上11点左右警察抵达。',
        "当使用其他非Microsoft服务(如Yahoo!的服务)时,您也可以访问必应体验。",
    ],
    "de": [
        "Etwa zur gleichen Zeit interessierte sich Patrick Brown, Professor für Biochemie an der Stanford University School of Medicine, für die Entwicklung neuer Techniken zur Kartierung von Genen.",
        "Libysche Sicherheitsbeamte sagen, dass das Flugzeug von Afriqiyah Airways am Mittwochmorgen aus Johannesburg, Südafrika, flog, als es kurz vor der Landebahn des Flughafens von Tripolis abstürzte.",
        "Der Sieg beendete eine triumphale erste Saison als Trainer des 38-jährigen Barca-Trainers Pep Guardiola.",
        'Am Freitagabend gingen die Proteste in der Nähe des QuikTrip "fast feierlich" weiter, bis die Polizei gegen 23:00 Uhr eintraf.',
        "Sie können auf Bing-basierte Umgebungen zugreifen, wenn Sie andere Dienste verwenden, die nicht von Microsoft stammen, z. B. die von Yahoo!",
    ],
    "ja": [
        'ほぼ同時期に、スタンフォード医科大学の生化学教授Patrick Brownが、「遺伝子マッピング」と呼ばれる技術に興味を持った。',
        "リビアの治安当局によると、パン・アフリカン航空の飛行機は南アフリカのヨハネスブルグを離陸し、水曜日の朝、トリポリ空港の滑走路に墜落したという。",
        "この勝利で、38歳のバルセロナのボス、ペップ・グアルディオラのバルサ指揮官としての初シーズンが幕を閉じました。",
        '金曜日の夜、午後11時ごろに警察が到着するまで、抗議活動はQuikTripの近くで「ほとんど祝祭的な方法」で続けられました。',
        "また、Yahooのサービスなど、Microsoft以外のサービスを利用する際にも、Bingエクスペリエンスを利用することができます。",
    ],
    "fr": [
        "À peu près à la même époque, Patrick Brown, professeur de biochimie à la faculté de médecine de l'Université Stanford, s'est intéressé au développement de nouvelles techniques de cartographie des gènes.",
        "Les autorités libyennes de sécurité affirment que l'avion d'Afriqiyah Airways était en provenance de Johannesburg (Afrique du Sud) mercredi matin lorsqu'il s'est écrasé avant la piste d'atterrissage de l'aéroport de Tripoli.",
        "Cette victoire a couronné la première saison triomphale de Pep Guardiola, 38 ans, à la tête du Barça.",
        'Vendredi soir, les manifestations se sont poursuivies "d\'une manière presque festive" près du QuikTrip jusqu\'à l\'arrivée de la police vers 23 heures.',
        "Vous pouvez accéder à des expériences propulsées par Bing lorsque vous utilisez d'autres services non Microsoft, tels que ceux de Yahoo!",
    ],
}

demo_dict = {}
for src_lng in SUPPORT_LANGS:
    for tgt_lng in SUPPORT_LANGS:
        if src_lng == tgt_lng:
            continue
        else:
            demo_dict[(src_lng, tgt_lng)] = [
                (tri_sent, src_demo, tgt_demo)
                for tri_sent, src_demo, tgt_demo in zip(TRIGGER_SENTS[src_lng], DEMO_SENTS[src_lng], DEMO_SENTS[tgt_lng])
            ]

def parse_args():
    parser = argparse.ArgumentParser("", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-w', "--workspace", type=str, default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'), help="Workspace dir")
    parser.add_argument('-tn', "--test-name", type=str, required=True, help="wmt22/wmt21/...")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument('-s', "--src", type=str, required=True, help='source lang')
    parser.add_argument('-t', "--tgt", type=str, required=True, help='target lang')
    return parser.parse_args()

def main(args):
    workspace = args.workspace
    data_dir = os.path.join(workspace, "data")
    raw_dir = os.path.join(data_dir, "raw")
    format_dir = os.path.join(data_dir, "format")
    test_name = args.test_name
    seed = args.seed
    src = args.src
    tgt = args.tgt
    src_full = Language.make(language=src).display_name()
    tgt_full = Language.make(language=tgt).display_name()

    # seed random
    random.seed(seed)

    # read files
    with open(os.path.join(raw_dir, f"{test_name}.{src}-{tgt}.{src}")) as test_src_f:
        test_src_lines = [l.strip() for l in test_src_f.readlines()]

    out_file_path = os.path.join(format_dir, f"{test_name}.{src}-{tgt}.{src}.ask-demo")
    demos = demo_dict[(src, tgt)]
    with open(out_file_path, 'w') as out_f:
        for id, src_line in enumerate(test_src_lines):
            all_items = demos + [(src_line, None, None)]
            prompt_lst = []
            for it in all_items:
                it_src, it_demo_src, it_demo_tgt = it
                s = f"Let's write {'an' if src_full == 'English' else 'a'} {src_full} sentence related to but different from the input {src_full} sentence and translate it into {tgt_full}\n" + \
                    f"Input {src_full} sentence: {it_src}\n" + \
                    (f"Output {src_full}-{tgt_full} sentence pair: {it_demo_src}\t{it_demo_tgt}" if (it_demo_src and it_demo_tgt) else f"Output {src_full}-{tgt_full} sentence pair:")
                prompt_lst.append(s)
            prompt = "\n\n".join(prompt_lst)
            out_f.write(
                f"{id:04}\n"
                f"{prompt}\n\n\n"
            )

if __name__ == "__main__":
    args = parse_args()
    main(args)
6,098
56
235
py
MAPS-mt
MAPS-mt-main/data/format_ask_topic.py
import random
import os
from langcodes import Language
import argparse
from .trigger_sents import SUPPORT_LANGS, TRIGGER_SENTS

# One topic description per trigger sentence; the entries must be separate
# list items so that zip() pairs each trigger sentence with its own topic.
TOPICS = [
    "Health, medicine",
    "Accident, aircraft crash",
    "Sports, spanish football",
    "Politics",
    "Business",
]

demo_dict = {}
for src_lng in SUPPORT_LANGS:
    for tgt_lng in SUPPORT_LANGS:
        if src_lng == tgt_lng:
            continue
        else:
            demo_dict[(src_lng, tgt_lng)] = [
                (tri_sent, topics)
                for tri_sent, topics in zip(TRIGGER_SENTS[src_lng], TOPICS)
            ]

def parse_args():
    parser = argparse.ArgumentParser("", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-w', "--workspace", type=str, default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'), help="Workspace dir")
    parser.add_argument('-tn', "--test-name", type=str, required=True, help="wmt22/wmt21/...")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument('-s', "--src", type=str, required=True, help='source lang')
    parser.add_argument('-t', "--tgt", type=str, required=True, help='target lang')
    return parser.parse_args()

def main(args):
    workspace = args.workspace
    data_dir = os.path.join(workspace, "data")
    raw_dir = os.path.join(data_dir, "raw")
    format_dir = os.path.join(data_dir, "format")
    test_name = args.test_name
    seed = args.seed
    src = args.src
    tgt = args.tgt
    src_full = Language.make(language=src).display_name()
    tgt_full = Language.make(language=tgt).display_name()

    # seed random
    random.seed(seed)

    # read files
    with open(os.path.join(raw_dir, f"{test_name}.{src}-{tgt}.{src}")) as test_src_f:
        test_src_lines = [l.strip() for l in test_src_f.readlines()]

    out_file_path = os.path.join(format_dir, f"{test_name}.{src}-{tgt}.{src}.ask-topic")
    demos = demo_dict[(src, tgt)]
    with open(out_file_path, 'w') as out_f:
        for id, src_line in enumerate(test_src_lines):
            all_items = demos + [(src_line, None)]
            prompt_lst = []
            for it in all_items:
                it_src, it_topic = it
                s = f"Use a few words to describe the topics of the following input sentence.\n" + \
                    f"Input: {it_src}\n" + \
                    (f"Topics: {it_topic}" if it_topic else "Topics:")
                prompt_lst.append(s)
            prompt = "\n\n".join(prompt_lst)
            out_f.write(
                f"{id:04}\n"
                f"{prompt}\n\n\n"
            )

if __name__ == "__main__":
    args = parse_args()
    main(args)
2,698
34.051948
148
py
MAPS-mt
MAPS-mt-main/data/format_ask_kw.py
import random
import os
from langcodes import Language
import argparse
from .trigger_sents import SUPPORT_LANGS, TRIGGER_SENTS

KEYWORDS = {
    "en": [
        ["Stanford University", "School of Medicine"],
        ["JAS 39C Gripen", "commercial flights"],
        ["Barça", "Sevilla"],
        ["Whitehall", "Downing Street", "Prime Minister's official residence"],
        ["Yahoo!", "Microsoft"],
    ],
    "zh": [
        ["斯坦福大学", "医学院"],
        ["JAS 39C 鹰狮战斗机", "商业航班"],
        ["巴萨", "塞维利亚队"],
        ["白厅", "唐宁街", "首相官邸"],
        ["雅虎", "微软"],
    ],
    "de": [
        ["Stanford Universität", "Medizinische Fakultät"],
        ["JAS 39C Gripen", "kommerzielle Flüge"],
        ["Barça", "Sevilla"],
        ["Whitehall", "Downing Straße", "offizielle Residenz des Premierministers"],
        ["Yahoo!", "Microsoft"],
    ],
    "ja": [
        ["スタンフォード大学", "医学部"],
        ["JAS 39C Gripen", "商用フライト"],
        ["バルサ", "セビージャ"],
        ["ホワイトホール", "ダウニングストリート", "首相官邸"],
        ["ヤフー", "マイクロソフト"],
    ],
    "fr": [
        ["Université Stanford", "l'école de médecine"],
        ["JAS 39C Gripen", "les vols commerciaux"],
        ["Barça", "Sevilla"],
        ["Whitehall", "Downing Street", "la résidence officielle du Premier ministre"],
        ["Yahoo!", "Microsoft"],
    ],
}

demo_dict = {}
for src_lng in SUPPORT_LANGS:
    for tgt_lng in SUPPORT_LANGS:
        if src_lng == tgt_lng:
            continue
        else:
            demo_dict[(src_lng, tgt_lng)] = [
                (tri_sent, ", ".join([f"{src_kw}={tgt_kw}" for src_kw, tgt_kw in zip(src_kw_lst, tgt_kw_lst)]))
                for tri_sent, src_kw_lst, tgt_kw_lst in zip(TRIGGER_SENTS[src_lng], KEYWORDS[src_lng], KEYWORDS[tgt_lng])
            ]

def parse_args():
    parser = argparse.ArgumentParser("", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-w', "--workspace", type=str, default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'), help="Workspace dir")
    parser.add_argument('-tn', "--test-name", type=str, required=True, help="wmt22/wmt21/...")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument('-s', "--src", type=str, required=True, help='source lang')
    parser.add_argument('-t', "--tgt", type=str, required=True, help='target lang')
    return parser.parse_args()

def main(args):
    workspace = args.workspace
    data_dir = os.path.join(workspace, "data")
    raw_dir = os.path.join(data_dir, "raw")
    format_dir = os.path.join(data_dir, "format")
    test_name = args.test_name
    seed = args.seed
    src = args.src
    tgt = args.tgt
    src_full = Language.make(language=src).display_name()
    tgt_full = Language.make(language=tgt).display_name()

    # seed random
    random.seed(seed)

    # read files
    with open(os.path.join(raw_dir, f"{test_name}.{src}-{tgt}.{src}")) as test_src_f:
        test_src_lines = [l.strip() for l in test_src_f.readlines()]

    out_file_path = os.path.join(format_dir, f"{test_name}.{src}-{tgt}.{src}.ask-kw")
    demos = demo_dict[(src, tgt)]
    with open(out_file_path, 'w') as out_f:
        for id, src_line in enumerate(test_src_lines):
            all_items = demos + [(src_line, None)]
            prompt_lst = []
            for it in all_items:
                it_src, it_kw = it
                s = f"Let's extract the keywords in the following {src_full} sentence, and then translate these keywords into {tgt_full}.\n" + \
                    f"{src_full}: {it_src}\n" + \
                    (f"Keyword Pairs: {it_kw}" if it_kw else "Keyword Pairs:")
                prompt_lst.append(s)
            prompt = "\n\n".join(prompt_lst)
            out_f.write(
                f"{id:04}\n"
                f"{prompt}\n\n\n"
            )

if __name__ == "__main__":
    args = parse_args()
    main(args)
3,916
35.95283
148
py
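The demo entries in format_ask_kw.py above render keyword translations as a comma-separated "src=tgt" string; a spot-check using the file's own first English and Chinese keyword lists:

src_kw_lst = ["Stanford University", "School of Medicine"]
tgt_kw_lst = ["斯坦福大学", "医学院"]
pairs = ", ".join([f"{src_kw}={tgt_kw}" for src_kw, tgt_kw in zip(src_kw_lst, tgt_kw_lst)])
assert pairs == "Stanford University=斯坦福大学, School of Medicine=医学院"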
MAPS-mt
MAPS-mt-main/data/format_base.py
import random
import os
from langcodes import Language
import argparse

def parse_args():
    parser = argparse.ArgumentParser("", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-w', "--workspace", type=str, default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'), help="Workspace dir")
    parser.add_argument('-tn', "--test-name", type=str, required=True, help="wmt22/wmt21/...")
    parser.add_argument('-vn', "--valid-name", type=str, help="wmt22/wmt21/...")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument('-s', "--src", type=str, required=True, help='source lang')
    parser.add_argument('-t', "--tgt", type=str, required=True, help='target lang')
    parser.add_argument('-n', "--n-shot", type=int, required=True, help='# shot.')
    return parser.parse_args()

def main(args):
    workspace = args.workspace
    data_dir = os.path.join(workspace, "data")
    raw_dir = os.path.join(data_dir, "raw")
    format_dir = os.path.join(data_dir, "format")
    test_name = args.test_name
    valid_name = args.valid_name
    seed = args.seed
    src = args.src
    tgt = args.tgt
    src_full = Language.make(language=src).display_name()
    tgt_full = Language.make(language=tgt).display_name()
    shot = args.n_shot
    assert shot == 0 or (shot > 0 and valid_name)

    # seed random
    random.seed(seed)

    # read files
    with open(os.path.join(raw_dir, f"{test_name}.{src}-{tgt}.{src}")) as test_src_f:
        test_src_lines = [l.strip() for l in test_src_f.readlines()]
    if shot == 0:
        out_file_path = os.path.join(format_dir, f"{test_name}.{src}-{tgt}.{src}.{0}-shot")
        valid_src_lines = None
        valid_tgt_lines = None
    else:
        with open(os.path.join(raw_dir, f"{valid_name}.{src}-{tgt}.{src}")) as valid_src_f, \
             open(os.path.join(raw_dir, f"{valid_name}.{src}-{tgt}.{tgt}")) as valid_tgt_f:
            valid_src_lines = [l.strip() for l in valid_src_f.readlines()]
            valid_tgt_lines = [l.strip() for l in valid_tgt_f.readlines()]
        out_file_path = os.path.join(format_dir, f"{test_name}.{src}-{tgt}.{src}.{shot}-shot.{seed}-seed")

    demos = []
    if shot > 0:
        demos = random.sample(list(zip(valid_src_lines, valid_tgt_lines)), shot)

    with open(out_file_path, 'w') as out_f:
        for id, src_line in enumerate(test_src_lines):
            all_items = demos + [(src_line, None)]
            prompt_lst = []
            for it in all_items:
                it_src, it_tgt = it
                s = f"Instruction: Translate the following {src_full} text into {tgt_full}.\n" + \
                    f"{src_full}: {it_src}\n" + \
                    (f"{tgt_full}: {it_tgt}" if it_tgt else f"{tgt_full}:")
                prompt_lst.append(s)
            prompt = "\n\n".join(prompt_lst)
            out_f.write(
                f"{id:04}\n"
                f"{prompt}\n\n\n"
            )

if __name__ == "__main__":
    args = parse_args()
    main(args)
3,015
39.213333
148
py
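The prompt that format_base.py above assembles is a stack of instruction blocks separated by blank lines, with the target side left open on the last block. A sketch with one invented demo pair:

demos = [("Bonjour.", "Hello.")]
src_line = "Merci beaucoup."
prompt_lst = []
for it_src, it_tgt in demos + [(src_line, None)]:
    prompt_lst.append(
        "Instruction: Translate the following French text into English.\n"
        + f"French: {it_src}\n"
        + (f"English: {it_tgt}" if it_tgt else "English:")
    )
prompt = "\n\n".join(prompt_lst)  # ends with the open "English:" cue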
MAPS-mt
MAPS-mt-main/data/format_kw.py
import random
import os
from langcodes import Language
import argparse

def parse_args():
    parser = argparse.ArgumentParser("", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-w', "--workspace", type=str, default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'), help="Workspace dir")
    parser.add_argument('-tn', "--test-name", type=str, required=True, help="wmt22/wmt21/...")
    parser.add_argument('-m', "--model-name", type=str, required=True, help="model name")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument('-s', "--src", type=str, required=True, help='source lang')
    parser.add_argument('-t', "--tgt", type=str, required=True, help='target lang')
    return parser.parse_args()

def main(args):
    workspace = args.workspace
    data_dir = os.path.join(workspace, "data")
    raw_dir = os.path.join(data_dir, "raw")
    format_dir = os.path.join(data_dir, "format")
    test_name = args.test_name
    model_name = args.model_name
    seed = args.seed
    src = args.src
    tgt = args.tgt
    src_full = Language.make(language=src).display_name()
    tgt_full = Language.make(language=tgt).display_name()
    model_out_dir = os.path.join(workspace, "output", model_name)
    output_dir = os.path.join(format_dir, "with-knowledge", model_name)

    # seed random
    random.seed(seed)

    # read files
    with open(os.path.join(raw_dir, f"{test_name}.{src}-{tgt}.{src}")) as test_src_f, \
         open(os.path.join(model_out_dir, f"{test_name}.{src}-{tgt}.{src}.ask-kw.trans")) as kw_f:
        test_src_lines = [l.strip() for l in test_src_f.readlines()]
        kw_lines = [l.strip() for l in kw_f.readlines()]

    out_file_path = os.path.join(output_dir, f"{test_name}.{src}-{tgt}.{src}.kw.{seed}-seed")
    demos = []
    with open(out_file_path, 'w') as out_f:
        for id, (src_line, kw_line) in enumerate(zip(test_src_lines, kw_lines)):
            all_items = demos + [(src_line, None, kw_line)]
            prompt_lst = []
            for it in all_items:
                it_src, it_tgt, it_kw = it
                s = f"Keyword Pairs: {it_kw}\n\n" + \
                    f"Instruction: Given the above knowledge, translate the following {src_full} text into {tgt_full}.\n" + \
                    f"{src_full}: {it_src}\n" + \
                    (f"{tgt_full}: {it_tgt}" if it_tgt else f"{tgt_full}:")
                prompt_lst.append(s)
            prompt = "\n\n".join(prompt_lst)
            out_f.write(
                f"{id:04}\n"
                f"{prompt}\n\n\n"
            )

if __name__ == "__main__":
    args = parse_args()
    main(args)
2,724
41.578125
148
py
MAPS-mt
MAPS-mt-main/data/__init__.py
from .format_ask_demo import demo_dict as demo_ex_dict
from .format_ask_kw import demo_dict as kw_ex_dict
from .format_ask_topic import demo_dict as topic_ex_dict
162
53.333333
56
py
MAPS-mt
MAPS-mt-main/data/format_topic.py
import random
import os
from langcodes import Language
import argparse

def parse_args():
    parser = argparse.ArgumentParser("", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-w', "--workspace", type=str, default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'), help="Workspace dir")
    parser.add_argument('-tn', "--test-name", type=str, required=True, help="wmt22/wmt21/...")
    parser.add_argument('-m', "--model-name", type=str, required=True, help="model name")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument('-s', "--src", type=str, required=True, help='source lang')
    parser.add_argument('-t', "--tgt", type=str, required=True, help='target lang')
    return parser.parse_args()

def main(args):
    workspace = args.workspace
    data_dir = os.path.join(workspace, "data")
    raw_dir = os.path.join(data_dir, "raw")
    format_dir = os.path.join(data_dir, "format")
    test_name = args.test_name
    model_name = args.model_name
    seed = args.seed
    src = args.src
    tgt = args.tgt
    src_full = Language.make(language=src).display_name()
    tgt_full = Language.make(language=tgt).display_name()
    model_out_dir = os.path.join(workspace, "output", model_name)
    output_dir = os.path.join(format_dir, "with-knowledge", model_name)

    # seed random
    random.seed(seed)

    # read files
    with open(os.path.join(raw_dir, f"{test_name}.{src}-{tgt}.{src}")) as test_src_f, \
         open(os.path.join(model_out_dir, f"{test_name}.{src}-{tgt}.{src}.ask-topic.trans")) as topic_f:
        test_src_lines = [l.strip() for l in test_src_f.readlines()]
        topic_lines = [l.strip() for l in topic_f.readlines()]

    out_file_path = os.path.join(output_dir, f"{test_name}.{src}-{tgt}.{src}.topic.{seed}-seed")
    demos = []
    with open(out_file_path, 'w') as out_f:
        for id, (src_line, topic_line) in enumerate(zip(test_src_lines, topic_lines)):
            all_items = demos + [(src_line, None, topic_line)]
            prompt_lst = []
            for it in all_items:
                it_src, it_tgt, it_topic = it
                s = f"Topics: {it_topic}\n\n" + \
                    f"Instruction: Given the above knowledge, translate the following {src_full} text into {tgt_full}.\n" + \
                    f"{src_full}: {it_src}\n" + \
                    (f"{tgt_full}: {it_tgt}" if it_tgt else f"{tgt_full}:")
                prompt_lst.append(s)
            prompt = "\n\n".join(prompt_lst)
            out_f.write(
                f"{id:04}\n"
                f"{prompt}\n\n\n"
            )

if __name__ == "__main__":
    args = parse_args()
    main(args)
2,747
41.9375
148
py
MAPS-mt
MAPS-mt-main/data/format_demo.py
import random
import os
from langcodes import Language
import argparse

def parse_args():
    parser = argparse.ArgumentParser("", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-w', "--workspace", type=str, default=os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'), help="Workspace dir")
    parser.add_argument('-tn', "--test-name", type=str, required=True, help="wmt22/wmt21/...")
    parser.add_argument('-m', "--model-name", type=str, required=True, help="model name")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument('-s', "--src", type=str, required=True, help='source lang')
    parser.add_argument('-t', "--tgt", type=str, required=True, help='target lang')
    return parser.parse_args()

def main(args):
    workspace = args.workspace
    data_dir = os.path.join(workspace, "data")
    raw_dir = os.path.join(data_dir, "raw")
    format_dir = os.path.join(data_dir, "format")
    test_name = args.test_name
    model_name = args.model_name
    seed = args.seed
    src = args.src
    tgt = args.tgt
    src_full = Language.make(language=src).display_name()
    tgt_full = Language.make(language=tgt).display_name()
    model_out_dir = os.path.join(workspace, "output", model_name)
    output_dir = os.path.join(format_dir, "with-knowledge", model_name)

    # seed random
    random.seed(seed)

    # read files
    with open(os.path.join(raw_dir, f"{test_name}.{src}-{tgt}.{src}")) as test_src_f, \
         open(os.path.join(model_out_dir, f"{test_name}.{src}-{tgt}.{src}.ask-demo.trans")) as demo_f:
        test_src_lines = [l.strip() for l in test_src_f.readlines()]
        demo_lines = [l.strip() for l in demo_f.readlines()]

    out_file_path = os.path.join(output_dir, f"{test_name}.{src}-{tgt}.{src}.demo.{seed}-seed")
    demos = []
    with open(out_file_path, 'w') as out_f:
        for id, (src_line, demo_line) in enumerate(zip(test_src_lines, demo_lines)):
            all_items = demos + [(src_line, None, demo_line)]
            prompt_lst = []
            for it in all_items:
                it_src, it_tgt, it_demo = it
                s = f"Related {src_full}-{tgt_full} sentence pairs: {it_demo}\n\n" + \
                    f"Instruction: Given the above knowledge, translate the following {src_full} text into {tgt_full}.\n" + \
                    f"{src_full}: {it_src}\n" + \
                    (f"{tgt_full}: {it_tgt}" if it_tgt else f"{tgt_full}:")
                prompt_lst.append(s)
            prompt = "\n\n".join(prompt_lst)
            out_f.write(
                f"{id:04}\n"
                f"{prompt}\n\n\n"
            )

if __name__ == "__main__":
    args = parse_args()
    main(args)
2,774
43.047619
148
py
MAPS-mt
MAPS-mt-main/data/trigger_sents.py
TRIGGER_SENTS = {
    "en": [
        "On Monday, scientists from the Stanford University School of Medicine announced the invention of a new diagnostic tool that can sort cells by type: a tiny printable chip that can be manufactured using standard inkjet printers for possibly about one U.S. cent each.",
        "The JAS 39C Gripen crashed onto a runway at around 9:30 am local time (0230 UTC) and exploded, closing the airport to commercial flights.",
        "28-year-old Vidal had joined Barça three seasons ago, from Sevilla.",
        "The protest started around 11:00 local time (UTC+1) on Whitehall opposite the police-guarded entrance to Downing Street, the Prime Minister's official residence.",
        "The number of users of the Yahoo! and Microsoft services combined will rival the number of AOL's customers.",
    ],
    "zh": [
        "周一,斯坦福大学医学院的科学家宣布,他们发明了一种可以将细胞按类型分类的新型诊断工具:一种可打印的微型芯片。这种芯片可以使用标准喷墨打印机制造,每片价格可能在一美分左右。",
        "当地时间上午 9:30 左右 (UTC 0230),JAS 39C 鹰狮战斗机撞上跑道并发生爆炸,导致机场关闭,商业航班无法正常起降。",
        "三个赛季前,28岁的比达尔(Vidal)从塞维利亚队加盟巴萨。",
        "抗议活动于当地时间 11:00 (UTC+1) 左右在白厅 (Whitehall) 开始,白厅对面是首相官邸唐宁街的入口处,由警察看守。",
        "雅虎和微软服务的用户总和,与美国在线的客户数不相上下。",
    ],
    "de": [
        "Am Montag haben die Wisenschaftler der Stanford University School of Medicine die Erfindung eines neuen Diagnosetools bekanntgegeben, mit dem Zellen nach ihrem Typ sortiert werden können: ein winziger, ausdruckbarer Chip, der für jeweils etwa einen US-Cent mit Standard-Tintenstrahldruckern hergestellt werden kann.",
        "Der JAS 39C Gripen stürzte gegen 9:30 Uhr Ortszeit (02:30 UTC) auf eine Startbahn und explodierte, sodass der Flughafen für kommerzielle Flüge geschlossen werden musste.",
        "Der 28-jährige Vidal war vor drei Spielzeiten von Sevilla zu Barça gekommen.",
        "Der Protest begann gegen 11:00 Uhr Ortszeit (UTC +1) in Whitehall gegenüber dem von der Polizei bewachten Eingang zur Downing Street, dem offiziellen Wohnsitz des Premierministers.",
        "Die Zahl der Nutzer der Dienste von Yahoo! und Microsoft zusammengenommen wird mit der Zahl der Kunden von AOL konkurrieren.",
    ],
    "ja": [
        "月曜日にスタンフォード大学医学部の科学者たちは、細胞を種類別に分類できる新しい診断ツールを発明したと発表しました。それは標準的なインクジェットプリンタで印刷して製造できる小型チップであり、原価は1枚あたり1円ほどす。",
        "JAS 39Cグリペンは現地時間の午前9時30分頃(UTC 0230)に滑走路に墜落して爆発し、その影響で空港の商業便が閉鎖されました。",
        "28歳のビダル選手は、3シーズン前にセビージャから移籍してバルサに所属していました。",
        "抗議行動は、現地時間11:00(UTC+1)頃にホワイトホール通りで始まり、首相官邸があるダウニング街の警察が警備する入口の向かいに群衆が集結しました。",
        "ヤフーとマイクロソフトのサービスを合わせたユーザー数は、AOLの顧客数に匹敵するだろう。",
    ],
    "fr": [
        "Des scientifiques de l’école de médecine de l’université de Stanford ont annoncé ce lundi la création d'un nouvel outil de diagnostic, qui permettrait de différencier les cellules en fonction de leur type. Il s'agit d'une petit puce imprimable, qui peut être produite au moyen d'une imprimante à jet d'encre standard, pour un coût d'environ un cent de dollar pièce.",
        "Le JAS 39C Gripen s’est écrasé sur une piste autour de 9 h 30 heure locale (0230 UTC) et a explosé, provoquant la fermeture de l’aéroport aux vols commerciaux.",
        "Vidal, 28 ans, avait rejoint le Barça il y a trois saisons, en provenance de Séville.",
        "La manifestation a commencé vers 11 h heure locale (UTC+1) sur Whitehall, en face de l'entrée gardée par la police de Downing Street, la résidence officielle du Premier ministre.",
        "Le nombre d'utilisateurs des services Yahoo! et Microsoft combinés rivalisera avec le nombre de clients d'AOL.",
    ],
}

SUPPORT_LANGS = TRIGGER_SENTS.keys()
3,564
92.815789
376
py
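A hypothetical sanity check for the table above, assuming the package layout shown in this dump makes it importable as data.trigger_sents: every supported language should carry the same five trigger sentences.

from data.trigger_sents import TRIGGER_SENTS, SUPPORT_LANGS

assert all(len(TRIGGER_SENTS[lang]) == 5 for lang in SUPPORT_LANGS)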
MAPS-mt
MAPS-mt-main/model/__init__.py
0
0
0
py
MAPS-mt
MAPS-mt-main/model/openai/translate.py
import os
import re
import openai
import argparse
import tiktoken
from tqdm import tqdm
import backoff

api_key = "YOUR OPENAI API KEY"

model2max_context = {
    "text-davinci-003": 4097,
}

class OutOfQuotaException(Exception):
    "Raised when the key exceeded the current quota"
    def __init__(self, key, cause=None):
        super().__init__(f"No quota for key: {key}")
        self.key = key
        self.cause = cause

    def __str__(self):
        if self.cause:
            return f"{super().__str__()}. Caused by {self.cause}"
        else:
            return super().__str__()

class AccessTerminatedException(Exception):
    "Raised when the key has been terminated"
    def __init__(self, key, cause=None):
        super().__init__(f"Access terminated key: {key}")
        self.key = key
        self.cause = cause

    def __str__(self):
        if self.cause:
            return f"{super().__str__()}. Caused by {self.cause}"
        else:
            return super().__str__()

def num_tokens_from_string(string: str, model_name: str) -> int:
    """Returns the number of tokens in a text string."""
    encoding = tiktoken.encoding_for_model(model_name)
    num_tokens = len(encoding.encode(string))
    return num_tokens

def generate_batch(lst, batch_size):
    """Yields batches of the specified size."""
    for i in range(0, len(lst), batch_size):
        yield lst[i:i + batch_size]

def post_process(s: str):
    res = s.strip().replace("\n", " ")
    if res == "":
        res = " "
    return res

@backoff.on_exception(backoff.expo, (openai.error.OpenAIError, openai.error.RateLimitError, openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.APIConnectionError), max_tries=5)
def translate_with_backoff(smp, model_name, max_tokens, api_key, temperature):
    try:
        response = openai.Completion.create(
            model=model_name,
            prompt=smp,
            temperature=temperature,
            max_tokens=max_tokens,
            api_key=api_key,
        )
        gen = response.choices[0].text
        gen = post_process(gen)
        return gen
    except openai.error.RateLimitError as e:
        if "You exceeded your current quota, please check your plan and billing details" in e.user_message:
            raise OutOfQuotaException(api_key)
        elif "Your access was terminated due to violation of our policies" in e.user_message:
            raise AccessTerminatedException(api_key)
        else:
            raise e

@backoff.on_exception(backoff.expo, (openai.error.OpenAIError, openai.error.RateLimitError, openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.APIConnectionError), max_tries=5)
def batch_translate_with_backoff(smp_lst, model_name, max_tokens, api_key, temperature):
    try:
        response = openai.Completion.create(
            model=model_name,
            prompt=smp_lst,
            temperature=temperature,
            max_tokens=max_tokens,
            api_key=api_key,
        )
        gen_lst = [""] * len(smp_lst)
        for choice in response.choices:
            gen = choice.text
            gen = post_process(gen)  # post_process handles a single text
            gen_lst[choice.index] = gen
        return gen_lst
    except openai.error.RateLimitError as e:
        if "You exceeded your current quota, please check your plan and billing details" in e.user_message:
            raise OutOfQuotaException(api_key)
        elif "Your access was terminated due to violation of our policies" in e.user_message:
            raise AccessTerminatedException(api_key)
        else:
            raise e

def parse_args():
    parser = argparse.ArgumentParser("", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--model-name", type=str, required=True, help="Model name")
    parser.add_argument("-i", "--input", type=str, required=True, help="Input file path")
    parser.add_argument("-o", "--output", type=str, required=True, help="Output file path")
    parser.add_argument("--temperature", type=float, default=0, help="Sampling temperature")
    return parser.parse_args()

def main():
    args = parse_args()
    model_name = args.model_name
    in_file_path = args.input
    out_file_path = args.output
    temperature = args.temperature

    # get input samples
    input_file_path = os.path.join(in_file_path)
    with open(input_file_path, 'r') as in_file:
        in_file_str = in_file.read()
    samples = in_file_str.strip().split("\n\n\n")
    total = len(samples)

    # create or check output file
    num_done = 0
    output_file_path = os.path.join(out_file_path)
    if os.path.exists(output_file_path):
        with open(output_file_path, 'r') as out_file:
            num_done = len(out_file.readlines())

    # translate
    pattern = re.compile(r'\d\d\d\d\n')
    with tqdm(total=total) as pbar:
        pbar.update(num_done)
        # Resume at num_done; sample ids keep counting from there, so the
        # enumeration must start at num_done for the id prefix to match.
        for to_be_translated_idx, to_be_translated_smp in enumerate(samples[num_done:], start=num_done):
            assert len(pattern.findall(to_be_translated_smp)) >= 1
            to_be_translated_smp = to_be_translated_smp.replace(f"{to_be_translated_idx:04}\n", "", 1).strip()
            len_prompt = num_tokens_from_string(to_be_translated_smp, model_name)
            gen = translate_with_backoff(
                to_be_translated_smp,
                model_name=model_name,
                max_tokens=model2max_context[model_name] - len_prompt,
                api_key=api_key,
                temperature=temperature,
            )
            with open(output_file_path, 'a') as fout:
                fout.write(f"{gen}\n")
            pbar.update(1)

if __name__ == "__main__":
    main()
5,776
34.22561
199
py
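model/openai/translate.py above budgets each completion as the model context size minus the prompt's token count. The same arithmetic stand-alone, with an invented prompt string:

import tiktoken

enc = tiktoken.encoding_for_model("text-davinci-003")
prompt = "Instruction: Translate the following English text into German.\nEnglish: Hello.\nGerman:"
max_tokens = 4097 - len(enc.encode(prompt))  # 4097 = text-davinci-003 context size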
MAPS-mt
MAPS-mt-main/model/openai/__init__.py
0
0
0
py
MAPS-mt
MAPS-mt-main/model/alpaca/translate.py
import os
import re
import torch
import argparse
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--model-name-or-path', type=str, required=True, help='model name in the hub or local path')
    parser.add_argument('--input', '-i', type=str, required=True, help='input file')
    parser.add_argument('--output', '-o', type=str, required=True, help='output file')
    parser.add_argument('--search-algorithm', '-sa', type=str, default='beam', help='search algorithms: sample, beam')
    parser.add_argument('--batch', '-b', type=int, default=2, help='batch size')
    parser.add_argument('--temperature', '-t', type=float, default=0.1, help='temperature: 0.7 for text generation')
    args = parser.parse_args()

    seed = args.seed
    model_name_or_path = args.model_name_or_path
    input_file = args.input
    output_file = args.output
    search = args.search_algorithm
    batch = args.batch
    temperature = args.temperature

    # read output file
    num_done = 0
    if os.path.exists(output_file):
        with open(output_file, 'r') as out_file:
            num_done = len(out_file.readlines())

    # get input samples
    with open(input_file, 'r') as in_file:
        in_file_str = in_file.read()
    in_samples = in_file_str.strip().split("\n\n\n")
    for idx in range(len(in_samples)):
        smp = in_samples[idx]
        assert len(re.compile(r'\d\d\d\d\n').findall(smp)) >= 1
        in_samples[idx] = smp.replace(f"{idx:04}\n", "", 1).strip()
    total = len(in_samples)
    in_samples = in_samples[num_done:]

    with tqdm(total=total) as pbar:
        pbar.update(num_done)
        if len(in_samples) == 0:
            exit(0)

        # Load checkpoints
        model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float16, device_map="auto")
        print(model.hf_device_map)
        tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=False)
        tokenizer.padding_side = "left"
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        gen_config = GenerationConfig(
            temperature=temperature,
            do_sample=True,
            num_beams=1,
            max_new_tokens=256,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id,
        )
        if search == "beam":
            gen_config = GenerationConfig(
                temperature=temperature,
                num_beams=1,
                max_new_tokens=256,
                eos_token_id=tokenizer.eos_token_id,
                pad_token_id=tokenizer.pad_token_id,
            )

        # Generate
        if len(in_samples) > 0:
            torch.manual_seed(args.seed)
            with open(output_file, 'a', encoding='utf-8') as fo:
                for i in range(0, len(in_samples), batch):
                    p = in_samples[i:i+batch]
                    tokenized = tokenizer(p, padding=True, return_tensors="pt")
                    input_ids = tokenized.input_ids.cuda()
                    attn_mask = tokenized.attention_mask.cuda()
                    # Drop a trailing EOS that some tokenizers append to the
                    # prompt; trim ids and mask together to keep shapes aligned.
                    if input_ids[0, -1] == tokenizer.eos_token_id:
                        input_ids = input_ids[:, :-1]
                        attn_mask = attn_mask[:, :-1]
                    with torch.no_grad():
                        generated_ids = model.generate(inputs=input_ids, attention_mask=attn_mask, generation_config=gen_config)
                    for original_input, gen_id in zip(input_ids, generated_ids):
                        original_text = tokenizer.decode(original_input, skip_special_tokens=True)
                        gen_text = tokenizer.decode(gen_id, skip_special_tokens=True)
                        new_text = gen_text.replace(original_text, "").replace("\n", "").strip()
                        print(new_text, file=fo, flush=True)
                    pbar.update(len(p))
4,295
44.221053
128
py
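Both translate drivers (OpenAI and Alpaca) expect the same input framing: samples separated by a blank line, each prefixed with a zero-padded four-digit index line that the r'\d\d\d\d\n' pattern asserts on and the replace() call strips. A minimal sketch of producing such a file; the file name and prompts are illustrative:

prompts = ["Translate to German: Hello world.", "Translate to German: Good morning."]
with open("input.txt", "w") as f:
    # Records are joined with "\n\n\n", so split("\n\n\n") recovers them;
    # each record starts with its 4-digit index on its own line.
    f.write("\n\n\n".join("{:04}\n{}".format(i, p) for i, p in enumerate(prompts)))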
MAPS-mt
MAPS-mt-main/model/alpaca/__init__.py
0
0
0
py
keepalived
keepalived-master/tools/json_tracking/make_conf.py
#!/usr/bin/env python3 ##### # # This script will generate a sample configuration for Keepalived. # The configuration is as follows: # Each vlan contains a VRRPv2 instance with a private IPv4 address # and a VRRPv3 instance with a Unique Local IPv6 address. # Each instance tracks a file located in keepalived_offset_folder. # # Author: Damien Clabaut <damien.clabaut@corp.ovh.com> # ##### import jinja2 # Configuration # First vlan of the range first_vlan = 2 # Last vlan of the range, must not be higher than 255 last_vlan = 100 # Path to the folder containing files tracked by each instance keepalived_offset_folder = "/etc/keepalived_offset/" # Path to the output file output_file = "keepalived.conf" # Underlying interface. # For this configuration to work you need vlan subinterfaces. # For example, if the value below is bond0, you need to have # interfaces bond0.2 to bond0.<last_vlan> interface = "bond0" class Vlan: def __init__(self, name, ip4addr, ip4net, ip6addr): self.name = name self.ip4addr = ip4addr self.ip4net = ip4net self.ip6addr = ip6addr vlans = [] for vlan_id in range(first_vlan, last_vlan + 1): ip4addr = "10.0." + str(vlan_id) + ".254" ip4net = "10.0." + str(vlan_id) + ".0/24" ip6addr = "fd00:42:ffff:" + format(vlan_id, '02x') + ":ff:ff:ff:ff/64" new_vlan = Vlan(vlan_id, ip4addr, ip4net, ip6addr) vlans.append(new_vlan) templateLoader = jinja2.FileSystemLoader(searchpath="./") templateEnv = jinja2.Environment(loader=templateLoader) vrrpconf = templateEnv.get_template('template_vrrp_instance').render( vlans=vlans, track_files=keepalived_offset_folder, interface=interface ) + "\n" with open(output_file, "w") as conf_file: conf_file.write(vrrpconf)
1,819
27.888889
74
py
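The Jinja2 template file template_vrrp_instance that make_conf.py renders is not included in this dump, so its exact contents are unknown. A plausible minimal shape, rendered inline for illustration only; the block names and fields are assumptions based on the variables the script passes in:

import jinja2

TEMPLATE = """\
{% for vlan in vlans %}
vrrp_instance VI_{{ vlan.name }} {
    interface {{ interface }}.{{ vlan.name }}
    virtual_router_id {{ vlan.name }}
    virtual_ipaddress {
        {{ vlan.ip4addr }}
        {{ vlan.ip6addr }}
    }
}
{% endfor %}
"""

print(jinja2.Environment().from_string(TEMPLATE).render(vlans=vlans, interface=interface))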
keepalived
keepalived-master/doc/source/conf.py
# -*- coding: utf-8 -*- # # Keepalived documentation build configuration file, created by # sphinx-quickstart on Mon Dec 29 18:59:45 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import time # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.todo', ] # Support for todo items todo_include_todos = True # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Keepalived' copyright = u'2001-%s, Alexandre Cassen' % time.strftime('%Y') # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.3' # The full version, including alpha/beta/rc tags. release = '1.4.3' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. 
#html_theme_options = {'collapsiblesidebar': True} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = ['tools'] #html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = '%s %s User Guide' % (project, release) # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Keepaliveddoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'Keepalived_UserGuide.tex', u'Keepalived User Guide', u'Alexandre Cassen and Contributors', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. 
#latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for Epub output ---------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'Keepalived User Guide' epub_author = u'Alexandre Cassen and Contributors' epub_publisher = u'Alexandre Cassen and Contributors' epub_copyright = u'%s, Alexandre Cassen' % time.strftime('%Y') # The basename for the epub file. It defaults to the project name. #epub_basename = u'Keepalived' # The HTML theme for the epub output. Since the default themes are not optimized # for small screen space, using the same theme for HTML and epub output is # usually not wise. This defaults to 'epub', a theme designed to save visual # space. #epub_theme = 'epub' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be an ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # A sequence of (type, uri, title) tuples for the guide element of content.opf. #epub_guide = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files that should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. epub_exclude_files = ['search.html'] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True # Choose between 'default' and 'includehidden'. #epub_tocscope = 'default' # Fix unsupported image types using the PIL. #epub_fix_images = False # Scale large images. #epub_max_image_width = 0 # How to display URL addresses: 'footnote', 'no', or 'inline'. #epub_show_urls = 'inline' # If false, no index is generated. #epub_use_index = True
9,456
30.734899
80
py
UNIXKD
UNIXKD-master/teacher.py
import os import os.path as osp import argparse import time import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.optim.lr_scheduler import MultiStepLR from torch.utils.data import DataLoader import torchvision.transforms as transforms from torchvision.datasets import CIFAR100 from tensorboardX import SummaryWriter from utils import AverageMeter, accuracy from models import model_dict torch.backends.cudnn.benchmark = True parser = argparse.ArgumentParser(description='train teacher network.') parser.add_argument('--epoch', type=int, default=240) parser.add_argument('--batch-size', type=int, default=64) parser.add_argument('--lr', type=float, default=0.05) parser.add_argument('--momentum', type=float, default=0.9) parser.add_argument('--weight-decay', type=float, default=5e-4) parser.add_argument('--gamma', type=float, default=0.1) parser.add_argument('--milestones', type=int, nargs='+', default=[150,180,210]) parser.add_argument('--save-interval', type=int, default=40) parser.add_argument('--seed', type=int, default=0) parser.add_argument('--arch', type=str) parser.add_argument('--gpu-id', type=int) args = parser.parse_args() torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) np.random.seed(args.seed) os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id) exp_name = 'teacher_{}'.format(args.arch) exp_path = './experiments/{}'.format(exp_name) os.makedirs(exp_path, exist_ok=True) transform_train = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761]), ]) transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761]), ]) trainset = CIFAR100('./data', train=True, transform=transform_train, download=True) valset = CIFAR100('./data', train=False, transform=transform_test, download=True) train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=False) val_loader = DataLoader(valset, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=False) model = model_dict[args.arch](num_classes=100).cuda() optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) scheduler = MultiStepLR(optimizer, milestones=args.milestones, gamma=args.gamma) logger = SummaryWriter(osp.join(exp_path, 'events')) best_acc = -1 for epoch in range(args.epoch): model.train() loss_record = AverageMeter() acc_record = AverageMeter() start = time.time() for x, target in train_loader: optimizer.zero_grad() x = x.cuda() target = target.cuda() output = model(x) loss = F.cross_entropy(output, target) loss.backward() optimizer.step() batch_acc = accuracy(output, target, topk=(1,))[0] loss_record.update(loss.item(), x.size(0)) acc_record.update(batch_acc.item(), x.size(0)) logger.add_scalar('train/cls_loss', loss_record.avg, epoch+1) logger.add_scalar('train/cls_acc', acc_record.avg, epoch+1) run_time = time.time() - start info = 'train_Epoch:{:03d}/{:03d}\t run_time:{:.3f}\t cls_loss:{:.3f}\t cls_acc:{:.2f}\t'.format( epoch+1, args.epoch, run_time, loss_record.avg, acc_record.avg) print(info) model.eval() acc_record = AverageMeter() loss_record = AverageMeter() start = time.time() for x, target in val_loader: x = x.cuda() target = target.cuda() with torch.no_grad(): output = model(x) loss = F.cross_entropy(output, target) 
batch_acc = accuracy(output, target, topk=(1,))[0] loss_record.update(loss.item(), x.size(0)) acc_record.update(batch_acc.item(), x.size(0)) run_time = time.time() - start logger.add_scalar('val/cls_loss', loss_record.avg, epoch+1) logger.add_scalar('val/cls_acc', acc_record.avg, epoch+1) info = 'test_Epoch:{:03d}/{:03d}\t run_time:{:.2f}\t cls_loss:{:.3f}\t cls_acc:{:.2f}\n'.format( epoch+1, args.epoch, run_time, loss_record.avg, acc_record.avg) print(info) scheduler.step() # save checkpoint if (epoch+1) in args.milestones or epoch+1==args.epoch or (epoch+1)%args.save_interval==0: state_dict = dict(epoch=epoch+1, state_dict=model.state_dict(), acc=acc_record.avg) name = osp.join(exp_path, 'ckpt/{:03d}.pth'.format(epoch+1)) os.makedirs(osp.dirname(name), exist_ok=True) torch.save(state_dict, name) # save best if acc_record.avg > best_acc: state_dict = dict(epoch=epoch+1, state_dict=model.state_dict(), acc=acc_record.avg) name = osp.join(exp_path, 'ckpt/best.pth') os.makedirs(osp.dirname(name), exist_ok=True) torch.save(state_dict, name) best_acc = acc_record.avg print('best_acc: {:.2f}'.format(best_acc))
5,083
33.821918
110
py
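The teacher script saves checkpoints as plain dicts with keys epoch, state_dict, and acc. A short sketch of restoring the best teacher for later distillation, following the script's directory layout (model_dict is the repo's architecture registry):

import torch
from models import model_dict

ckpt = torch.load('./experiments/teacher_resnet32x4/ckpt/best.pth')
t_model = model_dict['resnet32x4'](num_classes=100).cuda()
t_model.load_state_dict(ckpt['state_dict'])
t_model.eval()
print('saved at epoch', ckpt['epoch'], 'with val acc', ckpt['acc'])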
UNIXKD
UNIXKD-master/utils.py
import os import logging import numpy as np import time import torch from torch.nn import init import torch.nn.functional as F import torch.utils.data as data from PIL import Image class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.count = 0 self.sum = 0.0 self.val = 0.0 self.avg = 0.0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def accuracy(output, target, topk=(1,)): """Computes the precision@k for the specified values of k""" maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res def norm(x): n = np.linalg.norm(x) return x / n def val(loader, args, t_model, s_model, logger, epoch): s_model.eval() acc_record = AverageMeter() loss_record = AverageMeter() start = time.time() for x, target in loader: x = x.cuda() target = target.cuda() with torch.no_grad(): _, output = s_model(x, is_feat=True) loss = F.cross_entropy(output, target) batch_acc = accuracy(output, target, topk=(1,))[0] acc_record.update(batch_acc.item(), x.size(0)) loss_record.update(loss.item(), x.size(0)) run_time = time.time() - start if logger is not None: logger.add_scalar('val/cls_loss', loss_record.avg, epoch+1) logger.add_scalar('val/cls_acc', acc_record.avg, epoch+1) info = 'student_test_Epoch:{:03d}\t run_time:{:.2f}\t cls_acc:{:.2f}\n'.format( epoch+1, run_time, acc_record.avg) print(info) return acc_record.avg def cal_center(loader, args, model): model.eval() feat = [] label = [] for x, target in loader: x = x.cuda() target = target.cuda() with torch.no_grad(): batch_feat, output = model(x, is_feat=True) feat.append(batch_feat[-1]) label.append(target) feat = torch.cat(feat, dim=0).cpu().numpy() label = torch.cat(label, dim=0).cpu().numpy() center = [] for i in range(max(label)+1): index = np.where(label==i)[0] center.append(np.mean(feat[index], axis=0)) center = np.vstack(center) center = torch.from_numpy(center).cuda() return center
2,691
24.638095
87
py
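Quick usage sketch for the two helpers every training script in this repo relies on, with dummy logits:

import torch
from utils import AverageMeter, accuracy

meter = AverageMeter()
logits = torch.randn(8, 100)                   # batch of 8, 100 classes
labels = torch.randint(0, 100, (8,))
top1 = accuracy(logits, labels, topk=(1,))[0]  # top-1 accuracy as a percentage
meter.update(top1.item(), logits.size(0))
print(meter.avg)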
UNIXKD
UNIXKD-master/zoo.py
from __future__ import print_function import torch import torch.nn as nn import torch.nn.functional as F class Attention(nn.Module): """Paying More Attention to Attention: Improving the Performance of Convolutional Neural Networks via Attention Transfer code: https://github.com/szagoruyko/attention-transfer""" def __init__(self, p=2): super(Attention, self).__init__() self.p = p def forward(self, g_s, g_t): return [self.at_loss(f_s, f_t) for f_s, f_t in zip(g_s, g_t)] def at_loss(self, f_s, f_t): s_H, t_H = f_s.shape[2], f_t.shape[2] if s_H > t_H: f_s = F.adaptive_avg_pool2d(f_s, (t_H, t_H)) elif s_H < t_H: f_t = F.adaptive_avg_pool2d(f_t, (s_H, s_H)) else: pass return (self.at(f_s) - self.at(f_t)).pow(2).mean() def at(self, f): return F.normalize(f.pow(self.p).mean(1).view(f.size(0), -1)) class Similarity(nn.Module): """Similarity-Preserving Knowledge Distillation, ICCV2019, verified by original author""" def __init__(self): super(Similarity, self).__init__() def forward(self, g_s, g_t): return [self.similarity_loss(f_s, f_t) for f_s, f_t in zip(g_s, g_t)] def similarity_loss(self, f_s, f_t): bsz = f_s.shape[0] f_s = f_s.view(bsz, -1) f_t = f_t.view(bsz, -1) G_s = torch.mm(f_s, torch.t(f_s)) # G_s = G_s / G_s.norm(2) G_s = torch.nn.functional.normalize(G_s) G_t = torch.mm(f_t, torch.t(f_t)) # G_t = G_t / G_t.norm(2) G_t = torch.nn.functional.normalize(G_t) G_diff = G_t - G_s loss = (G_diff * G_diff).view(-1, 1).sum(0) / (bsz * bsz) return loss
1,745
30.178571
101
py
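Both distillation losses take lists of feature maps (one student/teacher pair per hooked layer) and return a list of per-layer scalars, which the caller sums. A small sketch with random features; channel counts may differ between student and teacher because Attention collapses channels and Similarity compares batch-level Gram matrices:

import torch
from zoo import Attention, Similarity

g_s = [torch.randn(4, 64, 16, 16), torch.randn(4, 128, 8, 8)]   # student features
g_t = [torch.randn(4, 256, 16, 16), torch.randn(4, 512, 8, 8)]  # teacher features
loss_at = sum(Attention()(g_s, g_t))
loss_sp = sum(Similarity()(g_s, g_t))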
UNIXKD
UNIXKD-master/student_v0.py
import os import os.path as osp import argparse import time import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.utils.data import DataLoader from torch.optim.lr_scheduler import MultiStepLR import torchvision.transforms as transforms from tensorboardX import SummaryWriter from models import model_dict from zoo import Attention, Similarity from dataset import CIFAR100 from utils import accuracy, val, AverageMeter, cal_center items = ['acc', 'loss', \ 's_select_confidence', 's_select_margin', 's_select_entropy', \ 's_else_confidence', 's_else_margin', 's_else_entropy', \ 's_all_confidence', 's_all_margin', 's_all_entropy', \ 't_confidence', 't_margin', 't_entropy', \ 'center_dist'] parser = argparse.ArgumentParser(description='train student network.') parser.add_argument('--epoch', type=int, default=240) parser.add_argument('--batch-size', type=int, default=64) parser.add_argument('--k', type=int, default=48) parser.add_argument('--b', type=int, default=32) parser.add_argument('--w', type=float, default=1000) parser.add_argument('--lr', type=float, default=0.05) parser.add_argument('--momentum', type=float, default=0.9) parser.add_argument('--weight-decay', type=float, default=5e-4) parser.add_argument('--milestones', type=float, nargs='+', default=[150, 180, 210]) parser.add_argument('--teacher-path', type=str, default='./experiments/teacher_resnet32x4') parser.add_argument('--teacher-ckpt', type=str, default='best') parser.add_argument('--student-arch', type=str, default='resnet8x4') parser.add_argument('--ce-weight', type=float, default=0.0) parser.add_argument('--kd-weight', type=float, default=1.0) parser.add_argument('--other-distill', type=str, choices=['AT', 'SP'], default=None) parser.add_argument('--T', type=float, default=4.0) parser.add_argument('--strategy', type=int, choices=[0,1,2,3], default=3) # 0: random, 1: least confidence, 2: margin, 3: entropy parser.add_argument('--seed', type=int, default=0) parser.add_argument('--gpu-id', type=int, default=0) args = parser.parse_args() torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) np.random.seed(args.seed) os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id) torch.backends.cudnn.benchmark = True teacher_arch = '_'.join(args.teacher_path.split('/')[-1].split('_')[1:]) exp_name = '{}_student_{}_teacher_{}-{}_strategy{}_k{}_b{}_w{}_seed{}'.format(\ __file__.split('.')[0].split('_')[-1], \ args.student_arch, teacher_arch, args.teacher_ckpt, \ args.strategy, \ args.k, args.b, args.w, \ args.seed) exp_path = './experiments/{}'.format(exp_name) os.makedirs(exp_path, exist_ok=True) transform_train = transforms.Compose([ transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761]), ]) transform_test = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761]), ]) trainset = CIFAR100('./data', train=True, transform=transform_train) valset = CIFAR100('./data', train=False, transform=transform_test) num_classes = 100 train_loader = DataLoader(trainset, batch_size=args.batch_size, \ shuffle=True, num_workers=3, pin_memory=True) val_loader = DataLoader(valset, batch_size=args.batch_size, \ shuffle=False, num_workers=3, pin_memory=True) ckpt_path = osp.join('{}/ckpt/{}.pth'.format( \ args.teacher_path, args.teacher_ckpt)) t_model = 
model_dict[teacher_arch](num_classes=num_classes).cuda() state_dict = torch.load(ckpt_path)['state_dict'] t_model.load_state_dict(state_dict) t_model.eval() logger = SummaryWriter(osp.join(exp_path, 'events')) s_model = model_dict[args.student_arch](num_classes=num_classes).cuda() optimizer = optim.SGD(s_model.parameters(), lr=args.lr, \ momentum=args.momentum, weight_decay=args.weight_decay) scheduler = MultiStepLR(optimizer, milestones=args.milestones) if args.other_distill is not None: if args.other_distill == 'AT': criterion = Attention() weight = 1000 elif args.other_distill == 'SP': criterion = Similarity() weight = 3000 best_acc = 0 counter = torch.zeros(args.epoch, 50000).cuda() for epoch in range(args.epoch): record = {name:AverageMeter() for name in items} center = cal_center(val_loader, args, s_model) for x, y, k in train_loader: s_model.train() x = x.cuda() y = y.cuda() k = k.cuda() with torch.no_grad(): s_feats, logits = s_model(x, is_feat=True) probs = F.softmax(logits, dim=1) # confidence conf = probs.max(dim=1)[0] # margin rank = torch.argsort(probs, dim=1) top2 = torch.gather(probs, dim=1, index=rank[:,-2:]) margin = top2[:,-1] - top2[:,-2] # entropy entropy = -torch.sum(probs * torch.log(probs), dim=1) if args.strategy == 0: scores = torch.rand(x.size(0)).cuda() elif args.strategy == 1: scores = 1 - conf elif args.strategy == 2: scores = -margin elif args.strategy == 3: scores = entropy else: raise ValueError('Invalid strategy.') r = torch.arange(x.size(0)).float() m = (2*args.b-1) / (2*args.batch_size) mask_proto = 1 / (1 + torch.exp(-args.w * (r/args.batch_size - m))) mask_proto = mask_proto.cuda() lamb = np.random.beta(1, 1) mask = lamb * mask_proto.view(-1, 1, 1, 1) rank = torch.argsort(scores, descending=True) index = torch.randperm(x.size(0)).cuda() x = (1-mask) * x[rank] + mask * x[index] x = x[:args.k] counter[epoch, k[rank[:args.b]]] += 1 s_feats, s_logits = s_model(x, is_feat=True) with torch.no_grad(): t_feats, t_logits = t_model(x, is_feat=True) ## for statistics t_probs = F.softmax(t_logits, dim=1) # confidence t_conf = t_probs.max(dim=1)[0] # margin t_rank = torch.argsort(t_probs, dim=1) t_top2 = torch.gather(t_probs, dim=1, index=t_rank[:,-2:]) t_margin = t_top2[:,-1] - t_top2[:,-2] # entropy t_entropy = -torch.sum(t_probs * torch.log(t_probs), dim=1) # compute loss log_s_probs = F.log_softmax(s_logits / args.T, dim=1) t_probs = F.softmax(t_logits / args.T, dim=1) tmp = mask.squeeze()[:args.k] loss_ce = F.cross_entropy(s_logits, y[rank][:args.k], reduction='none') * (1-tmp) + \ F.cross_entropy(s_logits, y[index][:args.k], reduction='none') * tmp loss_kd = F.kl_div(log_s_probs, t_probs, reduction='batchmean') * args.T * args.T if args.other_distill is not None: loss_other = sum(criterion(s_feats[1:-1], t_feats[1:-1])) if args.other_distill == 'AT' \ else sum(criterion([s_feats[-2]], [t_feats[-2]])) loss = args.ce_weight * loss_ce.mean() + args.kd_weight * loss_kd + weight * loss_other else: loss = args.ce_weight * loss_ce.mean() + args.kd_weight * loss_kd # BP optimizer.zero_grad() loss.backward() optimizer.step() # compute distance between samples and center C = center[y[rank[:args.k]]] S = s_feats[-1] D = torch.pow(C-S, 2).sum(dim=1).sqrt().mean() record['center_dist'].update(D.item(), rank[:args.k].size(0)) batch_acc = accuracy(logits, y, topk=(1,))[0] record['acc'].update(batch_acc.item(), logits.size(0)) record['loss'].update(loss.item(), s_logits.size(0)) i = rank[:args.k].size(0)
record['s_select_confidence'].update(conf[rank[:args.k]].mean().item(), i) record['s_select_margin'].update(margin[rank[:args.k]].mean().item(), i) record['s_select_entropy'].update(entropy[rank[:args.k]].mean().item(), i) i = rank[args.k:].size(0) if i > 0: record['s_else_confidence'].update(conf[rank[args.k:]].mean().item(), i) record['s_else_margin'].update(margin[rank[args.k:]].mean().item(), i) record['s_else_entropy'].update(entropy[rank[args.k:]].mean().item(), i) i = conf.size(0) record['s_all_confidence'].update(conf.mean().item(), i) record['s_all_margin'].update(margin.mean().item(), i) record['s_all_entropy'].update(entropy.mean().item(), i) i = t_conf.size(0) record['t_confidence'].update(t_conf.mean().item(), i) record['t_margin'].update(t_margin.mean().item(), i) record['t_entropy'].update(t_entropy.mean().item(), i) for item in items: logger.add_scalar('train/{}'.format(item), record[item].avg, epoch+1) # val acc = val(val_loader, args, t_model, s_model, logger, epoch) if acc > best_acc: best_acc = acc state_dict = dict(state_dict=s_model.state_dict(), best_acc=best_acc) name = osp.join(exp_path, 'ckpt/student_best.pth') os.makedirs(osp.dirname(name), exist_ok=True) torch.save(state_dict, name) scheduler.step() if args.seed == 0: counter = counter.cpu().numpy() np.save(osp.join(exp_path, 'counter.npy'), counter)
9,632
36.628906
101
py
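The core of student_v0.py is its mixing gate: samples are ranked by an uncertainty score, and mask_proto = 1 / (1 + exp(-w * (r/B - m))) with m = (2b-1)/(2B) is a steep sigmoid over rank r that stays near 0 for the top b ranked samples (kept intact) and near 1 for the rest (eligible for mixing with random partners, scaled by a Beta(1,1) draw). A small numeric check under the script's defaults (batch size B=64, b=32, w=1000):

import torch

B, b, w = 64, 32, 1000.0
r = torch.arange(B).float()
m = (2 * b - 1) / (2 * B)
mask_proto = 1 / (1 + torch.exp(-w * (r / B - m)))
print(mask_proto[:b].max().item())  # ~0: top-b ranked inputs pass through unchanged
print(mask_proto[b:].min().item())  # ~1: remaining inputs are fully mixed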
UNIXKD
UNIXKD-master/dataset/utils.py
import os import os.path import hashlib import gzip import errno import tarfile import zipfile import torch from torch.utils.model_zoo import tqdm def gen_bar_updater(): pbar = tqdm(total=None) def bar_update(count, block_size, total_size): if pbar.total is None and total_size: pbar.total = total_size progress_bytes = count * block_size pbar.update(progress_bytes - pbar.n) return bar_update def calculate_md5(fpath, chunk_size=1024 * 1024): md5 = hashlib.md5() with open(fpath, 'rb') as f: for chunk in iter(lambda: f.read(chunk_size), b''): md5.update(chunk) return md5.hexdigest() def check_md5(fpath, md5, **kwargs): return md5 == calculate_md5(fpath, **kwargs) def check_integrity(fpath, md5=None): if not os.path.isfile(fpath): return False if md5 is None: return True return check_md5(fpath, md5) def download_url(url, root, filename=None, md5=None): """Download a file from a url and place it in root. Args: url (str): URL to download file from root (str): Directory to place downloaded file in filename (str, optional): Name to save the file under. If None, use the basename of the URL md5 (str, optional): MD5 checksum of the download. If None, do not check """ import urllib.request, urllib.error root = os.path.expanduser(root) if not filename: filename = os.path.basename(url) fpath = os.path.join(root, filename) os.makedirs(root, exist_ok=True) # check if file is already present locally if check_integrity(fpath, md5): print('Using downloaded and verified file: ' + fpath) else: # download the file try: print('Downloading ' + url + ' to ' + fpath) urllib.request.urlretrieve( url, fpath, reporthook=gen_bar_updater() ) except (urllib.error.URLError, IOError) as e: if url[:5] == 'https': url = url.replace('https:', 'http:') print('Failed download. Trying https -> http instead.' ' Downloading ' + url + ' to ' + fpath) urllib.request.urlretrieve( url, fpath, reporthook=gen_bar_updater() ) else: raise e # check integrity of downloaded file if not check_integrity(fpath, md5): raise RuntimeError("File not found or corrupted.") def list_dir(root, prefix=False): """List all directories at a given root Args: root (str): Path to directory whose folders need to be listed prefix (bool, optional): If true, prepends the path to each result, otherwise only returns the name of the directories found """ root = os.path.expanduser(root) directories = list( filter( lambda p: os.path.isdir(os.path.join(root, p)), os.listdir(root) ) ) if prefix is True: directories = [os.path.join(root, d) for d in directories] return directories def list_files(root, suffix, prefix=False): """List all files ending with a suffix at a given root Args: root (str): Path to directory whose folders need to be listed suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png'). It uses the Python "str.endswith" method and is passed directly prefix (bool, optional): If true, prepends the path to each result, otherwise only returns the name of the files found """ root = os.path.expanduser(root) files = list( filter( lambda p: os.path.isfile(os.path.join(root, p)) and p.endswith(suffix), os.listdir(root) ) ) if prefix is True: files = [os.path.join(root, d) for d in files] return files def download_file_from_google_drive(file_id, root, filename=None, md5=None): """Download a file from Google Drive and place it in root. Args: file_id (str): id of file to be downloaded root (str): Directory to place downloaded file in filename (str, optional): Name to save the file under. If None, use the id of the file.
md5 (str, optional): MD5 checksum of the download. If None, do not check """ # Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url import requests url = "https://docs.google.com/uc?export=download" root = os.path.expanduser(root) if not filename: filename = file_id fpath = os.path.join(root, filename) os.makedirs(root, exist_ok=True) if os.path.isfile(fpath) and check_integrity(fpath, md5): print('Using downloaded and verified file: ' + fpath) else: session = requests.Session() response = session.get(url, params={'id': file_id}, stream=True) token = _get_confirm_token(response) if token: params = {'id': file_id, 'confirm': token} response = session.get(url, params=params, stream=True) _save_response_content(response, fpath) def _get_confirm_token(response): for key, value in response.cookies.items(): if key.startswith('download_warning'): return value return None def _save_response_content(response, destination, chunk_size=32768): with open(destination, "wb") as f: pbar = tqdm(total=None) progress = 0 for chunk in response.iter_content(chunk_size): if chunk: # filter out keep-alive new chunks f.write(chunk) progress += len(chunk) pbar.update(progress - pbar.n) pbar.close() def _is_tarxz(filename): return filename.endswith(".tar.xz") def _is_tar(filename): return filename.endswith(".tar") def _is_targz(filename): return filename.endswith(".tar.gz") def _is_tgz(filename): return filename.endswith(".tgz") def _is_gzip(filename): return filename.endswith(".gz") and not filename.endswith(".tar.gz") def _is_zip(filename): return filename.endswith(".zip") def extract_archive(from_path, to_path=None, remove_finished=False): if to_path is None: to_path = os.path.dirname(from_path) if _is_tar(from_path): with tarfile.open(from_path, 'r') as tar: tar.extractall(path=to_path) elif _is_targz(from_path) or _is_tgz(from_path): with tarfile.open(from_path, 'r:gz') as tar: tar.extractall(path=to_path) elif _is_tarxz(from_path): with tarfile.open(from_path, 'r:xz') as tar: tar.extractall(path=to_path) elif _is_gzip(from_path): to_path = os.path.join(to_path, os.path.splitext(os.path.basename(from_path))[0]) with open(to_path, "wb") as out_f, gzip.GzipFile(from_path) as zip_f: out_f.write(zip_f.read()) elif _is_zip(from_path): with zipfile.ZipFile(from_path, 'r') as z: z.extractall(to_path) else: raise ValueError("Extraction of {} not supported".format(from_path)) if remove_finished: os.remove(from_path) def download_and_extract_archive(url, download_root, extract_root=None, filename=None, md5=None, remove_finished=False): download_root = os.path.expanduser(download_root) if extract_root is None: extract_root = download_root if not filename: filename = os.path.basename(url) download_url(url, download_root, filename, md5) archive = os.path.join(download_root, filename) print("Extracting {} to {}".format(archive, extract_root)) extract_archive(archive, extract_root, remove_finished) def iterable_to_str(iterable): return "'" + "', '".join([str(item) for item in iterable]) + "'" def verify_str_arg(value, arg=None, valid_values=None, custom_msg=None): if not isinstance(value, torch._six.string_classes): if arg is None: msg = "Expected type str, but got type {type}." else: msg = "Expected type str for argument {arg}, but got type {type}." 
msg = msg.format(type=type(value), arg=arg) raise ValueError(msg) if valid_values is None: return value if value not in valid_values: if custom_msg is not None: msg = custom_msg else: msg = ("Unknown value '{value}' for argument {arg}. " "Valid values are {{{valid_values}}}.") msg = msg.format(value=value, arg=arg, valid_values=iterable_to_str(valid_values)) raise ValueError(msg) return value
8,765
29.975265
109
py
UNIXKD
UNIXKD-master/dataset/__init__.py
from .cifar import CIFAR100, CIFAR10
37
18
36
py
UNIXKD
UNIXKD-master/dataset/vision.py
import os import torch import torch.utils.data as data class VisionDataset(data.Dataset): _repr_indent = 4 def __init__(self, root, transforms=None, transform=None, target_transform=None): if isinstance(root, torch._six.string_classes): root = os.path.expanduser(root) self.root = root has_transforms = transforms is not None has_separate_transform = transform is not None or target_transform is not None if has_transforms and has_separate_transform: raise ValueError("Only transforms or transform/target_transform can " "be passed as argument") # for backwards-compatibility self.transform = transform self.target_transform = target_transform if has_separate_transform: transforms = StandardTransform(transform, target_transform) self.transforms = transforms def __getitem__(self, index): raise NotImplementedError def __len__(self): raise NotImplementedError def __repr__(self): head = "Dataset " + self.__class__.__name__ body = ["Number of datapoints: {}".format(self.__len__())] if self.root is not None: body.append("Root location: {}".format(self.root)) body += self.extra_repr().splitlines() if hasattr(self, "transforms") and self.transforms is not None: body += [repr(self.transforms)] lines = [head] + [" " * self._repr_indent + line for line in body] return '\n'.join(lines) def _format_transform_repr(self, transform, head): lines = transform.__repr__().splitlines() return (["{}{}".format(head, lines[0])] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]) def extra_repr(self): return "" class StandardTransform(object): def __init__(self, transform=None, target_transform=None): self.transform = transform self.target_transform = target_transform def __call__(self, input, target): if self.transform is not None: input = self.transform(input) if self.target_transform is not None: target = self.target_transform(target) return input, target def _format_transform_repr(self, transform, head): lines = transform.__repr__().splitlines() return (["{}{}".format(head, lines[0])] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]) def __repr__(self): body = [self.__class__.__name__] if self.transform is not None: body += self._format_transform_repr(self.transform, "Transform: ") if self.target_transform is not None: body += self._format_transform_repr(self.target_transform, "Target transform: ") return '\n'.join(body)
2,950
35.432099
86
py
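A tiny sketch of StandardTransform, which VisionDataset builds internally when separate transform/target_transform callables are given:

from dataset.vision import StandardTransform

t = StandardTransform(transform=lambda x: x * 2, target_transform=lambda y: y + 1)
print(t(3, 10))  # -> (6, 11): both callables applied in one call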
UNIXKD
UNIXKD-master/dataset/cifar.py
from PIL import Image import os import os.path import numpy as np import pickle from .vision import VisionDataset from .utils import check_integrity, download_and_extract_archive class CIFAR10(VisionDataset): """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset. Args: root (string): Root directory of dataset where directory ``cifar-10-batches-py`` exists or will be saved to if download is set to True. train (bool, optional): If True, creates dataset from training set, otherwise creates from test set. transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.RandomCrop`` target_transform (callable, optional): A function/transform that takes in the target and transforms it. download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. """ base_folder = 'cifar-10-batches-py' url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz" filename = "cifar-10-python.tar.gz" tgz_md5 = 'c58f30108f718f92721af3b95e74349a' train_list = [ ['data_batch_1', 'c99cafc152244af753f735de768cd75f'], ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'], ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'], ['data_batch_4', '634d18415352ddfa80567beed471001a'], ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'], ] test_list = [ ['test_batch', '40351d587109b95175f43aff81a1287e'], ] meta = { 'filename': 'batches.meta', 'key': 'label_names', 'md5': '5ff9c542aee3614f3951f8cda6e48888', } def __init__(self, root, train=True, transform=None, target_transform=None, download=False): super(CIFAR10, self).__init__(root, transform=transform, target_transform=target_transform) self.train = train # training set or test set if download: self.download() if not self._check_integrity(): raise RuntimeError('Dataset not found or corrupted.' + ' You can use download=True to download it') if self.train: downloaded_list = self.train_list else: downloaded_list = self.test_list self.data = [] self.targets = [] # now load the pickled numpy arrays for file_name, checksum in downloaded_list: file_path = os.path.join(self.root, self.base_folder, file_name) with open(file_path, 'rb') as f: entry = pickle.load(f, encoding='latin1') self.data.append(entry['data']) if 'labels' in entry: self.targets.extend(entry['labels']) else: self.targets.extend(entry['fine_labels']) self.data = np.vstack(self.data).reshape(-1, 3, 32, 32) self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC self._load_meta() def _load_meta(self): path = os.path.join(self.root, self.base_folder, self.meta['filename']) if not check_integrity(path, self.meta['md5']): raise RuntimeError('Dataset metadata file not found or corrupted.' + ' You can use download=True to download it') with open(path, 'rb') as infile: data = pickle.load(infile, encoding='latin1') self.classes = data[self.meta['key']] self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)} def __getitem__(self, index): """ Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class.
""" img, target = self.data[index], self.targets[index] # doing this so that it is consistent with all other datasets # to return a PIL Image img = Image.fromarray(img) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) if self.train: return img, target, index else: return img, target def __len__(self): return len(self.data) def _check_integrity(self): root = self.root for fentry in (self.train_list + self.test_list): filename, md5 = fentry[0], fentry[1] fpath = os.path.join(root, self.base_folder, filename) if not check_integrity(fpath, md5): return False return True def download(self): if self._check_integrity(): print('Files already downloaded and verified') return download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5) def extra_repr(self): return "Split: {}".format("Train" if self.train is True else "Test") class CIFAR100(CIFAR10): """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset. This is a subclass of the `CIFAR10` Dataset. """ base_folder = 'cifar-100-python' url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz" filename = "cifar-100-python.tar.gz" tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85' train_list = [ ['train', '16019d7e3df5f24257cddd939b257f8d'], ] test_list = [ ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'], ] meta = { 'filename': 'meta', 'key': 'fine_label_names', 'md5': '7973b15100ade9c7d40fb424638fde48', }
5,819
34.272727
99
py
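The only functional change versus torchvision's CIFAR classes is in __getitem__: in train mode it returns (img, target, index), and that index is what student_v0.py uses to maintain its per-epoch selection counter. Sketch of consuming the extra field:

import torchvision.transforms as T
from torch.utils.data import DataLoader
from dataset import CIFAR100

trainset = CIFAR100('./data', train=True, transform=T.ToTensor(), download=True)
for img, target, index in DataLoader(trainset, batch_size=4):
    print(index)  # dataset positions, stable across epochs
    break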
UNIXKD
UNIXKD-master/models/resnet.py
from __future__ import absolute_import '''Resnet for cifar dataset. Ported from https://github.com/facebook/fb.resnet.torch and https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py (c) YANG, Wei ''' import torch.nn as nn import torch.nn.functional as F import math __all__ = ['resnet'] def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False): super(BasicBlock, self).__init__() self.is_last = is_last self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual preact = out out = F.relu(out) if self.is_last: return out, preact else: return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None, is_last=False): super(Bottleneck, self).__init__() self.is_last = is_last self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual preact = out out = F.relu(out) if self.is_last: return out, preact else: return out class ResNet(nn.Module): def __init__(self, depth, num_filters, block_name='BasicBlock', num_classes=10): super(ResNet, self).__init__() # Model type specifies number of layers for CIFAR-10 model if block_name.lower() == 'basicblock': assert (depth - 2) % 6 == 0, 'When using basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202' n = (depth - 2) // 6 block = BasicBlock elif block_name.lower() == 'bottleneck': assert (depth - 2) % 9 == 0, 'When using bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199'
n = (depth - 2) // 9 block = Bottleneck else: raise ValueError('block_name should be BasicBlock or Bottleneck') self.inplanes = num_filters[0] self.conv1 = nn.Conv2d(3, num_filters[0], kernel_size=3, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(num_filters[0]) self.relu = nn.ReLU(inplace=True) self.layer1 = self._make_layer(block, num_filters[1], n) self.layer2 = self._make_layer(block, num_filters[2], n, stride=2) self.layer3 = self._make_layer(block, num_filters[3], n, stride=2) self.avgpool = nn.AvgPool2d(8) self.fc = nn.Linear(num_filters[3] * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, is_last=(blocks == 1))) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, is_last=(i == blocks-1))) return nn.Sequential(*layers) def get_feat_modules(self): feat_m = nn.ModuleList([]) feat_m.append(self.conv1) feat_m.append(self.bn1) feat_m.append(self.relu) feat_m.append(self.layer1) feat_m.append(self.layer2) feat_m.append(self.layer3) return feat_m def get_bn_before_relu(self): if isinstance(self.layer1[0], Bottleneck): bn1 = self.layer1[-1].bn3 bn2 = self.layer2[-1].bn3 bn3 = self.layer3[-1].bn3 elif isinstance(self.layer1[0], BasicBlock): bn1 = self.layer1[-1].bn2 bn2 = self.layer2[-1].bn2 bn3 = self.layer3[-1].bn2 else: raise NotImplementedError('ResNet unknown block error !!!') return [bn1, bn2, bn3] def forward(self, x, is_feat=False, preact=False): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) # 32x32 f0 = x x, f1_pre = self.layer1(x) # 32x32 f1 = x x, f2_pre = self.layer2(x) # 16x16 f2 = x x, f3_pre = self.layer3(x) # 8x8 f3 = x x = self.avgpool(x) x = x.view(x.size(0), -1) f4 = x x = self.fc(x) if is_feat: if preact: return [f0, f1_pre, f2_pre, f3_pre, f4], x else: return [f0, f1, f2, f3, f4], x else: return x def resnet8(**kwargs): return ResNet(8, [16, 16, 32, 64], 'basicblock', **kwargs) def resnet14(**kwargs): return ResNet(14, [16, 16, 32, 64], 'basicblock', **kwargs) def resnet20(**kwargs): return ResNet(20, [16, 16, 32, 64], 'basicblock', **kwargs) def resnet32(**kwargs): return ResNet(32, [16, 16, 32, 64], 'basicblock', **kwargs) def resnet44(**kwargs): return ResNet(44, [16, 16, 32, 64], 'basicblock', **kwargs) def resnet56(**kwargs): return ResNet(56, [16, 16, 32, 64], 'basicblock', **kwargs) def resnet110(**kwargs): return ResNet(110, [16, 16, 32, 64], 'basicblock', **kwargs) def resnet8x4(**kwargs): return ResNet(8, [32, 64, 128, 256], 'basicblock', **kwargs) def resnet14x4(**kwargs): return ResNet(14, [32, 64, 128, 256], 'basicblock', **kwargs) def resnet32x4(**kwargs): return ResNet(32, [32, 64, 128, 256], 'basicblock', **kwargs) if __name__ == '__main__': import torch x = torch.randn(2, 3, 32, 32) net = resnet8x4(num_classes=20) feats, logit = net(x, is_feat=True, preact=True) for f in feats: print(f.shape, f.min().item()) print(logit.shape) for m in net.get_bn_before_relu(): if
isinstance(m, nn.BatchNorm2d): print('pass') else: print('warning')
7,841
29.161538
116
py
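The ResNet constructor above encodes the CIFAR depth rule: BasicBlock stacks require depth = 6n+2 and Bottleneck stacks 9n+2. Sketch of instantiating a custom depth directly, equivalent to the named factory functions:

from models.resnet import ResNet

# depth 20 = 6*3+2 -> n = 3 BasicBlocks per stage; num_filters sets the per-stage widths
net = ResNet(20, [16, 16, 32, 64], 'basicblock', num_classes=100)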
UNIXKD
UNIXKD-master/models/mobilenetv2.py
""" MobileNetV2 implementation used in <Knowledge Distillation via Route Constrained Optimization> """ import torch import torch.nn as nn import math __all__ = ['mobilenetv2_T_w', 'mobile_half'] BN = None def conv_bn(inp, oup, stride): return nn.Sequential( nn.Conv2d(inp, oup, 3, stride, 1, bias=False), nn.BatchNorm2d(oup), nn.ReLU(inplace=True) ) def conv_1x1_bn(inp, oup): return nn.Sequential( nn.Conv2d(inp, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), nn.ReLU(inplace=True) ) class InvertedResidual(nn.Module): def __init__(self, inp, oup, stride, expand_ratio): super(InvertedResidual, self).__init__() self.blockname = None self.stride = stride assert stride in [1, 2] self.use_res_connect = self.stride == 1 and inp == oup self.conv = nn.Sequential( # pw nn.Conv2d(inp, inp * expand_ratio, 1, 1, 0, bias=False), nn.BatchNorm2d(inp * expand_ratio), nn.ReLU(inplace=True), # dw nn.Conv2d(inp * expand_ratio, inp * expand_ratio, 3, stride, 1, groups=inp * expand_ratio, bias=False), nn.BatchNorm2d(inp * expand_ratio), nn.ReLU(inplace=True), # pw-linear nn.Conv2d(inp * expand_ratio, oup, 1, 1, 0, bias=False), nn.BatchNorm2d(oup), ) self.names = ['0', '1', '2', '3', '4', '5', '6', '7'] def forward(self, x): t = x if self.use_res_connect: return t + self.conv(x) else: return self.conv(x) class MobileNetV2(nn.Module): """mobilenetV2""" def __init__(self, T, feature_dim, input_size=32, width_mult=1., remove_avg=False): super(MobileNetV2, self).__init__() self.remove_avg = remove_avg # setting of inverted residual blocks self.interverted_residual_setting = [ # t, c, n, s [1, 16, 1, 1], [T, 24, 2, 1], [T, 32, 3, 2], [T, 64, 4, 2], [T, 96, 3, 1], [T, 160, 3, 2], [T, 320, 1, 1], ] # building first layer assert input_size % 32 == 0 input_channel = int(32 * width_mult) self.conv1 = conv_bn(3, input_channel, 2) # building inverted residual blocks self.blocks = nn.ModuleList([]) for t, c, n, s in self.interverted_residual_setting: output_channel = int(c * width_mult) layers = [] strides = [s] + [1] * (n - 1) for stride in strides: layers.append( InvertedResidual(input_channel, output_channel, stride, t) ) input_channel = output_channel self.blocks.append(nn.Sequential(*layers)) self.last_channel = int(1280 * width_mult) if width_mult > 1.0 else 1280 self.conv2 = conv_1x1_bn(input_channel, self.last_channel) H = input_size // (32//2) self.avgpool = nn.AvgPool2d(H, ceil_mode=True) # building classifier #self.classifier = nn.Sequential( # # nn.Dropout(0.5), # nn.Linear(self.last_channel, feature_dim), #) self.classifier = nn.Linear(self.last_channel, feature_dim) self._initialize_weights() print(T, width_mult) def get_bn_before_relu(self): bn1 = self.blocks[1][-1].conv[-1] bn2 = self.blocks[2][-1].conv[-1] bn3 = self.blocks[4][-1].conv[-1] bn4 = self.blocks[6][-1].conv[-1] return [bn1, bn2, bn3, bn4] def get_feat_modules(self): feat_m = nn.ModuleList([]) feat_m.append(self.conv1) feat_m.append(self.blocks) return feat_m def forward(self, x, is_feat=False, preact=False): out = self.conv1(x) f0 = out out = self.blocks[0](out) out = self.blocks[1](out) f1 = out out = self.blocks[2](out) f2 = out out = self.blocks[3](out) out = self.blocks[4](out) f3 = out out = self.blocks[5](out) out = self.blocks[6](out) f4 = out out = self.conv2(out) if not self.remove_avg: out = self.avgpool(out) out = out.view(out.size(0), -1) f5 = out out = self.classifier(out) if is_feat: return [f0, f1, f2, f3, f4, f5], out else: return out def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): n = 
m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): n = m.weight.size(1) m.weight.data.normal_(0, 0.01) m.bias.data.zero_() def mobilenetv2_T_w(T, W, feature_dim=100): model = MobileNetV2(T=T, feature_dim=feature_dim, width_mult=W) return model def mobile_half(num_classes): return mobilenetv2_T_w(6, 0.5, num_classes) if __name__ == '__main__': x = torch.randn(2, 3, 32, 32) net = mobile_half(100) feats, logit = net(x, is_feat=True, preact=True) for f in feats: print(f.shape, f.min().item()) print(logit.shape) for m in net.get_bn_before_relu(): if isinstance(m, nn.BatchNorm2d): print('pass') else: print('warning')
5,777
27.323529
115
py
UNIXKD
UNIXKD-master/models/vgg.py
'''VGG for CIFAR10. FC layers are removed. (c) YANG, Wei ''' import torch.nn as nn import torch.nn.functional as F import math __all__ = [ 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19_bn', 'vgg19', ] model_urls = { 'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth', 'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth', 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth', } class VGG(nn.Module): def __init__(self, cfg, batch_norm=False, num_classes=1000): super(VGG, self).__init__() self.block0 = self._make_layers(cfg[0], batch_norm, 3) self.block1 = self._make_layers(cfg[1], batch_norm, cfg[0][-1]) self.block2 = self._make_layers(cfg[2], batch_norm, cfg[1][-1]) self.block3 = self._make_layers(cfg[3], batch_norm, cfg[2][-1]) self.block4 = self._make_layers(cfg[4], batch_norm, cfg[3][-1]) self.pool0 = nn.MaxPool2d(kernel_size=2, stride=2) self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2) self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2) self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2) self.pool4 = nn.AdaptiveAvgPool2d((1, 1)) # self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2) self.classifier = nn.Linear(512, num_classes) self._initialize_weights() def get_feat_modules(self): feat_m = nn.ModuleList([]) feat_m.append(self.block0) feat_m.append(self.pool0) feat_m.append(self.block1) feat_m.append(self.pool1) feat_m.append(self.block2) feat_m.append(self.pool2) feat_m.append(self.block3) feat_m.append(self.pool3) feat_m.append(self.block4) feat_m.append(self.pool4) return feat_m def get_bn_before_relu(self): bn1 = self.block1[-1] bn2 = self.block2[-1] bn3 = self.block3[-1] bn4 = self.block4[-1] return [bn1, bn2, bn3, bn4] def forward(self, x, is_feat=False, preact=False): h = x.shape[2] x = F.relu(self.block0(x)) f0 = x x = self.pool0(x) x = self.block1(x) f1_pre = x x = F.relu(x) f1 = x x = self.pool1(x) x = self.block2(x) f2_pre = x x = F.relu(x) f2 = x x = self.pool2(x) x = self.block3(x) f3_pre = x x = F.relu(x) f3 = x if h == 64: x = self.pool3(x) x = self.block4(x) f4_pre = x x = F.relu(x) f4 = x x = self.pool4(x) x = x.view(x.size(0), -1) f5 = x x = self.classifier(x) if is_feat: if preact: return [f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], x else: return [f0, f1, f2, f3, f4, f5], x else: return x @staticmethod def _make_layers(cfg, batch_norm=False, in_channels=3): layers = [] for v in cfg: if v == 'M': layers += [nn.MaxPool2d(kernel_size=2, stride=2)] else: conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) if batch_norm: layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] else: layers += [conv2d, nn.ReLU(inplace=True)] in_channels = v layers = layers[:-1] return nn.Sequential(*layers) def _initialize_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): n = m.weight.size(1) m.weight.data.normal_(0, 0.01) m.bias.data.zero_() cfg = { 'A': [[64], [128], [256, 256], [512, 512], [512, 512]], 'B': [[64, 64], [128, 128], [256, 256], [512, 512], [512, 512]], 'D': [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]], 'E': [[64, 64], [128, 128], [256, 256, 256, 256], [512, 512, 512, 512], [512, 512, 512, 512]], 'S': [[64], [128], [256], [512], [512]], } def vgg8(**kwargs): """VGG 8-layer model (configuration "S") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = VGG(cfg['S'], **kwargs) return model def vgg8_bn(**kwargs): """VGG 8-layer model (configuration "S") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = VGG(cfg['S'], batch_norm=True, **kwargs) return model def vgg11(**kwargs): """VGG 11-layer model (configuration "A") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = VGG(cfg['A'], **kwargs) return model def vgg11_bn(**kwargs): """VGG 11-layer model (configuration "A") with batch normalization""" model = VGG(cfg['A'], batch_norm=True, **kwargs) return model def vgg13(**kwargs): """VGG 13-layer model (configuration "B") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = VGG(cfg['B'], **kwargs) return model def vgg13_bn(**kwargs): """VGG 13-layer model (configuration "B") with batch normalization""" model = VGG(cfg['B'], batch_norm=True, **kwargs) return model def vgg16(**kwargs): """VGG 16-layer model (configuration "D") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = VGG(cfg['D'], **kwargs) return model def vgg16_bn(**kwargs): """VGG 16-layer model (configuration "D") with batch normalization""" model = VGG(cfg['D'], batch_norm=True, **kwargs) return model def vgg19(**kwargs): """VGG 19-layer model (configuration "E") Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = VGG(cfg['E'], **kwargs) return model def vgg19_bn(**kwargs): """VGG 19-layer model (configuration 'E') with batch normalization""" model = VGG(cfg['E'], batch_norm=True, **kwargs) return model if __name__ == '__main__': import torch x = torch.randn(2, 3, 32, 32) net = vgg19_bn(num_classes=100) feats, logit = net(x, is_feat=True, preact=True) for f in feats: print(f.shape, f.min().item()) print(logit.shape) for m in net.get_bn_before_relu(): if isinstance(m, nn.BatchNorm2d): print('pass') else: print('warning')
6,971
28.417722
98
py
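A minimal usage sketch for the VGG file above (assumptions: torch is installed and the UNIXKD-master root is on PYTHONPATH so that models.vgg is importable). With is_feat=True the forward pass returns one feature map per block plus the pooled embedding, alongside the logits:

import torch
from models.vgg import vgg8_bn  # assumes repo root on PYTHONPATH

net = vgg8_bn(num_classes=10)          # cfg['S']: one conv per block
x = torch.randn(4, 3, 32, 32)          # CIFAR-sized input
feats, logits = net(x, is_feat=True)   # [f0..f4, pooled f5], logits
for f in feats:
    print(tuple(f.shape))              # channel widths follow cfg['S']
print(tuple(logits.shape))             # (4, 10)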
UNIXKD
UNIXKD-master/models/classifier.py
from __future__ import print_function import torch.nn as nn ######################################### # ===== Classifiers ===== # ######################################### class LinearClassifier(nn.Module): def __init__(self, dim_in, n_label=10): super(LinearClassifier, self).__init__() self.net = nn.Linear(dim_in, n_label) def forward(self, x): return self.net(x) class NonLinearClassifier(nn.Module): def __init__(self, dim_in, n_label=10, p=0.1): super(NonLinearClassifier, self).__init__() self.net = nn.Sequential( nn.Linear(dim_in, 200), nn.Dropout(p=p), nn.BatchNorm1d(200), nn.ReLU(inplace=True), nn.Linear(200, n_label), ) def forward(self, x): return self.net(x)
819
21.777778
51
py
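A small sketch of how these heads might sit on top of pooled backbone features (the 512-dim input is illustrative, not fixed by the file; the import assumes the repo root is on PYTHONPATH):

import torch
from models.classifier import LinearClassifier, NonLinearClassifier

feats = torch.randn(8, 512)                       # e.g. globally pooled backbone features
linear_head = LinearClassifier(dim_in=512, n_label=100)
print(linear_head(feats).shape)                   # torch.Size([8, 100])

mlp_head = NonLinearClassifier(dim_in=512, n_label=100, p=0.1)
print(mlp_head(feats).shape)                      # torch.Size([8, 100])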
UNIXKD
UNIXKD-master/models/resnetv2.py
'''ResNet in PyTorch. For Pre-activation ResNet, see 'preact_resnet.py'. Reference: [1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun Deep Residual Learning for Image Recognition. arXiv:1512.03385 ''' import torch import torch.nn as nn import torch.nn.functional as F class BasicBlock(nn.Module): expansion = 1 def __init__(self, in_planes, planes, stride=1, is_last=False): super(BasicBlock, self).__init__() self.is_last = is_last self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion * planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion * planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.bn2(self.conv2(out)) out += self.shortcut(x) preact = out out = F.relu(out) if self.is_last: return out, preact else: return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, in_planes, planes, stride=1, is_last=False): super(Bottleneck, self).__init__() self.is_last = is_last self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(self.expansion * planes) self.shortcut = nn.Sequential() if stride != 1 or in_planes != self.expansion * planes: self.shortcut = nn.Sequential( nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(self.expansion * planes) ) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = F.relu(self.bn2(self.conv2(out))) out = self.bn3(self.conv3(out)) out += self.shortcut(x) preact = out out = F.relu(out) if self.is_last: return out, preact else: return out class ResNet(nn.Module): def __init__(self, block, num_blocks, num_classes=10, zero_init_residual=False): super(ResNet, self).__init__() self.in_planes = 64 self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(64) self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.linear = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) # Zero-initialize the last BN in each residual branch, # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 if zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): nn.init.constant_(m.bn3.weight, 0) elif isinstance(m, BasicBlock): nn.init.constant_(m.bn2.weight, 0) def get_feat_modules(self): feat_m = nn.ModuleList([]) feat_m.append(self.conv1) feat_m.append(self.bn1) feat_m.append(self.layer1) feat_m.append(self.layer2) feat_m.append(self.layer3) feat_m.append(self.layer4) return feat_m def get_bn_before_relu(self): if isinstance(self.layer1[0], Bottleneck): bn1 = self.layer1[-1].bn3 bn2 = self.layer2[-1].bn3 bn3 = self.layer3[-1].bn3 bn4 = self.layer4[-1].bn3 elif isinstance(self.layer1[0], BasicBlock): bn1 = self.layer1[-1].bn2 bn2 = self.layer2[-1].bn2 bn3 = self.layer3[-1].bn2 bn4 = self.layer4[-1].bn2 else: raise NotImplementedError('ResNet unknown block error !!!') return [bn1, bn2, bn3, bn4] def _make_layer(self, block, planes, num_blocks, stride): strides = [stride] + [1] * (num_blocks - 1) layers = [] for i in range(num_blocks): stride = strides[i] layers.append(block(self.in_planes, planes, stride, i == num_blocks - 1)) self.in_planes = planes * block.expansion return nn.Sequential(*layers) def forward(self, x, is_feat=False, preact=False): out = F.relu(self.bn1(self.conv1(x))) f0 = out out, f1_pre = self.layer1(out) f1 = out out, f2_pre = self.layer2(out) f2 = out out, f3_pre = self.layer3(out) f3 = out out, f4_pre = self.layer4(out) f4 = out out = self.avgpool(out) out = out.view(out.size(0), -1) f5 = out out = self.linear(out) if is_feat: if preact: return [[f0, f1_pre, f2_pre, f3_pre, f4_pre, f5], out] else: return [f0, f1, f2, f3, f4, f5], out else: return out def ResNet18(**kwargs): return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) def ResNet34(**kwargs): return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) def ResNet50(**kwargs): return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) def ResNet101(**kwargs): return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) def ResNet152(**kwargs): return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) if __name__ == '__main__': net = ResNet18(num_classes=100) x = torch.randn(2, 3, 32, 32) feats, logit = net(x, is_feat=True, preact=True) for f in feats: print(f.shape, f.min().item()) print(logit.shape) for m in net.get_bn_before_relu(): if isinstance(m, nn.BatchNorm2d): print('pass') else: print('warning')
6,915
33.753769
106
py
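The zero_init_residual comment above can be verified directly: zeroing the last BN weight makes a freshly initialized BasicBlock reduce to relu(shortcut(x)). A minimal check (assuming the repo root is importable):

import torch
from models.resnetv2 import ResNet18  # assumes repo root on PYTHONPATH

net = ResNet18(num_classes=10, zero_init_residual=True).eval()
block = net.layer1[0]                 # BasicBlock with an identity shortcut
x = torch.randn(2, 64, 32, 32)
out = block(x)                        # bn2.weight == 0 zeroes the residual branch
print(torch.allclose(out, torch.relu(x)))  # True: the block starts as an identity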
UNIXKD
UNIXKD-master/models/ShuffleNetv1.py
'''ShuffleNet in PyTorch. See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details. ''' import torch import torch.nn as nn import torch.nn.functional as F class ShuffleBlock(nn.Module): def __init__(self, groups): super(ShuffleBlock, self).__init__() self.groups = groups def forward(self, x): '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]''' N,C,H,W = x.size() g = self.groups return x.view(N,g,C//g,H,W).permute(0,2,1,3,4).reshape(N,C,H,W) class Bottleneck(nn.Module): def __init__(self, in_planes, out_planes, stride, groups, is_last=False): super(Bottleneck, self).__init__() self.is_last = is_last self.stride = stride mid_planes = int(out_planes/4) g = 1 if in_planes == 24 else groups self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False) self.bn1 = nn.BatchNorm2d(mid_planes) self.shuffle1 = ShuffleBlock(groups=g) self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False) self.bn2 = nn.BatchNorm2d(mid_planes) self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False) self.bn3 = nn.BatchNorm2d(out_planes) self.shortcut = nn.Sequential() if stride == 2: self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1)) def forward(self, x): out = F.relu(self.bn1(self.conv1(x))) out = self.shuffle1(out) out = F.relu(self.bn2(self.conv2(out))) out = self.bn3(self.conv3(out)) res = self.shortcut(x) preact = torch.cat([out, res], 1) if self.stride == 2 else out+res out = F.relu(preact) # out = F.relu(torch.cat([out, res], 1)) if self.stride == 2 else F.relu(out+res) if self.is_last: return out, preact else: return out class ShuffleNet(nn.Module): def __init__(self, cfg, num_classes=10): super(ShuffleNet, self).__init__() out_planes = cfg['out_planes'] num_blocks = cfg['num_blocks'] groups = cfg['groups'] self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(24) self.in_planes = 24 self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups) self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups) self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups) self.linear = nn.Linear(out_planes[2], num_classes) def _make_layer(self, out_planes, num_blocks, groups): layers = [] for i in range(num_blocks): stride = 2 if i == 0 else 1 cat_planes = self.in_planes if i == 0 else 0 layers.append(Bottleneck(self.in_planes, out_planes-cat_planes, stride=stride, groups=groups, is_last=(i == num_blocks - 1))) self.in_planes = out_planes return nn.Sequential(*layers) def get_feat_modules(self): feat_m = nn.ModuleList([]) feat_m.append(self.conv1) feat_m.append(self.bn1) feat_m.append(self.layer1) feat_m.append(self.layer2) feat_m.append(self.layer3) return feat_m def get_bn_before_relu(self): raise NotImplementedError('ShuffleNet currently is not supported for "Overhaul" teacher') def forward(self, x, is_feat=False, preact=False): out = F.relu(self.bn1(self.conv1(x))) f0 = out out, f1_pre = self.layer1(out) f1 = out out, f2_pre = self.layer2(out) f2 = out out, f3_pre = self.layer3(out) f3 = out out = F.avg_pool2d(out, 4) out = out.view(out.size(0), -1) f4 = out out = self.linear(out) if is_feat: if preact: return [f0, f1_pre, f2_pre, f3_pre, f4], out else: return [f0, f1, f2, f3, f4], out else: return out def ShuffleV1(**kwargs): cfg = { 'out_planes': [240, 480, 960], 'num_blocks': [4, 8, 4], 'groups': 3 } return ShuffleNet(cfg, **kwargs) if __name__ 
== '__main__': x = torch.randn(2, 3, 32, 32) net = ShuffleV1(num_classes=100) import time a = time.time() feats, logit = net(x, is_feat=True, preact=True) b = time.time() print(b - a) for f in feats: print(f.shape, f.min().item()) print(logit.shape)
4,732
33.05036
126
py
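The channel-shuffle docstring above is easiest to see on a toy tensor; the same view/permute/reshape with g=2 groups over 4 channels interleaves the two halves:

import torch

x = torch.arange(4).view(1, 4, 1, 1)  # channel order [0, 1, 2, 3]
N, C, H, W = x.size()
g = 2
y = x.view(N, g, C // g, H, W).permute(0, 2, 1, 3, 4).reshape(N, C, H, W)
print(y.flatten().tolist())           # [0, 2, 1, 3]: one channel from each group in turn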
UNIXKD
UNIXKD-master/models/util.py
from __future__ import print_function import torch.nn as nn import math class Paraphraser(nn.Module): """Paraphrasing Complex Network: Network Compression via Factor Transfer""" def __init__(self, t_shape, k=0.5, use_bn=False): super(Paraphraser, self).__init__() in_channel = t_shape[1] out_channel = int(t_shape[1] * k) self.encoder = nn.Sequential( nn.Conv2d(in_channel, in_channel, 3, 1, 1), nn.BatchNorm2d(in_channel) if use_bn else nn.Sequential(), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(in_channel, out_channel, 3, 1, 1), nn.BatchNorm2d(out_channel) if use_bn else nn.Sequential(), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(out_channel, out_channel, 3, 1, 1), nn.BatchNorm2d(out_channel) if use_bn else nn.Sequential(), nn.LeakyReLU(0.1, inplace=True), ) self.decoder = nn.Sequential( nn.ConvTranspose2d(out_channel, out_channel, 3, 1, 1), nn.BatchNorm2d(out_channel) if use_bn else nn.Sequential(), nn.LeakyReLU(0.1, inplace=True), nn.ConvTranspose2d(out_channel, in_channel, 3, 1, 1), nn.BatchNorm2d(in_channel) if use_bn else nn.Sequential(), nn.LeakyReLU(0.1, inplace=True), nn.ConvTranspose2d(in_channel, in_channel, 3, 1, 1), nn.BatchNorm2d(in_channel) if use_bn else nn.Sequential(), nn.LeakyReLU(0.1, inplace=True), ) def forward(self, f_s, is_factor=False): factor = self.encoder(f_s) if is_factor: return factor rec = self.decoder(factor) return factor, rec class Translator(nn.Module): def __init__(self, s_shape, t_shape, k=0.5, use_bn=True): super(Translator, self).__init__() in_channel = s_shape[1] out_channel = int(t_shape[1] * k) self.encoder = nn.Sequential( nn.Conv2d(in_channel, in_channel, 3, 1, 1), nn.BatchNorm2d(in_channel) if use_bn else nn.Sequential(), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(in_channel, out_channel, 3, 1, 1), nn.BatchNorm2d(out_channel) if use_bn else nn.Sequential(), nn.LeakyReLU(0.1, inplace=True), nn.Conv2d(out_channel, out_channel, 3, 1, 1), nn.BatchNorm2d(out_channel) if use_bn else nn.Sequential(), nn.LeakyReLU(0.1, inplace=True), ) def forward(self, f_s): return self.encoder(f_s) class Connector(nn.Module): """Connect for Knowledge Transfer via Distillation of Activation Boundaries Formed by Hidden Neurons""" def __init__(self, s_shapes, t_shapes): super(Connector, self).__init__() self.s_shapes = s_shapes self.t_shapes = t_shapes self.connectors = nn.ModuleList(self._make_conenctors(s_shapes, t_shapes)) @staticmethod def _make_conenctors(s_shapes, t_shapes): assert len(s_shapes) == len(t_shapes), 'unequal length of feat list' connectors = [] for s, t in zip(s_shapes, t_shapes): if s[1] == t[1] and s[2] == t[2]: connectors.append(nn.Sequential()) else: connectors.append(ConvReg(s, t, use_relu=False)) return connectors def forward(self, g_s): out = [] for i in range(len(g_s)): out.append(self.connectors[i](g_s[i])) return out class ConnectorV2(nn.Module): """A Comprehensive Overhaul of Feature Distillation (ICCV 2019)""" def __init__(self, s_shapes, t_shapes): super(ConnectorV2, self).__init__() self.s_shapes = s_shapes self.t_shapes = t_shapes self.connectors = nn.ModuleList(self._make_conenctors(s_shapes, t_shapes)) def _make_conenctors(self, s_shapes, t_shapes): assert len(s_shapes) == len(t_shapes), 'unequal length of feat list' t_channels = [t[1] for t in t_shapes] s_channels = [s[1] for s in s_shapes] connectors = nn.ModuleList([self._build_feature_connector(t, s) for t, s in zip(t_channels, s_channels)]) return connectors @staticmethod def _build_feature_connector(t_channel, s_channel): C = [nn.Conv2d(s_channel, t_channel, kernel_size=1, 
stride=1, padding=0, bias=False), nn.BatchNorm2d(t_channel)] for m in C: if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() return nn.Sequential(*C) def forward(self, g_s): out = [] for i in range(len(g_s)): out.append(self.connectors[i](g_s[i])) return out class ConvReg(nn.Module): """Convolutional regression for FitNet""" def __init__(self, s_shape, t_shape, use_relu=True): super(ConvReg, self).__init__() self.use_relu = use_relu s_N, s_C, s_H, s_W = s_shape t_N, t_C, t_H, t_W = t_shape if s_H == 2 * t_H: self.conv = nn.Conv2d(s_C, t_C, kernel_size=3, stride=2, padding=1) elif s_H * 2 == t_H: self.conv = nn.ConvTranspose2d(s_C, t_C, kernel_size=4, stride=2, padding=1) elif s_H >= t_H: self.conv = nn.Conv2d(s_C, t_C, kernel_size=(1+s_H-t_H, 1+s_W-t_W)) else: raise NotImplementedError('student size {}, teacher size {}'.format(s_H, t_H)) self.bn = nn.BatchNorm2d(t_C) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) if self.use_relu: return self.relu(self.bn(x)) else: return self.bn(x) class Regress(nn.Module): """Simple Linear Regression for hints""" def __init__(self, dim_in=1024, dim_out=1024): super(Regress, self).__init__() self.linear = nn.Linear(dim_in, dim_out) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = x.view(x.shape[0], -1) x = self.linear(x) x = self.relu(x) return x class Embed(nn.Module): """Embedding module""" def __init__(self, dim_in=1024, dim_out=128): super(Embed, self).__init__() self.linear = nn.Linear(dim_in, dim_out) self.l2norm = Normalize(2) def forward(self, x): x = x.view(x.shape[0], -1) x = self.linear(x) x = self.l2norm(x) return x class LinearEmbed(nn.Module): """Linear Embedding""" def __init__(self, dim_in=1024, dim_out=128): super(LinearEmbed, self).__init__() self.linear = nn.Linear(dim_in, dim_out) def forward(self, x): x = x.view(x.shape[0], -1) x = self.linear(x) return x class MLPEmbed(nn.Module): """non-linear embed by MLP""" def __init__(self, dim_in=1024, dim_out=128): super(MLPEmbed, self).__init__() self.linear1 = nn.Linear(dim_in, 2 * dim_out) self.relu = nn.ReLU(inplace=True) self.linear2 = nn.Linear(2 * dim_out, dim_out) self.l2norm = Normalize(2) def forward(self, x): x = x.view(x.shape[0], -1) x = self.relu(self.linear1(x)) x = self.l2norm(self.linear2(x)) return x class Normalize(nn.Module): """normalization layer""" def __init__(self, power=2): super(Normalize, self).__init__() self.power = power def forward(self, x): norm = x.pow(self.power).sum(1, keepdim=True).pow(1.
/ self.power) out = x.div(norm) return out class Flatten(nn.Module): """flatten module""" def __init__(self): super(Flatten, self).__init__() def forward(self, feat): return feat.view(feat.size(0), -1) class PoolEmbed(nn.Module): """pool and embed""" def __init__(self, layer=0, dim_out=128, pool_type='avg'): super().__init__() if layer == 0: pool_size = 8 nChannels = 16 elif layer == 1: pool_size = 8 nChannels = 16 elif layer == 2: pool_size = 6 nChannels = 32 elif layer == 3: pool_size = 4 nChannels = 64 elif layer == 4: pool_size = 1 nChannels = 64 else: raise NotImplementedError('layer not supported: {}'.format(layer)) self.embed = nn.Sequential() if layer <= 3: if pool_type == 'max': self.embed.add_module('MaxPool', nn.AdaptiveMaxPool2d((pool_size, pool_size))) elif pool_type == 'avg': self.embed.add_module('AvgPool', nn.AdaptiveAvgPool2d((pool_size, pool_size))) self.embed.add_module('Flatten', Flatten()) self.embed.add_module('Linear', nn.Linear(nChannels*pool_size*pool_size, dim_out)) self.embed.add_module('Normalize', Normalize(2)) def forward(self, x): return self.embed(x) if __name__ == '__main__': import torch g_s = [ torch.randn(2, 16, 16, 16), torch.randn(2, 32, 8, 8), torch.randn(2, 64, 4, 4), ] g_t = [ torch.randn(2, 32, 16, 16), torch.randn(2, 64, 8, 8), torch.randn(2, 128, 4, 4), ] s_shapes = [s.shape for s in g_s] t_shapes = [t.shape for t in g_t] net = ConnectorV2(s_shapes, t_shapes) out = net(g_s) for f in out: print(f.shape)
9,622
32.068729
107
py
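ConvReg above chooses its conv from the spatial sizes alone; a quick sketch regressing a student map onto a larger-channel, half-resolution teacher shape (sizes are illustrative; the import assumes the repo root is on PYTHONPATH):

import torch
from models.util import ConvReg

f_s = torch.randn(2, 16, 16, 16)     # student feature map (N, C, H, W)
t_shape = (2, 32, 8, 8)              # teacher shape with s_H == 2 * t_H
reg = ConvReg(f_s.shape, t_shape)    # picks the stride-2 conv branch
print(reg(f_s).shape)                # torch.Size([2, 32, 8, 8])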
UNIXKD
UNIXKD-master/models/ShuffleNetv2.py
'''ShuffleNetV2 in PyTorch. See the paper "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" for more details. ''' import torch import torch.nn as nn import torch.nn.functional as F class ShuffleBlock(nn.Module): def __init__(self, groups=2): super(ShuffleBlock, self).__init__() self.groups = groups def forward(self, x): '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]''' N, C, H, W = x.size() g = self.groups return x.view(N, g, C//g, H, W).permute(0, 2, 1, 3, 4).reshape(N, C, H, W) class SplitBlock(nn.Module): def __init__(self, ratio): super(SplitBlock, self).__init__() self.ratio = ratio def forward(self, x): c = int(x.size(1) * self.ratio) return x[:, :c, :, :], x[:, c:, :, :] class BasicBlock(nn.Module): def __init__(self, in_channels, split_ratio=0.5, is_last=False): super(BasicBlock, self).__init__() self.is_last = is_last self.split = SplitBlock(split_ratio) in_channels = int(in_channels * split_ratio) self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(in_channels) self.conv2 = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, groups=in_channels, bias=False) self.bn2 = nn.BatchNorm2d(in_channels) self.conv3 = nn.Conv2d(in_channels, in_channels, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(in_channels) self.shuffle = ShuffleBlock() def forward(self, x): x1, x2 = self.split(x) out = F.relu(self.bn1(self.conv1(x2))) out = self.bn2(self.conv2(out)) preact = self.bn3(self.conv3(out)) out = F.relu(preact) # out = F.relu(self.bn3(self.conv3(out))) preact = torch.cat([x1, preact], 1) out = torch.cat([x1, out], 1) out = self.shuffle(out) if self.is_last: return out, preact else: return out class DownBlock(nn.Module): def __init__(self, in_channels, out_channels): super(DownBlock, self).__init__() mid_channels = out_channels // 2 # left self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=1, groups=in_channels, bias=False) self.bn1 = nn.BatchNorm2d(in_channels) self.conv2 = nn.Conv2d(in_channels, mid_channels, kernel_size=1, bias=False) self.bn2 = nn.BatchNorm2d(mid_channels) # right self.conv3 = nn.Conv2d(in_channels, mid_channels, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(mid_channels) self.conv4 = nn.Conv2d(mid_channels, mid_channels, kernel_size=3, stride=2, padding=1, groups=mid_channels, bias=False) self.bn4 = nn.BatchNorm2d(mid_channels) self.conv5 = nn.Conv2d(mid_channels, mid_channels, kernel_size=1, bias=False) self.bn5 = nn.BatchNorm2d(mid_channels) self.shuffle = ShuffleBlock() def forward(self, x): # left out1 = self.bn1(self.conv1(x)) out1 = F.relu(self.bn2(self.conv2(out1))) # right out2 = F.relu(self.bn3(self.conv3(x))) out2 = self.bn4(self.conv4(out2)) out2 = F.relu(self.bn5(self.conv5(out2))) # concat out = torch.cat([out1, out2], 1) out = self.shuffle(out) return out class ShuffleNetV2(nn.Module): def __init__(self, net_size, num_classes=10): super(ShuffleNetV2, self).__init__() out_channels = configs[net_size]['out_channels'] num_blocks = configs[net_size]['num_blocks'] # self.conv1 = nn.Conv2d(3, 24, kernel_size=3, # stride=1, padding=1, bias=False) self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(24) self.in_channels = 24 self.layer1 = self._make_layer(out_channels[0], num_blocks[0]) self.layer2 = self._make_layer(out_channels[1], num_blocks[1]) self.layer3 = self._make_layer(out_channels[2], num_blocks[2]) self.conv2 = nn.Conv2d(out_channels[2], out_channels[3], 
kernel_size=1, stride=1, padding=0, bias=False) self.bn2 = nn.BatchNorm2d(out_channels[3]) self.linear = nn.Linear(out_channels[3], num_classes) def _make_layer(self, out_channels, num_blocks): layers = [DownBlock(self.in_channels, out_channels)] for i in range(num_blocks): layers.append(BasicBlock(out_channels, is_last=(i == num_blocks - 1))) self.in_channels = out_channels return nn.Sequential(*layers) def get_feat_modules(self): feat_m = nn.ModuleList([]) feat_m.append(self.conv1) feat_m.append(self.bn1) feat_m.append(self.layer1) feat_m.append(self.layer2) feat_m.append(self.layer3) return feat_m def get_bn_before_relu(self): raise NotImplementedError('ShuffleNetV2 currently is not supported for "Overhaul" teacher') def forward(self, x, is_feat=False, preact=False): out = F.relu(self.bn1(self.conv1(x))) # out = F.max_pool2d(out, 3, stride=2, padding=1) f0 = out out, f1_pre = self.layer1(out) f1 = out out, f2_pre = self.layer2(out) f2 = out out, f3_pre = self.layer3(out) f3 = out out = F.relu(self.bn2(self.conv2(out))) out = F.avg_pool2d(out, 4) out = out.view(out.size(0), -1) f4 = out out = self.linear(out) if is_feat: if preact: return [f0, f1_pre, f2_pre, f3_pre, f4], out else: return [f0, f1, f2, f3, f4], out else: return out configs = { 0.2: { 'out_channels': (40, 80, 160, 512), 'num_blocks': (3, 3, 3) }, 0.3: { 'out_channels': (40, 80, 160, 512), 'num_blocks': (3, 7, 3) }, 0.5: { 'out_channels': (48, 96, 192, 1024), 'num_blocks': (3, 7, 3) }, 1: { 'out_channels': (116, 232, 464, 1024), 'num_blocks': (3, 7, 3) }, 1.5: { 'out_channels': (176, 352, 704, 1024), 'num_blocks': (3, 7, 3) }, 2: { 'out_channels': (224, 488, 976, 2048), 'num_blocks': (3, 7, 3) } } def ShuffleV2(**kwargs): model = ShuffleNetV2(net_size=1, **kwargs) return model if __name__ == '__main__': net = ShuffleV2(num_classes=100) x = torch.randn(3, 3, 32, 32) import time a = time.time() feats, logit = net(x, is_feat=True, preact=True) b = time.time() print(b - a) for f in feats: print(f.shape, f.min().item()) print(logit.shape)
7,074
32.530806
107
py
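A usage sketch for the width-multiplier table above: ShuffleV2 pins net_size=1, but ShuffleNetV2 can be instantiated at any key of configs (assuming the repo root is importable):

import torch
from models.ShuffleNetv2 import ShuffleNetV2

net = ShuffleNetV2(net_size=0.5, num_classes=100)
x = torch.randn(2, 3, 32, 32)
feats, logits = net(x, is_feat=True)
print([tuple(f.shape) for f in feats])  # stage widths follow configs[0.5]['out_channels']
print(tuple(logits.shape))              # (2, 100)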
UNIXKD
UNIXKD-master/models/__init__.py
from .resnet import resnet8, resnet14, resnet20, resnet32, resnet44, resnet56, resnet110, resnet8x4, resnet32x4, resnet14x4 from .resnetv2 import ResNet18, ResNet34, ResNet50 from .wrn import wrn_16_1, wrn_16_2, wrn_40_1, wrn_40_2 from .vgg import vgg19_bn, vgg16_bn, vgg13_bn, vgg11_bn, vgg8_bn from .mobilenetv2 import mobile_half from .ShuffleNetv1 import ShuffleV1 from .ShuffleNetv2 import ShuffleV2 model_dict = { 'resnet8': resnet8, 'resnet14': resnet14, 'resnet20': resnet20, 'resnet32': resnet32, 'resnet44': resnet44, 'resnet56': resnet56, 'resnet110': resnet110, 'resnet8x4': resnet8x4, 'resnet14x4': resnet14x4, 'resnet32x4': resnet32x4, 'ResNet18': ResNet18, 'ResNet34': ResNet34, 'ResNet50': ResNet50, 'wrn_16_1': wrn_16_1, 'wrn_16_2': wrn_16_2, 'wrn_40_1': wrn_40_1, 'wrn_40_2': wrn_40_2, 'vgg8': vgg8_bn, 'vgg11': vgg11_bn, 'vgg13': vgg13_bn, 'vgg16': vgg16_bn, 'vgg19': vgg19_bn, 'MobileNetV2': mobile_half, 'ShuffleV1': ShuffleV1, 'ShuffleV2': ShuffleV2, }
1,076
28.916667
123
py
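model_dict is the registry that training scripts use to build a network from its string name; a minimal sketch:

import torch
from models import model_dict  # assumes repo root on PYTHONPATH

net = model_dict['vgg8'](num_classes=100)  # resolves to vgg8_bn above
x = torch.randn(2, 3, 32, 32)
print(net(x).shape)                        # torch.Size([2, 100])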
UNIXKD
UNIXKD-master/models/wrn.py
import math import torch import torch.nn as nn import torch.nn.functional as F """ Original Author: Wei Yang """ __all__ = ['wrn'] class BasicBlock(nn.Module): def __init__(self, in_planes, out_planes, stride, dropRate=0.0): super(BasicBlock, self).__init__() self.bn1 = nn.BatchNorm2d(in_planes) self.relu1 = nn.ReLU(inplace=True) self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(out_planes) self.relu2 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False) self.droprate = dropRate self.equalInOut = (in_planes == out_planes) self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False) or None def forward(self, x): if not self.equalInOut: x = self.relu1(self.bn1(x)) else: out = self.relu1(self.bn1(x)) out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x))) if self.droprate > 0: out = F.dropout(out, p=self.droprate, training=self.training) out = self.conv2(out) return torch.add(x if self.equalInOut else self.convShortcut(x), out) class NetworkBlock(nn.Module): def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0): super(NetworkBlock, self).__init__() self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate) def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate): layers = [] for i in range(nb_layers): layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1, dropRate)) return nn.Sequential(*layers) def forward(self, x): return self.layer(x) class WideResNet(nn.Module): def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0): super(WideResNet, self).__init__() nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor] assert (depth - 4) % 6 == 0, 'depth should be 6n+4' n = (depth - 4) // 6 block = BasicBlock # 1st conv before any network block self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False) # 1st block self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate) # 2nd block self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate) # 3rd block self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate) # global average pooling and classifier self.bn1 = nn.BatchNorm2d(nChannels[3]) self.relu = nn.ReLU(inplace=True) self.fc = nn.Linear(nChannels[3], num_classes) self.nChannels = nChannels[3] for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): m.bias.data.zero_() def get_feat_modules(self): feat_m = nn.ModuleList([]) feat_m.append(self.conv1) feat_m.append(self.block1) feat_m.append(self.block2) feat_m.append(self.block3) return feat_m def get_bn_before_relu(self): bn1 = self.block2.layer[0].bn1 bn2 = self.block3.layer[0].bn1 bn3 = self.bn1 return [bn1, bn2, bn3] def forward(self, x, is_feat=False, preact=False): out = self.conv1(x) f0 = out out = self.block1(out) f1 = out out = self.block2(out) f2 = out out = self.block3(out) f3 = out out = self.relu(self.bn1(out)) out = F.avg_pool2d(out, 8) out = out.view(-1, self.nChannels) f4 = out out = self.fc(out) if is_feat: if preact: f1 = self.block2.layer[0].bn1(f1) f2 = self.block3.layer[0].bn1(f2) f3 = self.bn1(f3) return [f0, f1, f2, f3, f4], out else: return out def wrn(**kwargs): """ Constructs a Wide Residual Networks. """ model = WideResNet(**kwargs) return model def wrn_40_2(**kwargs): model = WideResNet(depth=40, widen_factor=2, **kwargs) return model def wrn_40_1(**kwargs): model = WideResNet(depth=40, widen_factor=1, **kwargs) return model def wrn_16_2(**kwargs): model = WideResNet(depth=16, widen_factor=2, **kwargs) return model def wrn_16_1(**kwargs): model = WideResNet(depth=16, widen_factor=1, **kwargs) return model if __name__ == '__main__': import torch x = torch.randn(2, 3, 32, 32) net = wrn_40_2(num_classes=100) feats, logit = net(x, is_feat=True, preact=True) for f in feats: print(f.shape, f.min().item()) print(logit.shape) for m in net.get_bn_before_relu(): if isinstance(m, nn.BatchNorm2d): print('pass') else: print('warning')
5,519
31.280702
116
py
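The assert in WideResNet encodes the depth = 6n + 4 constraint; a quick check of the derived quantities for the wrn_40_2 entry point above:

depth, widen_factor = 40, 2
assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
n = (depth - 4) // 6                                      # BasicBlocks per NetworkBlock
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
print(n, nChannels)                                       # 6 [16, 32, 64, 128]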
GraphLIME
GraphLIME-master/setup.py
import setuptools setuptools.setup( name="graphlime", version="1.2.0", author="williamcchuang", author_email="hang6318179@gmail.com", description="GraphLIME: local interpretable explanations for graph neural networks", # long_description=long_description, # long_description_content_type="text/markdown", url="https://github.com/WilliamCCHuang/GraphLIME", packages=setuptools.find_packages(), classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], python_requires='>=3.6', )
570
26.190476
54
py
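With this setup.py at the repository root, pip install . (or pip install -e . for an editable development install) installs the graphlime package; per the python_requires field above, it targets Python >= 3.6.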
GraphLIME
GraphLIME-master/graphlime/__init__.py
__version__ = '1.2.0' __all__ = [ 'GraphLIME' ] import numpy as np from sklearn.linear_model import LassoLars import torch from torch_geometric.nn import MessagePassing from torch_geometric.utils import k_hop_subgraph class GraphLIME: def __init__(self, model, hop=2, rho=0.1, cached=True): self.hop = hop self.rho = rho self.model = model self.cached = cached self.cached_result = None self.model.eval() def __flow__(self): for module in self.model.modules(): if isinstance(module, MessagePassing): return module.flow return 'source_to_target' def __subgraph__(self, node_idx, x, y, edge_index, **kwargs): num_nodes, num_edges = x.size(0), edge_index.size(1) subset, edge_index, mapping, edge_mask = k_hop_subgraph( node_idx, self.hop, edge_index, relabel_nodes=True, num_nodes=num_nodes, flow=self.__flow__()) x = x[subset] y = y[subset] for key, item in kwargs.items(): if torch.is_tensor(item) and item.size(0) == num_nodes: item = item[subset] elif torch.is_tensor(item) and item.size(0) == num_edges: item = item[edge_mask] kwargs[key] = item return x, y, edge_index, mapping, edge_mask, kwargs def __init_predict__(self, x, edge_index, **kwargs): if self.cached and self.cached_result is not None: if x.size(0) != self.cached_result.size(0): raise RuntimeError( 'Cached {} number of nodes, but found {}.'.format( self.cached_result.size(0), x.size(0))) # get the initial prediction if not self.cached or self.cached_result is None: with torch.no_grad(): log_logits = self.model(x=x, edge_index=edge_index, **kwargs) probas = log_logits.exp() self.cached_result = probas return self.cached_result def __compute_kernel__(self, x, reduce): assert x.ndim == 2, x.shape n, d = x.shape dist = x.reshape(1, n, d) - x.reshape(n, 1, d) # (n, n, d) dist = dist ** 2 if reduce: dist = np.sum(dist, axis=-1, keepdims=True) # (n, n, 1) std = np.sqrt(d) K = np.exp(-dist / (2 * std ** 2 * 0.1 + 1e-10)) # (n, n, 1) or (n, n, d) return K def __compute_gram_matrix__(self, x): # unstable implementation due to matrix product (HxH) # n = x.shape[0] # H = np.eye(n, dtype=np.float) - 1.0 / n * np.ones(n, dtype=np.float) # G = np.dot(np.dot(H, x), H) # more stable and accurate implementation G = x - np.mean(x, axis=0, keepdims=True) G = G - np.mean(G, axis=1, keepdims=True) G = G / (np.linalg.norm(G, ord='fro', axis=(0, 1), keepdims=True) + 1e-10) return G def explain_node(self, node_idx, x, edge_index, **kwargs): probas = self.__init_predict__(x, edge_index, **kwargs) x, probas, _, _, _, _ = self.__subgraph__( node_idx, x, probas, edge_index, **kwargs) x = x.detach().cpu().numpy() # (n, d) y = probas.detach().cpu().numpy() # (n, classes) n, d = x.shape K = self.__compute_kernel__(x, reduce=False) # (n, n, d) L = self.__compute_kernel__(y, reduce=True) # (n, n, 1) K_bar = self.__compute_gram_matrix__(K) # (n, n, d) L_bar = self.__compute_gram_matrix__(L) # (n, n, 1) K_bar = K_bar.reshape(n ** 2, d) # (n ** 2, d) L_bar = L_bar.reshape(n ** 2,) # (n ** 2,) solver = LassoLars(self.rho, fit_intercept=False, normalize=False, positive=True) solver.fit(K_bar * n, L_bar * n) return solver.coef_
3,882
29.81746
89
py
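An end-to-end sketch of the explainer above on a toy graph. Assumptions: torch, torch_geometric and scikit-learn < 1.2 are installed (the sklearn cap is only because the normalize keyword passed to LassoLars above was removed in 1.2), and TinyGCN is a hypothetical stand-in for a trained model returning log-probabilities, as __init_predict__ expects:

import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
from graphlime import GraphLIME  # the class defined above

class TinyGCN(torch.nn.Module):  # hypothetical stand-in for a trained model
    def __init__(self, in_dim=5, num_classes=3):
        super().__init__()
        self.conv = GCNConv(in_dim, num_classes)
    def forward(self, x, edge_index):
        return F.log_softmax(self.conv(x, edge_index), dim=1)

x = torch.randn(6, 5)                                    # 6 nodes, 5 features
edge_index = torch.tensor([[0, 1, 2, 3, 4, 5],
                           [1, 2, 3, 4, 5, 0]])          # a directed ring
explainer = GraphLIME(TinyGCN(), hop=2, rho=0.1)
coefs = explainer.explain_node(0, x, edge_index)         # sparse weights from the Lasso fit
print(coefs.shape)                                       # (5,): one weight per input feature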
GraphLIME
GraphLIME-master/exp/noise_features/other_explainers.py
import copy import numpy as np from tqdm import tqdm from sklearn.linear_model import Ridge import torch class LIME: def __init__(self, model, num_samples, cached=True): self.model = model self.num_samples = num_samples self.cached = cached self.cached_result = None self.model.eval() def __init_predict__(self, x, edge_index, **kwargs): if self.cached and self.cached_result is not None: if x.size(0) != self.cached_result.size(0): raise RuntimeError( 'Cached {} number of nodes, but found {}.'.format( self.cached_result.size(0), x.size(0))) if not self.cached or self.cached_result is None: # get the initial prediction with torch.no_grad(): log_logits = self.model(x=x, edge_index=edge_index, **kwargs) probas = log_logits.exp() self.cached_result = probas return self.cached_result def explain_node(self, node_idx, x, edge_index, **kwargs): probas = self.__init_predict__(x, edge_index, **kwargs) proba, label = probas[node_idx, :].max(dim=0) x_ = copy.deepcopy(x) original_feats = x[node_idx, :] sample_x = [original_feats.detach().cpu().numpy()] sample_y = [proba.item()] for _ in tqdm(range(self.num_samples), desc='collect samples', leave=False): x_[node_idx, :] = original_feats + torch.randn_like(original_feats) with torch.no_grad(): log_logits = self.model(x=x_, edge_index=edge_index, **kwargs) probas_ = log_logits.exp() proba_ = probas_[node_idx, label] sample_x.append(x_[node_idx, :].detach().cpu().numpy()) sample_y.append(proba_.item()) sample_x = np.array(sample_x) sample_y = np.array(sample_y) solver = Ridge(alpha=0.1) solver.fit(sample_x, sample_y) return solver.coef_ class Greedy: def __init__(self, model, cached=True): self.model = model self.cached = cached self.cached_result = None self.model.eval() def __init_predict__(self, x, edge_index, **kwargs): if self.cached and self.cached_result is not None: if x.size(0) != self.cached_result.size(0): raise RuntimeError( 'Cached {} number of nodes, but found {}.'.format( self.cached_result.size(0), x.size(0))) if not self.cached or self.cached_result is None: # get the initial prediction with torch.no_grad(): log_logits = self.model(x=x, edge_index=edge_index, **kwargs) probas = log_logits.exp() self.cached_result = probas return self.cached_result def explain_node(self, node_indices, x, edge_index, **kwargs): if isinstance(node_indices, int): node_indices = [node_indices] probas = self.__init_predict__(x, edge_index, **kwargs) probas, labels = probas[node_indices, :].max(dim=1) # (m,), (m,) num_nodes, num_feats = len(node_indices), x.size(1) delta_probas = np.zeros((num_nodes, num_feats)) # (m, #feats) self.model.eval() for feat_idx in tqdm(range(num_feats), desc='search features', leave=False): x_ = copy.deepcopy(x) x_[:, feat_idx] = 0.0 with torch.no_grad(): log_logits = self.model(x=x_, edge_index=edge_index, **kwargs) probas_ = log_logits.exp() probas_ = probas_[node_indices, :] # (m, #classes) for node_idx in range(num_nodes): proba = probas[node_idx].item() label = labels[node_idx] proba_ = probas_[node_idx, label].item() delta_probas[node_idx, feat_idx] = abs((proba - proba_) / proba) return delta_probas class Random: def __init__(self, num_feats, K): self.num_feats = num_feats self.K = K def explain_node(self): return np.random.choice(self.num_feats, self.K)
4,242
30.902256
84
py
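Greedy above scores feature j for a node by the relative change in the predicted-class probability when that feature column is zeroed; the same quantity on dummy numbers:

proba = 0.80           # predicted-class probability with all features intact
proba_zeroed = 0.60    # probability after zeroing one feature column
score = abs((proba - proba_zeroed) / proba)
print(score)           # 0.25 -- the feature is kept if this exceeds the chosen threshold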
GraphLIME
GraphLIME-master/exp/noise_features/exp_noise_features.py
from os import sys, path as osp sys.path.append(osp.dirname(osp.dirname(osp.dirname(__file__)))) import random import argparse import warnings import numpy as np from tqdm import tqdm import matplotlib.pyplot as plt import torch from torch_geometric.nn import GNNExplainer from models import GAT from graphlime import GraphLIME from other_explainers import LIME, Greedy, Random from utils import prepare_data, extract_test_nodes, train, evaluate, plot_dist warnings.filterwarnings('ignore') INPUT_DIM = { 'Cora': 1433, 'Pubmed': 500 } DIRNAME = osp.dirname(__file__) def build_args(): parser = argparse.ArgumentParser() # data parser.add_argument('--dataset', type=str, default='Cora', help='dataset') parser.add_argument('--model_epochs', type=int, default=400, help='epochs for training a GNN model') parser.add_argument('--model_lr', type=float, default=0.001, help='learning rate for training model') parser.add_argument('--test_samples', type=int, default=200, help='number of test samples') parser.add_argument('--num_noise', type=int, default=10, help='number of noise features to add') # GraphLIME parser.add_argument('--hop', type=int, default=2, help='hops') parser.add_argument('--rho', type=float, default=0.15, help='rho') parser.add_argument('--K', type=int, default=300, help='top-K most importance features') # GNNExplainer parser.add_argument('--masks_epochs', type=int, default=200, help='epochs for training a GNNExplainer') parser.add_argument('--masks_lr', type=float, default=0.01, help='learning rate for training GNNExplainer') parser.add_argument('--masks_threshold', type=float, default=0.1, help='threshold of features for GNNExplainer') # LIME parser.add_argument('--lime_samples', type=int, default=50, help='generate samples for LIME') # Greedy parser.add_argument('--greedy_threshold', type=float, default=0.03, help='threshold of features for Greedy') parser.add_argument('--ymax', type=float, default=1.10, help='max of y-axis') parser.add_argument('--seed', type=int, default=42, help='seed') args = parser.parse_args() return args def check_args(args): assert args.dataset.title() in ['Cora', 'Pubmed'] def fix_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True def find_noise_feats_by_GraphLIME(model, data, args): explainer = GraphLIME(model, hop=args.hop, rho=args.rho) node_indices = extract_test_nodes(data, args.test_samples) num_noise_feats = [] for node_idx in tqdm(node_indices, desc='explain node', leave=False): coefs = explainer.explain_node(node_idx, data.x, data.edge_index) feat_indices = coefs.argsort()[-args.K:] feat_indices = [idx for idx in feat_indices if coefs[idx] > 0.0] num_noise_feat = sum(idx >= INPUT_DIM[args.dataset] for idx in feat_indices) num_noise_feats.append(num_noise_feat) return num_noise_feats def find_noise_feats_by_GNNExplainer(model, data, args): explainer = GNNExplainer(model, epochs=args.masks_epochs, lr=args.masks_lr, num_hops=args.hop, log=False) node_indices = extract_test_nodes(data, args.test_samples) num_noise_feats = [] for node_idx in tqdm(node_indices, desc='explain node', leave=False): node_feat_mask, edge_mask = explainer.explain_node(node_idx, data.x, data.edge_index) node_feat_mask = node_feat_mask.detach().cpu().numpy() feat_indices = node_feat_mask.argsort()[-args.K:] feat_indices = [idx for idx in feat_indices if node_feat_mask[idx] > args.masks_threshold] num_noise_feat = sum(idx >= INPUT_DIM[args.dataset] for idx in feat_indices) 
num_noise_feats.append(num_noise_feat) return num_noise_feats def find_noise_feats_by_LIME(model, data, args): explainer = LIME(model, args.lime_samples) node_indices = extract_test_nodes(data, args.test_samples) num_noise_feats = [] for node_idx in tqdm(node_indices, desc='explain node', leave=False): coefs = explainer.explain_node(node_idx, data.x, data.edge_index) coefs = np.abs(coefs) feat_indices = coefs.argsort()[-args.K:] num_noise_feat = sum(idx >= INPUT_DIM[args.dataset] for idx in feat_indices) num_noise_feats.append(num_noise_feat) return num_noise_feats def find_noise_feats_by_greedy(model, data, args): explainer = Greedy(model) node_indices = extract_test_nodes(data, args.test_samples) delta_probas = explainer.explain_node(node_indices, data.x, data.edge_index) # (#test_samples, #feats) feat_indices = delta_probas.argsort(axis=-1)[:, -args.K:] # (#test_samples, K) num_noise_feats = [] for node_proba, node_feat_indices in zip(delta_probas, feat_indices): node_feat_indices = [feat_idx for feat_idx in node_feat_indices if node_proba[feat_idx] > args.greedy_threshold] num_noise_feat = sum(feat_idx >= INPUT_DIM[args.dataset] for feat_idx in node_feat_indices) num_noise_feats.append(num_noise_feat) return num_noise_feats def find_noise_feats_by_random(data, args): num_feats = data.x.size(1) explainer = Random(num_feats, args.K) num_noise_feats = [] for node_idx in tqdm(range(args.test_samples), desc='explain node', leave=False): feat_indices = explainer.explain_node() noise_feat = (feat_indices >= INPUT_DIM[args.dataset]).sum() num_noise_feats.append(noise_feat) return num_noise_feats def main(): args = build_args() check_args(args) fix_seed(args.seed) data = prepare_data(args) hparams = { 'input_dim': data.x.size(1), 'hidden_dim': 16, 'output_dim': max(data.y).item() + 1 } device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') model = GAT(**hparams).to(device) data = data.to(device) train(model, data, args) # model.load_state_dict(torch.load('./examples/noise_features/model.pth')) test_loss, test_acc = evaluate(model, data, mask=data.test_mask) print('test_loss: {:.4f}, test_acc: {:.4f}'.format(test_loss, test_acc)) if test_acc < 0.8: print('bad model! Please re-run!') exit() print('=== Explain by GraphLIME ===') noise_feats = find_noise_feats_by_GraphLIME(model, data, args) plot_dist(noise_feats, label='GraphLIME', ymax=args.ymax, color='g') print('=== Explain by GNNExplainer ===') noise_feats = find_noise_feats_by_GNNExplainer(model, data, args) plot_dist(noise_feats, label='GNNExplainer', ymax=args.ymax, color='r') print('=== Explain by LIME ===') noise_feats = find_noise_feats_by_LIME(model, data, args) plot_dist(noise_feats, label='LIME', ymax=args.ymax, color='C0') print('=== Explain by Greedy ===') noise_feats = find_noise_feats_by_greedy(model, data, args) plot_dist(noise_feats, label='Greedy', ymax=args.ymax, color='orange') print('=== Explain by Random ===') noise_feats = find_noise_feats_by_random(data, args) plot_dist(noise_feats, label='Random', ymax=args.ymax, color='k', title=f'Distribution of noisy features on {args.dataset} for {model.__class__.__name__}', save_path=f'{DIRNAME}/results/{args.dataset.lower()}.png') plt.show() if __name__ == "__main__": main()
7,487
33.827907
120
py
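The experiment above is driven entirely by its argparse flags; run from the repository root, a typical invocation (the values shown are the script's own defaults) looks like:

python exp/noise_features/exp_noise_features.py --dataset Cora --test_samples 200 --num_noise 10 --K 300 --seed 42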