id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
227,900
gwastro/pycbc
pycbc/fft/fftw_pruned.py
pruned_c2cifft
def pruned_c2cifft(invec, outvec, indices, pretransposed=False):
    """Perform a pruned inverse FFT and return the SNR at selected indices.

    Only intended for power-of-2 length iffts, as that makes the
    decomposition easier to choose. This is not a strict requirement of
    the functions, but anything other than a power of 2 is unlikely to be
    optimal.

    Parameters
    -----------
    invec : array
        The input vector. This should be the correlation between the data
        and the template at full sample rate. Ideally this is
        pre-transposed, but if not this will be transposed in this
        function.
    outvec : array
        Storage for the output of the first phase of the pruned FFT.
    indices : array of ints
        The indices at which to calculate the full sample-rate SNR.
    pretransposed : boolean, default=False
        Whether invec has already been transposed.

    Returns
    --------
    SNRs : array
        The complex SNRs at the indices given by `indices`.
    """
    n_rows, n_cols = splay(invec)
    if not pretransposed:
        invec = fft_transpose(invec)
    # Two-phase pruned iFFT: a full first pass, then only the requested bins.
    first_phase(invec, outvec, N1=n_rows, N2=n_cols)
    return fast_second_phase(outvec, indices, N1=n_rows, N2=n_cols)
python
def pruned_c2cifft(invec, outvec, indices, pretransposed=False): N1, N2 = splay(invec) if not pretransposed: invec = fft_transpose(invec) first_phase(invec, outvec, N1=N1, N2=N2) out = fast_second_phase(outvec, indices, N1=N1, N2=N2) return out
[ "def", "pruned_c2cifft", "(", "invec", ",", "outvec", ",", "indices", ",", "pretransposed", "=", "False", ")", ":", "N1", ",", "N2", "=", "splay", "(", "invec", ")", "if", "not", "pretransposed", ":", "invec", "=", "fft_transpose", "(", "invec", ")", "...
Perform a pruned iFFT, only valid for power of 2 iffts as the decomposition is easier to choose. This is not a strict requirement of the functions, but it is unlikely to be optimal to use anything but power of 2. (Alex to provide more details in write up.) Parameters ----------- invec : array The input vector. This should be the correlation between the data and the template at full sample rate. Ideally this is pre-transposed, but if not this will be transposed in this function. outvec : array The output of the first phase of the pruned FFT. indices : array of ints The indexes at which to calculate the full sample-rate SNR. pretransposed : boolean, default=False Used to indicate whether or not invec is pretransposed. Returns -------- SNRs : array The complex SNRs at the indexes given by indices.
[ "Perform", "a", "pruned", "iFFT", "only", "valid", "for", "power", "of", "2", "iffts", "as", "the", "decomposition", "is", "easier", "to", "choose", ".", "This", "is", "not", "a", "strict", "requirement", "of", "the", "functions", "but", "it", "is", "unl...
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/fft/fftw_pruned.py#L281-L312
227,901
gwastro/pycbc
pycbc/waveform/sinegauss.py
fd_sine_gaussian
def fd_sine_gaussian(amp, quality, central_frequency, fmin, fmax, delta_f):
    """Generate a sine-Gaussian waveform directly in the Fourier domain.

    Parameters
    ----------
    amp: float
        Amplitude of the sine-Gaussian
    quality: float
        The quality factor
    central_frequency: float
        The central frequency of the sine-Gaussian
    fmin: float
        The minimum frequency to generate the sine-Gaussian. This
        determines the length of the output vector.
    fmax: float
        The maximum frequency to generate the sine-Gaussian
    delta_f: float
        The size of the frequency step

    Returns
    -------
    sg: pycbc.types.FrequencySeries
        A Fourier domain sine-Gaussian
    """
    kmin = int(round(fmin / delta_f))
    kmax = int(round(fmax / delta_f))
    freqs = numpy.arange(kmin, kmax) * delta_f

    # Gaussian envelope width and overall normalization
    tau = quality / 2 / numpy.pi / central_frequency
    norm = amp * numpy.pi ** 0.5 / 2 * tau

    band = norm * numpy.exp(-(numpy.pi * tau * (freqs - central_frequency))**2.0)
    # Correction term from the negative-frequency image of the Gaussian
    band *= (1 + numpy.exp(-quality ** 2.0 * freqs / central_frequency))

    out = numpy.zeros(kmax, dtype=numpy.complex128)
    out[kmin:kmax] = band[:]
    return pycbc.types.FrequencySeries(out, delta_f=delta_f)
python
def fd_sine_gaussian(amp, quality, central_frequency, fmin, fmax, delta_f): kmin = int(round(fmin / delta_f)) kmax = int(round(fmax / delta_f)) f = numpy.arange(kmin, kmax) * delta_f tau = quality / 2 / numpy.pi / central_frequency A = amp * numpy.pi ** 0.5 / 2 * tau d = A * numpy.exp(-(numpy.pi * tau * (f - central_frequency))**2.0) d *= (1 + numpy.exp(-quality ** 2.0 * f / central_frequency)) v = numpy.zeros(kmax, dtype=numpy.complex128) v[kmin:kmax] = d[:] return pycbc.types.FrequencySeries(v, delta_f=delta_f)
[ "def", "fd_sine_gaussian", "(", "amp", ",", "quality", ",", "central_frequency", ",", "fmin", ",", "fmax", ",", "delta_f", ")", ":", "kmin", "=", "int", "(", "round", "(", "fmin", "/", "delta_f", ")", ")", "kmax", "=", "int", "(", "round", "(", "fmax...
Generate a Fourier domain sine-Gaussian Parameters ---------- amp: float Amplitude of the sine-Gaussian quality: float The quality factor central_frequency: float The central frequency of the sine-Gaussian fmin: float The minimum frequency to generate the sine-Gaussian. This determines the length of the output vector. fmax: float The maximum frequency to generate the sine-Gaussian delta_f: float The size of the frequency step Returns ------- sg: pycbc.types.Frequencyseries A Fourier domain sine-Gaussian
[ "Generate", "a", "Fourier", "domain", "sine", "-", "Gaussian" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/sinegauss.py#L7-L40
227,902
gwastro/pycbc
pycbc/results/followup.py
columns_from_file_list
def columns_from_file_list(file_list, columns, ifo, start, end):
    """Return columns of information stored in single detector trigger files.

    Parameters
    ----------
    file_list : FileList
        The list of single detector trigger files to read from.
    columns : list of strings
        The list of columns to read from the trigger files.
    ifo : string
        The ifo to return triggers for.
    start : int
        The start time to get triggers from.
    end : int
        The end time to get triggers from.

    Returns
    -------
    trigger_dict : dict
        A dictionary of column vectors with column names as keys.
    """
    file_list = file_list.find_output_with_ifo(ifo)
    file_list = file_list.find_all_output_in_range(ifo, segment(start, end))

    trig_dict = {}
    for trig_file in file_list:
        # BUGFIX: use a context manager so every HDF5 handle is closed as
        # soon as it has been read; the original leaked one handle per file.
        with h5py.File(trig_file.storage_path, 'r') as f:
            time = f['end_time'][:]
            # Keep only triggers strictly inside the requested time window
            pick = numpy.logical_and(time < end, time > start)
            pick_loc = numpy.where(pick)[0]
            for col in columns:
                if col not in trig_dict:
                    trig_dict[col] = []
                trig_dict[col] = numpy.concatenate([trig_dict[col],
                                                    f[col][:][pick_loc]])
    return trig_dict
python
def columns_from_file_list(file_list, columns, ifo, start, end): file_list = file_list.find_output_with_ifo(ifo) file_list = file_list.find_all_output_in_range(ifo, segment(start, end)) trig_dict = {} for trig_file in file_list: f = h5py.File(trig_file.storage_path, 'r') time = f['end_time'][:] pick = numpy.logical_and(time < end, time > start) pick_loc = numpy.where(pick)[0] for col in columns: if col not in trig_dict: trig_dict[col] = [] trig_dict[col] = numpy.concatenate([trig_dict[col], f[col][:][pick_loc]]) return trig_dict
[ "def", "columns_from_file_list", "(", "file_list", ",", "columns", ",", "ifo", ",", "start", ",", "end", ")", ":", "file_list", "=", "file_list", ".", "find_output_with_ifo", "(", "ifo", ")", "file_list", "=", "file_list", ".", "find_all_output_in_range", "(", ...
Return columns of information stored in single detector trigger files. Parameters ---------- file_list_file : string pickle file containing the list of single detector triggers. ifo : string The ifo to return triggers for. columns : list of strings The list of columns to read from the trigger files. start : int The start time to get triggers from end : int The end time to get triggers from Returns ------- trigger_dict : dict A dictionary of column vectors with column names as keys.
[ "Return", "columns", "of", "information", "stored", "in", "single", "detector", "trigger", "files", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/results/followup.py#L39-L78
227,903
gwastro/pycbc
tools/timing/banksim/banksim.py
make_padded_frequency_series
def make_padded_frequency_series(vec, filter_N=None):
    """Pad a TimeSeries with a length of zeros greater than its length, such
    that the total length is the closest power of 2. This prevents the
    effects of wraparound. The result is returned as a FrequencySeries
    scaled by DYN_RANGE_FAC in complex64 precision.

    Parameters
    ----------
    vec : TimeSeries or FrequencySeries
        The input series to pad.
    filter_N : int, optional
        If given, use this time-domain length instead of the computed
        power of 2.

    Returns
    -------
    vectilde : FrequencySeries
        The padded, Fourier-domain series.
    """
    if filter_N is None:
        power = ceil(log(len(vec), 2)) + 1
        # int() keeps N integral even where ceil() returns a float
        N = 2 ** int(power)
    else:
        N = filter_N
    # BUGFIX: use floor division; on Python 3 `N/2+1` is a float and
    # cannot be used as a vector length.
    n = N // 2 + 1

    if isinstance(vec, FrequencySeries):
        vectilde = FrequencySeries(zeros(n, dtype=complex_same_precision_as(vec)),
                                   delta_f=1.0, copy=False)
        # Copy as much of the input as fits in the padded vector
        cplen = min(len(vectilde), len(vec))
        vectilde[0:cplen] = vec[0:cplen]
        delta_f = vec.delta_f

    if isinstance(vec, TimeSeries):
        vec_pad = TimeSeries(zeros(N), delta_t=vec.delta_t,
                             dtype=real_same_precision_as(vec))
        vec_pad[0:len(vec)] = vec
        delta_f = 1.0 / (vec.delta_t * N)
        vectilde = FrequencySeries(zeros(n), delta_f=1.0,
                                   dtype=complex_same_precision_as(vec))
        fft(vec_pad, vectilde)

    vectilde = FrequencySeries(vectilde * DYN_RANGE_FAC,
                               delta_f=delta_f, dtype=complex64)
    return vectilde
python
def make_padded_frequency_series(vec,filter_N=None): if filter_N is None: power = ceil(log(len(vec),2))+1 N = 2 ** power else: N = filter_N n = N/2+1 if isinstance(vec,FrequencySeries): vectilde = FrequencySeries(zeros(n, dtype=complex_same_precision_as(vec)), delta_f=1.0,copy=False) if len(vectilde) < len(vec): cplen = len(vectilde) else: cplen = len(vec) vectilde[0:cplen] = vec[0:cplen] delta_f = vec.delta_f if isinstance(vec,TimeSeries): vec_pad = TimeSeries(zeros(N),delta_t=vec.delta_t, dtype=real_same_precision_as(vec)) vec_pad[0:len(vec)] = vec delta_f = 1.0/(vec.delta_t*N) vectilde = FrequencySeries(zeros(n),delta_f=1.0, dtype=complex_same_precision_as(vec)) fft(vec_pad,vectilde) vectilde = FrequencySeries(vectilde * DYN_RANGE_FAC,delta_f=delta_f,dtype=complex64) return vectilde
[ "def", "make_padded_frequency_series", "(", "vec", ",", "filter_N", "=", "None", ")", ":", "if", "filter_N", "is", "None", ":", "power", "=", "ceil", "(", "log", "(", "len", "(", "vec", ")", ",", "2", ")", ")", "+", "1", "N", "=", "2", "**", "pow...
Pad a TimeSeries with a length of zeros greater than its length, such that the total length is the closest power of 2. This prevents the effects of wraparound.
[ "Pad", "a", "TimeSeries", "with", "a", "length", "of", "zeros", "greater", "than", "its", "length", "such", "that", "the", "total", "length", "is", "the", "closest", "power", "of", "2", ".", "This", "prevents", "the", "effects", "of", "wraparound", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/tools/timing/banksim/banksim.py#L75-L109
227,904
gwastro/pycbc
pycbc/scheme.py
insert_processing_option_group
def insert_processing_option_group(parser):
    """Add the command-line options used to choose a processing scheme.

    This should be used if your program supports the ability to select
    the processing scheme.

    Parameters
    ----------
    parser : object
        OptionParser instance
    """
    # Assemble the (long) help text once, up front, for readability.
    scheme_help = ("The choice of processing scheme. Choices are "
                   + str(list(set(scheme_prefix.values())))
                   + ". (optional for CPU scheme) The number of execution "
                     "threads can be indicated by cpu:NUM_THREADS, where "
                     "NUM_THREADS is an integer. The default is a single "
                     "thread. If the scheme is provided as cpu:env, the "
                     "number of threads can be provided by the "
                     "PYCBC_NUM_THREADS environment variable. If the "
                     "environment variable is not set, the number of "
                     "threads matches the number of logical cores. ")

    group = parser.add_argument_group("Options for selecting the"
                                      " processing scheme in this program.")
    group.add_argument("--processing-scheme",
                       help=scheme_help,
                       default="cpu")
    group.add_argument("--processing-device-id",
                       help="(optional) ID of GPU to use for accelerated "
                            "processing",
                       default=0, type=int)
python
def insert_processing_option_group(parser): processing_group = parser.add_argument_group("Options for selecting the" " processing scheme in this program.") processing_group.add_argument("--processing-scheme", help="The choice of processing scheme. " "Choices are " + str(list(set(scheme_prefix.values()))) + ". (optional for CPU scheme) The number of " "execution threads " "can be indicated by cpu:NUM_THREADS, " "where NUM_THREADS " "is an integer. The default is a single thread. " "If the scheme is provided as cpu:env, the number " "of threads can be provided by the PYCBC_NUM_THREADS " "environment variable. If the environment variable " "is not set, the number of threads matches the number " "of logical cores. ", default="cpu") processing_group.add_argument("--processing-device-id", help="(optional) ID of GPU to use for accelerated " "processing", default=0, type=int)
[ "def", "insert_processing_option_group", "(", "parser", ")", ":", "processing_group", "=", "parser", ".", "add_argument_group", "(", "\"Options for selecting the\"", "\" processing scheme in this program.\"", ")", "processing_group", ".", "add_argument", "(", "\"--processing-sc...
Adds the options used to choose a processing scheme. This should be used if your program supports the ability to select the processing scheme. Parameters ---------- parser : object OptionParser instance
[ "Adds", "the", "options", "used", "to", "choose", "a", "processing", "scheme", ".", "This", "should", "be", "used", "if", "your", "program", "supports", "the", "ability", "to", "select", "the", "processing", "scheme", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/scheme.py#L219-L249
227,905
gwastro/pycbc
pycbc/scheme.py
from_cli
def from_cli(opt):
    """Parse the command line options and return a processing scheme.

    Parameters
    ----------
    opt: object
        Result of parsing the CLI with OptionParser, or any object with
        the required attributes.

    Returns
    -------
    ctx: Scheme
        Returns the requested processing scheme.
    """
    scheme_str = opt.processing_scheme.split(':')
    name = scheme_str[0]

    if name == "cuda":
        logging.info("Running with CUDA support")
        return CUDAScheme(opt.processing_device_id)

    # An optional ":N" suffix selects the thread count for mkl/cpu schemes.
    # Non-numeric suffixes (e.g. "env") are passed through unchanged.
    numt = None
    if len(scheme_str) > 1:
        numt = scheme_str[1]
        if numt.isdigit():
            numt = int(numt)

    if name == "mkl":
        ctx = MKLScheme(num_threads=numt) if numt is not None else MKLScheme()
        logging.info("Running with MKL support: %s threads" % ctx.num_threads)
    else:
        ctx = CPUScheme(num_threads=numt) if numt is not None else CPUScheme()
        logging.info("Running with CPU support: %s threads" % ctx.num_threads)
    return ctx
python
def from_cli(opt): scheme_str = opt.processing_scheme.split(':') name = scheme_str[0] if name == "cuda": logging.info("Running with CUDA support") ctx = CUDAScheme(opt.processing_device_id) elif name == "mkl": if len(scheme_str) > 1: numt = scheme_str[1] if numt.isdigit(): numt = int(numt) ctx = MKLScheme(num_threads=numt) else: ctx = MKLScheme() logging.info("Running with MKL support: %s threads" % ctx.num_threads) else: if len(scheme_str) > 1: numt = scheme_str[1] if numt.isdigit(): numt = int(numt) ctx = CPUScheme(num_threads=numt) else: ctx = CPUScheme() logging.info("Running with CPU support: %s threads" % ctx.num_threads) return ctx
[ "def", "from_cli", "(", "opt", ")", ":", "scheme_str", "=", "opt", ".", "processing_scheme", ".", "split", "(", "':'", ")", "name", "=", "scheme_str", "[", "0", "]", "if", "name", "==", "\"cuda\"", ":", "logging", ".", "info", "(", "\"Running with CUDA s...
Parses the command line options and returns a processing scheme. Parameters ---------- opt: object Result of parsing the CLI with OptionParser, or any object with the required attributes. Returns ------- ctx: Scheme Returns the requested processing scheme.
[ "Parses", "the", "command", "line", "options", "and", "returns", "a", "precessing", "scheme", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/scheme.py#L251-L289
227,906
gwastro/pycbc
pycbc/scheme.py
verify_processing_options
def verify_processing_options(opt, parser):
    """Parse the processing scheme options and verify they are reasonable.

    Parameters
    ----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with
        the required attributes.
    parser : object
        OptionParser instance; parser.error() is invoked on failure.
    """
    scheme_types = scheme_prefix.values()
    scheme_name = opt.processing_scheme.split(':')[0]
    if scheme_name not in scheme_types:
        # BUGFIX: the original passed the "%s" format string without
        # interpolating a value, so users saw a literal "(%s)".
        parser.error("(%s) is not a valid scheme type." % scheme_name)
python
def verify_processing_options(opt, parser): scheme_types = scheme_prefix.values() if opt.processing_scheme.split(':')[0] not in scheme_types: parser.error("(%s) is not a valid scheme type.")
[ "def", "verify_processing_options", "(", "opt", ",", "parser", ")", ":", "scheme_types", "=", "scheme_prefix", ".", "values", "(", ")", "if", "opt", ".", "processing_scheme", ".", "split", "(", "':'", ")", "[", "0", "]", "not", "in", "scheme_types", ":", ...
Parses the processing scheme options and verifies that they are reasonable. Parameters ---------- opt : object Result of parsing the CLI with OptionParser, or any object with the required attributes. parser : object OptionParser instance.
[ "Parses", "the", "processing", "scheme", "options", "and", "verifies", "that", "they", "are", "reasonable", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/scheme.py#L291-L306
227,907
gwastro/pycbc
pycbc/tmpltbank/bank_output_utils.py
convert_to_sngl_inspiral_table
def convert_to_sngl_inspiral_table(params, proc_id):
    '''
    Convert a list of m1,m2,spin1z,spin2z values into a basic sngl_inspiral
    table with mass and spin parameters populated and event IDs assigned

    Parameters
    -----------
    params : iterable
        Each entry in the params iterable should be a sequence of
        [mass1, mass2, spin1z, spin2z] in that order
    proc_id : ilwd char
        Process ID to add to each row of the sngl_inspiral table

    Returns
    ----------
    SnglInspiralTable
        Bank of templates in SnglInspiralTable format
    '''
    table = lsctables.New(lsctables.SnglInspiralTable)
    column_names = ('mass1', 'mass2', 'spin1z', 'spin2z')

    for row_values in params:
        row = return_empty_sngl()
        row.process_id = proc_id
        for name, value in zip(column_names, row_values):
            setattr(row, name, value)
        # Derived mass parameters follow directly from mass1/mass2
        row.mtotal, row.eta = pnutils.mass1_mass2_to_mtotal_eta(
            row.mass1, row.mass2)
        row.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
            row.mass1, row.mass2)
        row.template_duration = 0  # FIXME
        row.event_id = table.get_next_id()
        table.append(row)

    return table
python
def convert_to_sngl_inspiral_table(params, proc_id): ''' Convert a list of m1,m2,spin1z,spin2z values into a basic sngl_inspiral table with mass and spin parameters populated and event IDs assigned Parameters ----------- params : iterable Each entry in the params iterable should be a sequence of [mass1, mass2, spin1z, spin2z] in that order proc_id : ilwd char Process ID to add to each row of the sngl_inspiral table Returns ---------- SnglInspiralTable Bank of templates in SnglInspiralTable format ''' sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable) col_names = ['mass1','mass2','spin1z','spin2z'] for values in params: tmplt = return_empty_sngl() tmplt.process_id = proc_id for colname, value in zip(col_names, values): setattr(tmplt, colname, value) tmplt.mtotal, tmplt.eta = pnutils.mass1_mass2_to_mtotal_eta( tmplt.mass1, tmplt.mass2) tmplt.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta( tmplt.mass1, tmplt.mass2) tmplt.template_duration = 0 # FIXME tmplt.event_id = sngl_inspiral_table.get_next_id() sngl_inspiral_table.append(tmplt) return sngl_inspiral_table
[ "def", "convert_to_sngl_inspiral_table", "(", "params", ",", "proc_id", ")", ":", "sngl_inspiral_table", "=", "lsctables", ".", "New", "(", "lsctables", ".", "SnglInspiralTable", ")", "col_names", "=", "[", "'mass1'", ",", "'mass2'", ",", "'spin1z'", ",", "'spin...
Convert a list of m1,m2,spin1z,spin2z values into a basic sngl_inspiral table with mass and spin parameters populated and event IDs assigned Parameters ----------- params : iterable Each entry in the params iterable should be a sequence of [mass1, mass2, spin1z, spin2z] in that order proc_id : ilwd char Process ID to add to each row of the sngl_inspiral table Returns ---------- SnglInspiralTable Bank of templates in SnglInspiralTable format
[ "Convert", "a", "list", "of", "m1", "m2", "spin1z", "spin2z", "values", "into", "a", "basic", "sngl_inspiral", "table", "with", "mass", "and", "spin", "parameters", "populated", "and", "event", "IDs", "assigned" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/bank_output_utils.py#L102-L137
227,908
gwastro/pycbc
pycbc/tmpltbank/bank_output_utils.py
output_sngl_inspiral_table
def output_sngl_inspiral_table(outputFile, tempBank, metricParams,
                               ethincaParams, programName="", optDict=None,
                               outdoc=None, **kwargs):
    """
    Function that converts the information produced by the various pyCBC bank
    generation codes into a valid LIGOLW xml file containing a sngl_inspiral
    table and outputs to file.

    Parameters
    -----------
    outputFile : string
        Name of the file that the bank will be written to
    tempBank : iterable
        Each entry in the tempBank iterable should be a sequence of
        [mass1,mass2,spin1z,spin2z] in that order.
    metricParams : metricParameters instance
        Structure holding all the options for construction of the metric
        and the eigenvalues, eigenvectors and covariance matrix
        needed to manipulate the space.
    ethincaParams: {ethincaParameters instance, None}
        Structure holding options relevant to the ethinca metric computation
        including the upper frequency cutoff to be used for filtering.
        NOTE: The computation is currently only valid for non-spinning systems
        and uses the TaylorF2 approximant.
    programName (key-word-argument) : string
        Name of the executable that has been run
    optDict (key-word argument) : dictionary
        Dictionary of the command line arguments passed to the program
    outdoc (key-word argument) : ligolw xml document
        If given add template bank to this representation of a xml document
        and write to disk. If not given create a new document.
    kwargs : key-word arguments
        All other key word arguments will be passed directly to
        ligolw_process.register_to_xmldoc
    """
    if optDict is None:
        optDict = {}
    if outdoc is None:
        outdoc = ligolw.Document()
        outdoc.appendChild(ligolw.LIGO_LW())

    # get IFO to put in search summary table
    ifos = []
    if 'channel_name' in optDict:
        if optDict['channel_name'] is not None:
            ifos = [optDict['channel_name'][0:2]]

    proc_id = ligolw_process.register_to_xmldoc(outdoc, programName, optDict,
                                                ifos=ifos, **kwargs).process_id
    sngl_inspiral_table = convert_to_sngl_inspiral_table(tempBank, proc_id)

    # Calculate Gamma components if needed
    if ethincaParams is not None:
        if ethincaParams.doEthinca:
            for sngl in sngl_inspiral_table:
                # Set tau_0 and tau_3 values needed for the calculation of
                # ethinca metric distances
                (sngl.tau0, sngl.tau3) = pnutils.mass1_mass2_to_tau0_tau3(
                    sngl.mass1, sngl.mass2, metricParams.f0)
                fMax_theor, GammaVals = calculate_ethinca_metric_comps(
                    metricParams, ethincaParams,
                    sngl.mass1, sngl.mass2,
                    spin1z=sngl.spin1z, spin2z=sngl.spin2z,
                    full_ethinca=ethincaParams.full_ethinca)
                # assign the upper frequency cutoff and Gamma0-5 values
                sngl.f_final = fMax_theor
                # BUGFIX: xrange does not exist on Python 3; range is
                # equivalent here (and cheap for the <= 6 Gamma values).
                for i in range(len(GammaVals)):
                    setattr(sngl, "Gamma" + str(i), GammaVals[i])
        # If Gamma metric components are not wanted, assign f_final from an
        # upper frequency cutoff specified in ethincaParams
        elif ethincaParams.cutoff is not None:
            for sngl in sngl_inspiral_table:
                sngl.f_final = pnutils.frequency_cutoff_from_name(
                    ethincaParams.cutoff,
                    sngl.mass1, sngl.mass2, sngl.spin1z, sngl.spin2z)

    # set per-template low-frequency cutoff
    if 'f_low_column' in optDict and 'f_low' in optDict and \
            optDict['f_low_column'] is not None:
        for sngl in sngl_inspiral_table:
            setattr(sngl, optDict['f_low_column'], optDict['f_low'])

    outdoc.childNodes[0].appendChild(sngl_inspiral_table)

    # get times to put in search summary table
    start_time = 0
    end_time = 0
    if 'gps_start_time' in optDict and 'gps_end_time' in optDict:
        start_time = optDict['gps_start_time']
        end_time = optDict['gps_end_time']

    # make search summary table
    search_summary_table = lsctables.New(lsctables.SearchSummaryTable)
    search_summary = return_search_summary(start_time, end_time,
                                           len(sngl_inspiral_table), ifos,
                                           **kwargs)
    search_summary_table.append(search_summary)
    outdoc.childNodes[0].appendChild(search_summary_table)

    # write the xml doc to disk
    ligolw_utils.write_filename(outdoc, outputFile,
                                gz=outputFile.endswith('.gz'))
python
def output_sngl_inspiral_table(outputFile, tempBank, metricParams, ethincaParams, programName="", optDict = None, outdoc=None, **kwargs): if optDict is None: optDict = {} if outdoc is None: outdoc = ligolw.Document() outdoc.appendChild(ligolw.LIGO_LW()) # get IFO to put in search summary table ifos = [] if 'channel_name' in optDict.keys(): if optDict['channel_name'] is not None: ifos = [optDict['channel_name'][0:2]] proc_id = ligolw_process.register_to_xmldoc(outdoc, programName, optDict, ifos=ifos, **kwargs).process_id sngl_inspiral_table = convert_to_sngl_inspiral_table(tempBank, proc_id) # Calculate Gamma components if needed if ethincaParams is not None: if ethincaParams.doEthinca: for sngl in sngl_inspiral_table: # Set tau_0 and tau_3 values needed for the calculation of # ethinca metric distances (sngl.tau0,sngl.tau3) = pnutils.mass1_mass2_to_tau0_tau3( sngl.mass1, sngl.mass2, metricParams.f0) fMax_theor, GammaVals = calculate_ethinca_metric_comps( metricParams, ethincaParams, sngl.mass1, sngl.mass2, spin1z=sngl.spin1z, spin2z=sngl.spin2z, full_ethinca=ethincaParams.full_ethinca) # assign the upper frequency cutoff and Gamma0-5 values sngl.f_final = fMax_theor for i in xrange(len(GammaVals)): setattr(sngl, "Gamma"+str(i), GammaVals[i]) # If Gamma metric components are not wanted, assign f_final from an # upper frequency cutoff specified in ethincaParams elif ethincaParams.cutoff is not None: for sngl in sngl_inspiral_table: sngl.f_final = pnutils.frequency_cutoff_from_name( ethincaParams.cutoff, sngl.mass1, sngl.mass2, sngl.spin1z, sngl.spin2z) # set per-template low-frequency cutoff if 'f_low_column' in optDict and 'f_low' in optDict and \ optDict['f_low_column'] is not None: for sngl in sngl_inspiral_table: setattr(sngl, optDict['f_low_column'], optDict['f_low']) outdoc.childNodes[0].appendChild(sngl_inspiral_table) # get times to put in search summary table start_time = 0 end_time = 0 if 'gps_start_time' in optDict.keys() and 'gps_end_time' in 
optDict.keys(): start_time = optDict['gps_start_time'] end_time = optDict['gps_end_time'] # make search summary table search_summary_table = lsctables.New(lsctables.SearchSummaryTable) search_summary = return_search_summary(start_time, end_time, len(sngl_inspiral_table), ifos, **kwargs) search_summary_table.append(search_summary) outdoc.childNodes[0].appendChild(search_summary_table) # write the xml doc to disk ligolw_utils.write_filename(outdoc, outputFile, gz=outputFile.endswith('.gz'))
[ "def", "output_sngl_inspiral_table", "(", "outputFile", ",", "tempBank", ",", "metricParams", ",", "ethincaParams", ",", "programName", "=", "\"\"", ",", "optDict", "=", "None", ",", "outdoc", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "optDict", ...
Function that converts the information produced by the various pyCBC bank generation codes into a valid LIGOLW xml file containing a sngl_inspiral table and outputs to file. Parameters ----------- outputFile : string Name of the file that the bank will be written to tempBank : iterable Each entry in the tempBank iterable should be a sequence of [mass1,mass2,spin1z,spin2z] in that order. metricParams : metricParameters instance Structure holding all the options for construction of the metric and the eigenvalues, eigenvectors and covariance matrix needed to manipulate the space. ethincaParams: {ethincaParameters instance, None} Structure holding options relevant to the ethinca metric computation including the upper frequency cutoff to be used for filtering. NOTE: The computation is currently only valid for non-spinning systems and uses the TaylorF2 approximant. programName (key-word-argument) : string Name of the executable that has been run optDict (key-word argument) : dictionary Dictionary of the command line arguments passed to the program outdoc (key-word argument) : ligolw xml document If given add template bank to this representation of a xml document and write to disk. If not given create a new document. kwargs : key-word arguments All other key word arguments will be passed directly to ligolw_process.register_to_xmldoc
[ "Function", "that", "converts", "the", "information", "produced", "by", "the", "various", "pyCBC", "bank", "generation", "codes", "into", "a", "valid", "LIGOLW", "xml", "file", "containing", "a", "sngl_inspiral", "table", "and", "outputs", "to", "file", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/bank_output_utils.py#L292-L390
227,909
gwastro/pycbc
pycbc/waveform/spa_tmplt.py
spa_length_in_time
def spa_length_in_time(**kwds):
    """Return the length in time of the template, based on the masses,
    PN order, and low-frequency cut-off.
    """
    mass_a = kwds['mass1']
    mass_b = kwds['mass2']
    f_low = kwds['f_lower']
    pn_order = int(kwds['phase_order'])

    # For now, delegate to the swig-wrapped routine in lalinspiral; it
    # would be nice to eventually replace this with a function using PN
    # coefficients from lalsimulation.
    return findchirp_chirptime(mass_a, mass_b, f_low, pn_order)
python
def spa_length_in_time(**kwds): m1 = kwds['mass1'] m2 = kwds['mass2'] flow = kwds['f_lower'] porder = int(kwds['phase_order']) # For now, we call the swig-wrapped function below in # lalinspiral. Eventually would be nice to replace this # with a function using PN coeffs from lalsimulation. return findchirp_chirptime(m1, m2, flow, porder)
[ "def", "spa_length_in_time", "(", "*", "*", "kwds", ")", ":", "m1", "=", "kwds", "[", "'mass1'", "]", "m2", "=", "kwds", "[", "'mass2'", "]", "flow", "=", "kwds", "[", "'f_lower'", "]", "porder", "=", "int", "(", "kwds", "[", "'phase_order'", "]", ...
Returns the length in time of the template, based on the masses, PN order, and low-frequency cut-off.
[ "Returns", "the", "length", "in", "time", "of", "the", "template", "based", "on", "the", "masses", "PN", "order", "and", "low", "-", "frequency", "cut", "-", "off", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/spa_tmplt.py#L80-L94
227,910
gwastro/pycbc
pycbc/waveform/spa_tmplt.py
spa_tmplt_precondition
def spa_tmplt_precondition(length, delta_f, kmin=0):
    """Return the amplitude portion of the TaylorF2 approximant, used to
    precondition the strain data. The result is cached, and so should not
    be modified, only read.
    """
    global _prec
    # Rebuild the cache when it is missing, built for a different delta_f,
    # or too short for the requested length.
    cache_stale = (_prec is None or _prec.delta_f != delta_f
                   or len(_prec) < length)
    if cache_stale:
        freqs = numpy.arange(0, (kmin + length * 2), 1.0) * delta_f
        # f^(-7/6) amplitude profile; skip the DC bin to avoid dividing by 0
        amp = numpy.power(freqs[1:len(freqs)], -7.0/6.0)
        _prec = FrequencySeries(amp, delta_f=delta_f, dtype=float32)
    return _prec[kmin:kmin + length]
python
def spa_tmplt_precondition(length, delta_f, kmin=0): global _prec if _prec is None or _prec.delta_f != delta_f or len(_prec) < length: v = numpy.arange(0, (kmin+length*2), 1.0) * delta_f v = numpy.power(v[1:len(v)], -7.0/6.0) _prec = FrequencySeries(v, delta_f=delta_f, dtype=float32) return _prec[kmin:kmin + length]
[ "def", "spa_tmplt_precondition", "(", "length", ",", "delta_f", ",", "kmin", "=", "0", ")", ":", "global", "_prec", "if", "_prec", "is", "None", "or", "_prec", ".", "delta_f", "!=", "delta_f", "or", "len", "(", "_prec", ")", "<", "length", ":", "v", ...
Return the amplitude portion of the TaylorF2 approximant, used to precondition the strain data. The result is cached, and so should not be modified only read.
[ "Return", "the", "amplitude", "portion", "of", "the", "TaylorF2", "approximant", "used", "to", "precondition", "the", "strain", "data", ".", "The", "result", "is", "cached", "and", "so", "should", "not", "be", "modified", "only", "read", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/spa_tmplt.py#L116-L125
227,911
gwastro/pycbc
pycbc/io/hdf.py
combine_and_copy
def combine_and_copy(f, files, group): """ Combine the same column from multiple files and save to a third""" f[group] = np.concatenate([fi[group][:] if group in fi else \ np.array([], dtype=np.uint32) for fi in files])
python
def combine_and_copy(f, files, group): f[group] = np.concatenate([fi[group][:] if group in fi else \ np.array([], dtype=np.uint32) for fi in files])
[ "def", "combine_and_copy", "(", "f", ",", "files", ",", "group", ")", ":", "f", "[", "group", "]", "=", "np", ".", "concatenate", "(", "[", "fi", "[", "group", "]", "[", ":", "]", "if", "group", "in", "fi", "else", "np", ".", "array", "(", "[",...
Combine the same column from multiple files and save to a third
[ "Combine", "the", "same", "column", "from", "multiple", "files", "and", "save", "to", "a", "third" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/hdf.py#L916-L919
227,912
gwastro/pycbc
pycbc/io/hdf.py
HFile.select
def select(self, fcn, *args, **kwds): """ Return arrays from an hdf5 file that satisfy the given function Parameters ---------- fcn : a function A function that accepts the same number of argument as keys given and returns a boolean array of the same length. args : strings A variable number of strings that are keys into the hdf5. These must refer to arrays of equal length. chunksize : {1e6, int}, optional Number of elements to read and process at a time. return_indices : bool, optional If True, also return the indices of elements passing the function. Returns ------- values : np.ndarrays A variable number of arrays depending on the number of keys into the hdf5 file that are given. If return_indices is True, the first element is an array of indices of elements passing the function. >>> f = HFile(filename) >>> snr = f.select(lambda snr: snr > 6, 'H1/snr') """ # get references to each array refs = {} data = {} for arg in args: refs[arg] = self[arg] data[arg] = [] return_indices = kwds.get('return_indices', False) indices = np.array([], dtype=np.uint64) # To conserve memory read the array in chunks chunksize = kwds.get('chunksize', int(1e6)) size = len(refs[arg]) i = 0 while i < size: r = i + chunksize if i + chunksize < size else size #Read each chunks worth of data and find where it passes the function partial = [refs[arg][i:r] for arg in args] keep = fcn(*partial) if return_indices: indices = np.concatenate([indices, np.flatnonzero(keep) + i]) #store only the results that pass the function for arg, part in zip(args, partial): data[arg].append(part[keep]) i += chunksize # Combine the partial results into full arrays if len(args) == 1: res = np.concatenate(data[args[0]]) if return_indices: return indices, res else: return res else: res = tuple(np.concatenate(data[arg]) for arg in args) if return_indices: return (indices,) + res else: return res
python
def select(self, fcn, *args, **kwds): # get references to each array refs = {} data = {} for arg in args: refs[arg] = self[arg] data[arg] = [] return_indices = kwds.get('return_indices', False) indices = np.array([], dtype=np.uint64) # To conserve memory read the array in chunks chunksize = kwds.get('chunksize', int(1e6)) size = len(refs[arg]) i = 0 while i < size: r = i + chunksize if i + chunksize < size else size #Read each chunks worth of data and find where it passes the function partial = [refs[arg][i:r] for arg in args] keep = fcn(*partial) if return_indices: indices = np.concatenate([indices, np.flatnonzero(keep) + i]) #store only the results that pass the function for arg, part in zip(args, partial): data[arg].append(part[keep]) i += chunksize # Combine the partial results into full arrays if len(args) == 1: res = np.concatenate(data[args[0]]) if return_indices: return indices, res else: return res else: res = tuple(np.concatenate(data[arg]) for arg in args) if return_indices: return (indices,) + res else: return res
[ "def", "select", "(", "self", ",", "fcn", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "# get references to each array", "refs", "=", "{", "}", "data", "=", "{", "}", "for", "arg", "in", "args", ":", "refs", "[", "arg", "]", "=", "self", "[...
Return arrays from an hdf5 file that satisfy the given function Parameters ---------- fcn : a function A function that accepts the same number of argument as keys given and returns a boolean array of the same length. args : strings A variable number of strings that are keys into the hdf5. These must refer to arrays of equal length. chunksize : {1e6, int}, optional Number of elements to read and process at a time. return_indices : bool, optional If True, also return the indices of elements passing the function. Returns ------- values : np.ndarrays A variable number of arrays depending on the number of keys into the hdf5 file that are given. If return_indices is True, the first element is an array of indices of elements passing the function. >>> f = HFile(filename) >>> snr = f.select(lambda snr: snr > 6, 'H1/snr')
[ "Return", "arrays", "from", "an", "hdf5", "file", "that", "satisfy", "the", "given", "function" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/hdf.py#L28-L100
227,913
gwastro/pycbc
pycbc/io/hdf.py
DictArray.select
def select(self, idx): """ Return a new DictArray containing only the indexed values """ data = {} for k in self.data: data[k] = self.data[k][idx] return self._return(data=data)
python
def select(self, idx): data = {} for k in self.data: data[k] = self.data[k][idx] return self._return(data=data)
[ "def", "select", "(", "self", ",", "idx", ")", ":", "data", "=", "{", "}", "for", "k", "in", "self", ".", "data", ":", "data", "[", "k", "]", "=", "self", ".", "data", "[", "k", "]", "[", "idx", "]", "return", "self", ".", "_return", "(", "...
Return a new DictArray containing only the indexed values
[ "Return", "a", "new", "DictArray", "containing", "only", "the", "indexed", "values" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/hdf.py#L156-L162
227,914
gwastro/pycbc
pycbc/io/hdf.py
DictArray.remove
def remove(self, idx): """ Return a new DictArray that does not contain the indexed values """ data = {} for k in self.data: data[k] = np.delete(self.data[k], idx) return self._return(data=data)
python
def remove(self, idx): data = {} for k in self.data: data[k] = np.delete(self.data[k], idx) return self._return(data=data)
[ "def", "remove", "(", "self", ",", "idx", ")", ":", "data", "=", "{", "}", "for", "k", "in", "self", ".", "data", ":", "data", "[", "k", "]", "=", "np", ".", "delete", "(", "self", ".", "data", "[", "k", "]", ",", "idx", ")", "return", "sel...
Return a new DictArray that does not contain the indexed values
[ "Return", "a", "new", "DictArray", "that", "does", "not", "contain", "the", "indexed", "values" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/hdf.py#L164-L170
227,915
gwastro/pycbc
pycbc/io/hdf.py
FileData.mask
def mask(self): """ Create a mask implementing the requested filter on the datasets Returns ------- array of Boolean True for dataset indices to be returned by the get_column method """ if self.filter_func is None: raise RuntimeError("Can't get a mask without a filter function!") else: # only evaluate if no previous calculation was done if self._mask is None: # get required columns into the namespace as numpy arrays for column in self.columns: if column in self.filter_func: setattr(self, column, self.group[column][:]) self._mask = eval(self.filter_func) return self._mask
python
def mask(self): if self.filter_func is None: raise RuntimeError("Can't get a mask without a filter function!") else: # only evaluate if no previous calculation was done if self._mask is None: # get required columns into the namespace as numpy arrays for column in self.columns: if column in self.filter_func: setattr(self, column, self.group[column][:]) self._mask = eval(self.filter_func) return self._mask
[ "def", "mask", "(", "self", ")", ":", "if", "self", ".", "filter_func", "is", "None", ":", "raise", "RuntimeError", "(", "\"Can't get a mask without a filter function!\"", ")", "else", ":", "# only evaluate if no previous calculation was done", "if", "self", ".", "_ma...
Create a mask implementing the requested filter on the datasets Returns ------- array of Boolean True for dataset indices to be returned by the get_column method
[ "Create", "a", "mask", "implementing", "the", "requested", "filter", "on", "the", "datasets" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/hdf.py#L294-L313
227,916
gwastro/pycbc
pycbc/io/hdf.py
DataFromFiles.get_column
def get_column(self, col): """ Loop over files getting the requested dataset values from each Parameters ---------- col : string Name of the dataset to be returned Returns ------- numpy array Values from the dataset, filtered if requested and concatenated in order of file list """ logging.info('getting %s' % col) vals = [] for f in self.files: d = FileData(f, group=self.group, columnlist=self.columns, filter_func=self.filter_func) vals.append(d.get_column(col)) # Close each file since h5py has an upper limit on the number of # open file objects (approx. 1000) d.close() logging.info('- got %i values' % sum(len(v) for v in vals)) return np.concatenate(vals)
python
def get_column(self, col): logging.info('getting %s' % col) vals = [] for f in self.files: d = FileData(f, group=self.group, columnlist=self.columns, filter_func=self.filter_func) vals.append(d.get_column(col)) # Close each file since h5py has an upper limit on the number of # open file objects (approx. 1000) d.close() logging.info('- got %i values' % sum(len(v) for v in vals)) return np.concatenate(vals)
[ "def", "get_column", "(", "self", ",", "col", ")", ":", "logging", ".", "info", "(", "'getting %s'", "%", "col", ")", "vals", "=", "[", "]", "for", "f", "in", "self", ".", "files", ":", "d", "=", "FileData", "(", "f", ",", "group", "=", "self", ...
Loop over files getting the requested dataset values from each Parameters ---------- col : string Name of the dataset to be returned Returns ------- numpy array Values from the dataset, filtered if requested and concatenated in order of file list
[ "Loop", "over", "files", "getting", "the", "requested", "dataset", "values", "from", "each" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/hdf.py#L345-L370
227,917
gwastro/pycbc
pycbc/io/hdf.py
SingleDetTriggers.get_param_names
def get_param_names(cls): """Returns a list of plottable CBC parameter variables""" return [m[0] for m in inspect.getmembers(cls) \ if type(m[1]) == property]
python
def get_param_names(cls): return [m[0] for m in inspect.getmembers(cls) \ if type(m[1]) == property]
[ "def", "get_param_names", "(", "cls", ")", ":", "return", "[", "m", "[", "0", "]", "for", "m", "in", "inspect", ".", "getmembers", "(", "cls", ")", "if", "type", "(", "m", "[", "1", "]", ")", "==", "property", "]" ]
Returns a list of plottable CBC parameter variables
[ "Returns", "a", "list", "of", "plottable", "CBC", "parameter", "variables" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/hdf.py#L436-L439
227,918
gwastro/pycbc
pycbc/inference/sampler/base.py
create_new_output_file
def create_new_output_file(sampler, filename, force=False, injection_file=None, **kwargs): """Creates a new output file. If the output file already exists, an ``OSError`` will be raised. This can be overridden by setting ``force`` to ``True``. Parameters ---------- sampler : sampler instance Sampler filename : str Name of the file to create. force : bool, optional Create the file even if it already exists. Default is False. injection_file : str, optional If an injection was added to the data, write its information. \**kwargs : All other keyword arguments are passed through to the file's ``write_metadata`` function. """ if os.path.exists(filename): if force: os.remove(filename) else: raise OSError("output-file already exists; use force if you " "wish to overwrite it.") logging.info("Creating file {}".format(filename)) with sampler.io(filename, "w") as fp: # create the samples group and sampler info group fp.create_group(fp.samples_group) fp.create_group(fp.sampler_group) # save the sampler's metadata fp.write_sampler_metadata(sampler) # save injection parameters if injection_file is not None: logging.info("Writing injection file to output") # just use the first one fp.write_injections(injection_file)
python
def create_new_output_file(sampler, filename, force=False, injection_file=None, **kwargs): if os.path.exists(filename): if force: os.remove(filename) else: raise OSError("output-file already exists; use force if you " "wish to overwrite it.") logging.info("Creating file {}".format(filename)) with sampler.io(filename, "w") as fp: # create the samples group and sampler info group fp.create_group(fp.samples_group) fp.create_group(fp.sampler_group) # save the sampler's metadata fp.write_sampler_metadata(sampler) # save injection parameters if injection_file is not None: logging.info("Writing injection file to output") # just use the first one fp.write_injections(injection_file)
[ "def", "create_new_output_file", "(", "sampler", ",", "filename", ",", "force", "=", "False", ",", "injection_file", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "if", "force", ":", ...
Creates a new output file. If the output file already exists, an ``OSError`` will be raised. This can be overridden by setting ``force`` to ``True``. Parameters ---------- sampler : sampler instance Sampler filename : str Name of the file to create. force : bool, optional Create the file even if it already exists. Default is False. injection_file : str, optional If an injection was added to the data, write its information. \**kwargs : All other keyword arguments are passed through to the file's ``write_metadata`` function.
[ "Creates", "a", "new", "output", "file", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/base.py#L204-L242
227,919
gwastro/pycbc
pycbc/inference/sampler/base.py
initial_dist_from_config
def initial_dist_from_config(cp, variable_params): r"""Loads a distribution for the sampler start from the given config file. A distribution will only be loaded if the config file has a [initial-\*] section(s). Parameters ---------- cp : Config parser The config parser to try to load from. variable_params : list of str The variable parameters for the distribution. Returns ------- JointDistribution or None : The initial distribution. If no [initial-\*] section found in the config file, will just return None. """ if len(cp.get_subsections("initial")): logging.info("Using a different distribution for the starting points " "than the prior.") initial_dists = distributions.read_distributions_from_config( cp, section="initial") constraints = distributions.read_constraints_from_config( cp, constraint_section="initial_constraint") init_dist = distributions.JointDistribution( variable_params, *initial_dists, **{"constraints": constraints}) else: init_dist = None return init_dist
python
def initial_dist_from_config(cp, variable_params): r"""Loads a distribution for the sampler start from the given config file. A distribution will only be loaded if the config file has a [initial-\*] section(s). Parameters ---------- cp : Config parser The config parser to try to load from. variable_params : list of str The variable parameters for the distribution. Returns ------- JointDistribution or None : The initial distribution. If no [initial-\*] section found in the config file, will just return None. """ if len(cp.get_subsections("initial")): logging.info("Using a different distribution for the starting points " "than the prior.") initial_dists = distributions.read_distributions_from_config( cp, section="initial") constraints = distributions.read_constraints_from_config( cp, constraint_section="initial_constraint") init_dist = distributions.JointDistribution( variable_params, *initial_dists, **{"constraints": constraints}) else: init_dist = None return init_dist
[ "def", "initial_dist_from_config", "(", "cp", ",", "variable_params", ")", ":", "if", "len", "(", "cp", ".", "get_subsections", "(", "\"initial\"", ")", ")", ":", "logging", ".", "info", "(", "\"Using a different distribution for the starting points \"", "\"than the p...
r"""Loads a distribution for the sampler start from the given config file. A distribution will only be loaded if the config file has a [initial-\*] section(s). Parameters ---------- cp : Config parser The config parser to try to load from. variable_params : list of str The variable parameters for the distribution. Returns ------- JointDistribution or None : The initial distribution. If no [initial-\*] section found in the config file, will just return None.
[ "r", "Loads", "a", "distribution", "for", "the", "sampler", "start", "from", "the", "given", "config", "file", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/base.py#L245-L276
227,920
gwastro/pycbc
pycbc/inference/sampler/base.py
BaseSampler.setup_output
def setup_output(self, output_file, force=False, injection_file=None): """Sets up the sampler's checkpoint and output files. The checkpoint file has the same name as the output file, but with ``.checkpoint`` appended to the name. A backup file will also be created. If the output file already exists, an ``OSError`` will be raised. This can be overridden by setting ``force`` to ``True``. Parameters ---------- sampler : sampler instance Sampler output_file : str Name of the output file. force : bool, optional If the output file already exists, overwrite it. injection_file : str, optional If an injection was added to the data, write its information. """ # check for backup file(s) checkpoint_file = output_file + '.checkpoint' backup_file = output_file + '.bkup' # check if we have a good checkpoint and/or backup file logging.info("Looking for checkpoint file") checkpoint_valid = validate_checkpoint_files(checkpoint_file, backup_file) # Create a new file if the checkpoint doesn't exist, or if it is # corrupted self.new_checkpoint = False # keeps track if this is a new file or not if not checkpoint_valid: logging.info("Checkpoint not found or not valid") create_new_output_file(self, checkpoint_file, force=force, injection_file=injection_file) # now the checkpoint is valid self.new_checkpoint = True # copy to backup shutil.copy(checkpoint_file, backup_file) # write the command line, startup for fn in [checkpoint_file, backup_file]: with self.io(fn, "a") as fp: fp.write_command_line() fp.write_resume_point() # store self.checkpoint_file = checkpoint_file self.backup_file = backup_file self.checkpoint_valid = checkpoint_valid
python
def setup_output(self, output_file, force=False, injection_file=None): # check for backup file(s) checkpoint_file = output_file + '.checkpoint' backup_file = output_file + '.bkup' # check if we have a good checkpoint and/or backup file logging.info("Looking for checkpoint file") checkpoint_valid = validate_checkpoint_files(checkpoint_file, backup_file) # Create a new file if the checkpoint doesn't exist, or if it is # corrupted self.new_checkpoint = False # keeps track if this is a new file or not if not checkpoint_valid: logging.info("Checkpoint not found or not valid") create_new_output_file(self, checkpoint_file, force=force, injection_file=injection_file) # now the checkpoint is valid self.new_checkpoint = True # copy to backup shutil.copy(checkpoint_file, backup_file) # write the command line, startup for fn in [checkpoint_file, backup_file]: with self.io(fn, "a") as fp: fp.write_command_line() fp.write_resume_point() # store self.checkpoint_file = checkpoint_file self.backup_file = backup_file self.checkpoint_valid = checkpoint_valid
[ "def", "setup_output", "(", "self", ",", "output_file", ",", "force", "=", "False", ",", "injection_file", "=", "None", ")", ":", "# check for backup file(s)", "checkpoint_file", "=", "output_file", "+", "'.checkpoint'", "backup_file", "=", "output_file", "+", "'....
Sets up the sampler's checkpoint and output files. The checkpoint file has the same name as the output file, but with ``.checkpoint`` appended to the name. A backup file will also be created. If the output file already exists, an ``OSError`` will be raised. This can be overridden by setting ``force`` to ``True``. Parameters ---------- sampler : sampler instance Sampler output_file : str Name of the output file. force : bool, optional If the output file already exists, overwrite it. injection_file : str, optional If an injection was added to the data, write its information.
[ "Sets", "up", "the", "sampler", "s", "checkpoint", "and", "output", "files", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/sampler/base.py#L146-L193
227,921
gwastro/pycbc
pycbc/types/frequencyseries.py
load_frequencyseries
def load_frequencyseries(path, group=None): """ Load a FrequencySeries from a .hdf, .txt or .npy file. The default data types will be double precision floating point. Parameters ---------- path: string source file path. Must end with either .npy or .txt. group: string Additional name for internal storage use. Ex. hdf storage uses this as the key value. Raises ------ ValueError If path does not end in .npy or .txt. """ ext = _os.path.splitext(path)[1] if ext == '.npy': data = _numpy.load(path) elif ext == '.txt': data = _numpy.loadtxt(path) elif ext == '.hdf': key = 'data' if group is None else group f = h5py.File(path, 'r') data = f[key][:] series = FrequencySeries(data, delta_f=f[key].attrs['delta_f'], epoch=f[key].attrs['epoch']) f.close() return series else: raise ValueError('Path must end with .npy, .hdf, or .txt') if data.ndim == 2: delta_f = (data[-1][0] - data[0][0]) / (len(data)-1) epoch = _lal.LIGOTimeGPS(data[0][0]) return FrequencySeries(data[:,1], delta_f=delta_f, epoch=epoch) elif data.ndim == 3: delta_f = (data[-1][0] - data[0][0]) / (len(data)-1) epoch = _lal.LIGOTimeGPS(data[0][0]) return FrequencySeries(data[:,1] + 1j*data[:,2], delta_f=delta_f, epoch=epoch) else: raise ValueError('File has %s dimensions, cannot convert to Array, \ must be 2 (real) or 3 (complex)' % data.ndim)
python
def load_frequencyseries(path, group=None): ext = _os.path.splitext(path)[1] if ext == '.npy': data = _numpy.load(path) elif ext == '.txt': data = _numpy.loadtxt(path) elif ext == '.hdf': key = 'data' if group is None else group f = h5py.File(path, 'r') data = f[key][:] series = FrequencySeries(data, delta_f=f[key].attrs['delta_f'], epoch=f[key].attrs['epoch']) f.close() return series else: raise ValueError('Path must end with .npy, .hdf, or .txt') if data.ndim == 2: delta_f = (data[-1][0] - data[0][0]) / (len(data)-1) epoch = _lal.LIGOTimeGPS(data[0][0]) return FrequencySeries(data[:,1], delta_f=delta_f, epoch=epoch) elif data.ndim == 3: delta_f = (data[-1][0] - data[0][0]) / (len(data)-1) epoch = _lal.LIGOTimeGPS(data[0][0]) return FrequencySeries(data[:,1] + 1j*data[:,2], delta_f=delta_f, epoch=epoch) else: raise ValueError('File has %s dimensions, cannot convert to Array, \ must be 2 (real) or 3 (complex)' % data.ndim)
[ "def", "load_frequencyseries", "(", "path", ",", "group", "=", "None", ")", ":", "ext", "=", "_os", ".", "path", ".", "splitext", "(", "path", ")", "[", "1", "]", "if", "ext", "==", "'.npy'", ":", "data", "=", "_numpy", ".", "load", "(", "path", ...
Load a FrequencySeries from a .hdf, .txt or .npy file. The default data types will be double precision floating point. Parameters ---------- path: string source file path. Must end with either .npy or .txt. group: string Additional name for internal storage use. Ex. hdf storage uses this as the key value. Raises ------ ValueError If path does not end in .npy or .txt.
[ "Load", "a", "FrequencySeries", "from", "a", ".", "hdf", ".", "txt", "or", ".", "npy", "file", ".", "The", "default", "data", "types", "will", "be", "double", "precision", "floating", "point", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/types/frequencyseries.py#L552-L598
227,922
gwastro/pycbc
pycbc/types/frequencyseries.py
FrequencySeries.almost_equal_elem
def almost_equal_elem(self,other,tol,relative=True,dtol=0.0): """ Compare whether two frequency series are almost equal, element by element. If the 'relative' parameter is 'True' (the default) then the 'tol' parameter (which must be positive) is interpreted as a relative tolerance, and the comparison returns 'True' only if abs(self[i]-other[i]) <= tol*abs(self[i]) for all elements of the series. If 'relative' is 'False', then 'tol' is an absolute tolerance, and the comparison is true only if abs(self[i]-other[i]) <= tol for all elements of the series. The method also checks that self.delta_f is within 'dtol' of other.delta_f; if 'dtol' has its default value of 0 then exact equality between the two is required. Other meta-data (type, dtype, length, and epoch) must be exactly equal. If either object's memory lives on the GPU it will be copied to the CPU for the comparison, which may be slow. But the original object itself will not have its memory relocated nor scheme changed. Parameters ---------- other: another Python object, that should be tested for almost-equality with 'self', element-by-element. tol: a non-negative number, the tolerance, which is interpreted as either a relative tolerance (the default) or an absolute tolerance. relative: A boolean, indicating whether 'tol' should be interpreted as a relative tolerance (if True, the default if this argument is omitted) or as an absolute tolerance (if tol is False). dtol: a non-negative number, the tolerance for delta_f. Like 'tol', it is interpreted as relative or absolute based on the value of 'relative'. This parameter defaults to zero, enforcing exact equality between the delta_f values of the two FrequencySeries. Returns ------- boolean: 'True' if the data and delta_fs agree within the tolerance, as interpreted by the 'relative' keyword, and if the types, lengths, dtypes, and epochs are exactly the same. """ # Check that the delta_f tolerance is non-negative; raise an exception # if needed. 
if (dtol < 0.0): raise ValueError("Tolerance in delta_f cannot be negative") if super(FrequencySeries,self).almost_equal_elem(other,tol=tol,relative=relative): if relative: return (self._epoch == other._epoch and abs(self._delta_f-other._delta_f) <= dtol*self._delta_f) else: return (self._epoch == other._epoch and abs(self._delta_f-other._delta_f) <= dtol) else: return False
python
def almost_equal_elem(self,other,tol,relative=True,dtol=0.0): # Check that the delta_f tolerance is non-negative; raise an exception # if needed. if (dtol < 0.0): raise ValueError("Tolerance in delta_f cannot be negative") if super(FrequencySeries,self).almost_equal_elem(other,tol=tol,relative=relative): if relative: return (self._epoch == other._epoch and abs(self._delta_f-other._delta_f) <= dtol*self._delta_f) else: return (self._epoch == other._epoch and abs(self._delta_f-other._delta_f) <= dtol) else: return False
[ "def", "almost_equal_elem", "(", "self", ",", "other", ",", "tol", ",", "relative", "=", "True", ",", "dtol", "=", "0.0", ")", ":", "# Check that the delta_f tolerance is non-negative; raise an exception", "# if needed.", "if", "(", "dtol", "<", "0.0", ")", ":", ...
Compare whether two frequency series are almost equal, element by element. If the 'relative' parameter is 'True' (the default) then the 'tol' parameter (which must be positive) is interpreted as a relative tolerance, and the comparison returns 'True' only if abs(self[i]-other[i]) <= tol*abs(self[i]) for all elements of the series. If 'relative' is 'False', then 'tol' is an absolute tolerance, and the comparison is true only if abs(self[i]-other[i]) <= tol for all elements of the series. The method also checks that self.delta_f is within 'dtol' of other.delta_f; if 'dtol' has its default value of 0 then exact equality between the two is required. Other meta-data (type, dtype, length, and epoch) must be exactly equal. If either object's memory lives on the GPU it will be copied to the CPU for the comparison, which may be slow. But the original object itself will not have its memory relocated nor scheme changed. Parameters ---------- other: another Python object, that should be tested for almost-equality with 'self', element-by-element. tol: a non-negative number, the tolerance, which is interpreted as either a relative tolerance (the default) or an absolute tolerance. relative: A boolean, indicating whether 'tol' should be interpreted as a relative tolerance (if True, the default if this argument is omitted) or as an absolute tolerance (if tol is False). dtol: a non-negative number, the tolerance for delta_f. Like 'tol', it is interpreted as relative or absolute based on the value of 'relative'. This parameter defaults to zero, enforcing exact equality between the delta_f values of the two FrequencySeries. Returns ------- boolean: 'True' if the data and delta_fs agree within the tolerance, as interpreted by the 'relative' keyword, and if the types, lengths, dtypes, and epochs are exactly the same.
[ "Compare", "whether", "two", "frequency", "series", "are", "almost", "equal", "element", "by", "element", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/types/frequencyseries.py#L210-L269
227,923
gwastro/pycbc
pycbc/types/frequencyseries.py
FrequencySeries.lal
def lal(self): """Produces a LAL frequency series object equivalent to self. Returns ------- lal_data : {lal.*FrequencySeries} LAL frequency series object containing the same data as self. The actual type depends on the sample's dtype. If the epoch of self was 'None', the epoch of the returned LAL object will be LIGOTimeGPS(0,0); otherwise, the same as that of self. Raises ------ TypeError If frequency series is stored in GPU memory. """ lal_data = None if self._epoch is None: ep = _lal.LIGOTimeGPS(0,0) else: ep = self._epoch if self._data.dtype == _numpy.float32: lal_data = _lal.CreateREAL4FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self)) elif self._data.dtype == _numpy.float64: lal_data = _lal.CreateREAL8FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self)) elif self._data.dtype == _numpy.complex64: lal_data = _lal.CreateCOMPLEX8FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self)) elif self._data.dtype == _numpy.complex128: lal_data = _lal.CreateCOMPLEX16FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self)) lal_data.data.data[:] = self.numpy() return lal_data
python
def lal(self): lal_data = None if self._epoch is None: ep = _lal.LIGOTimeGPS(0,0) else: ep = self._epoch if self._data.dtype == _numpy.float32: lal_data = _lal.CreateREAL4FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self)) elif self._data.dtype == _numpy.float64: lal_data = _lal.CreateREAL8FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self)) elif self._data.dtype == _numpy.complex64: lal_data = _lal.CreateCOMPLEX8FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self)) elif self._data.dtype == _numpy.complex128: lal_data = _lal.CreateCOMPLEX16FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self)) lal_data.data.data[:] = self.numpy() return lal_data
[ "def", "lal", "(", "self", ")", ":", "lal_data", "=", "None", "if", "self", ".", "_epoch", "is", "None", ":", "ep", "=", "_lal", ".", "LIGOTimeGPS", "(", "0", ",", "0", ")", "else", ":", "ep", "=", "self", ".", "_epoch", "if", "self", ".", "_da...
Produces a LAL frequency series object equivalent to self. Returns ------- lal_data : {lal.*FrequencySeries} LAL frequency series object containing the same data as self. The actual type depends on the sample's dtype. If the epoch of self was 'None', the epoch of the returned LAL object will be LIGOTimeGPS(0,0); otherwise, the same as that of self. Raises ------ TypeError If frequency series is stored in GPU memory.
[ "Produces", "a", "LAL", "frequency", "series", "object", "equivalent", "to", "self", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/types/frequencyseries.py#L330-L364
227,924
gwastro/pycbc
pycbc/types/frequencyseries.py
FrequencySeries.save
def save(self, path, group=None, ifo='P1'): """ Save frequency series to a Numpy .npy, hdf, or text file. The first column contains the sample frequencies, the second contains the values. In the case of a complex frequency series saved as text, the imaginary part is written as a third column. When using hdf format, the data is stored as a single vector, along with relevant attributes. Parameters ---------- path: string Destination file path. Must end with either .hdf, .npy or .txt. group: string Additional name for internal storage use. Ex. hdf storage uses this as the key value. Raises ------ ValueError If path does not end in .npy or .txt. """ ext = _os.path.splitext(path)[1] if ext == '.npy': output = _numpy.vstack((self.sample_frequencies.numpy(), self.numpy())).T _numpy.save(path, output) elif ext == '.txt': if self.kind == 'real': output = _numpy.vstack((self.sample_frequencies.numpy(), self.numpy())).T elif self.kind == 'complex': output = _numpy.vstack((self.sample_frequencies.numpy(), self.numpy().real, self.numpy().imag)).T _numpy.savetxt(path, output) elif ext == '.xml' or path.endswith('.xml.gz'): from pycbc.io.live import make_psd_xmldoc from glue.ligolw import utils if self.kind != 'real': raise ValueError('XML only supports real frequency series') output = self.lal() # When writing in this format we must *not* have the 0 values at # frequencies less than flow. To resolve this we set the first # non-zero value < flow. 
data_lal = output.data.data first_idx = _numpy.argmax(data_lal>0) if not first_idx == 0: data_lal[:first_idx] = data_lal[first_idx] psddict = {ifo: output} utils.write_filename(make_psd_xmldoc(psddict), path, gz=path.endswith(".gz")) elif ext =='.hdf': key = 'data' if group is None else group f = h5py.File(path) ds = f.create_dataset(key, data=self.numpy(), compression='gzip', compression_opts=9, shuffle=True) ds.attrs['epoch'] = float(self.epoch) ds.attrs['delta_f'] = float(self.delta_f) else: raise ValueError('Path must end with .npy, .txt, .xml, .xml.gz ' 'or .hdf')
python
def save(self, path, group=None, ifo='P1'): ext = _os.path.splitext(path)[1] if ext == '.npy': output = _numpy.vstack((self.sample_frequencies.numpy(), self.numpy())).T _numpy.save(path, output) elif ext == '.txt': if self.kind == 'real': output = _numpy.vstack((self.sample_frequencies.numpy(), self.numpy())).T elif self.kind == 'complex': output = _numpy.vstack((self.sample_frequencies.numpy(), self.numpy().real, self.numpy().imag)).T _numpy.savetxt(path, output) elif ext == '.xml' or path.endswith('.xml.gz'): from pycbc.io.live import make_psd_xmldoc from glue.ligolw import utils if self.kind != 'real': raise ValueError('XML only supports real frequency series') output = self.lal() # When writing in this format we must *not* have the 0 values at # frequencies less than flow. To resolve this we set the first # non-zero value < flow. data_lal = output.data.data first_idx = _numpy.argmax(data_lal>0) if not first_idx == 0: data_lal[:first_idx] = data_lal[first_idx] psddict = {ifo: output} utils.write_filename(make_psd_xmldoc(psddict), path, gz=path.endswith(".gz")) elif ext =='.hdf': key = 'data' if group is None else group f = h5py.File(path) ds = f.create_dataset(key, data=self.numpy(), compression='gzip', compression_opts=9, shuffle=True) ds.attrs['epoch'] = float(self.epoch) ds.attrs['delta_f'] = float(self.delta_f) else: raise ValueError('Path must end with .npy, .txt, .xml, .xml.gz ' 'or .hdf')
[ "def", "save", "(", "self", ",", "path", ",", "group", "=", "None", ",", "ifo", "=", "'P1'", ")", ":", "ext", "=", "_os", ".", "path", ".", "splitext", "(", "path", ")", "[", "1", "]", "if", "ext", "==", "'.npy'", ":", "output", "=", "_numpy", ...
Save frequency series to a Numpy .npy, hdf, or text file. The first column contains the sample frequencies, the second contains the values. In the case of a complex frequency series saved as text, the imaginary part is written as a third column. When using hdf format, the data is stored as a single vector, along with relevant attributes. Parameters ---------- path: string Destination file path. Must end with either .hdf, .npy or .txt. group: string Additional name for internal storage use. Ex. hdf storage uses this as the key value. Raises ------ ValueError If path does not end in .npy or .txt.
[ "Save", "frequency", "series", "to", "a", "Numpy", ".", "npy", "hdf", "or", "text", "file", ".", "The", "first", "column", "contains", "the", "sample", "frequencies", "the", "second", "contains", "the", "values", ".", "In", "the", "case", "of", "a", "com...
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/types/frequencyseries.py#L366-L429
227,925
gwastro/pycbc
pycbc/types/frequencyseries.py
FrequencySeries.to_timeseries
def to_timeseries(self, delta_t=None): """ Return the Fourier transform of this time series. Note that this assumes even length time series! Parameters ---------- delta_t : {None, float}, optional The time resolution of the returned series. By default the resolution is determined by length and delta_f of this frequency series. Returns ------- TimeSeries: The inverse fourier transform of this frequency series. """ from pycbc.fft import ifft from pycbc.types import TimeSeries, real_same_precision_as nat_delta_t = 1.0 / ((len(self)-1)*2) / self.delta_f if not delta_t: delta_t = nat_delta_t # add 0.5 to round integer tlen = int(1.0 / self.delta_f / delta_t + 0.5) flen = int(tlen / 2 + 1) if flen < len(self): raise ValueError("The value of delta_t (%s) would be " "undersampled. Maximum delta_t " "is %s." % (delta_t, nat_delta_t)) if not delta_t: tmp = self else: tmp = FrequencySeries(zeros(flen, dtype=self.dtype), delta_f=self.delta_f, epoch=self.epoch) tmp[:len(self)] = self[:] f = TimeSeries(zeros(tlen, dtype=real_same_precision_as(self)), delta_t=delta_t) ifft(tmp, f) return f
python
def to_timeseries(self, delta_t=None): from pycbc.fft import ifft from pycbc.types import TimeSeries, real_same_precision_as nat_delta_t = 1.0 / ((len(self)-1)*2) / self.delta_f if not delta_t: delta_t = nat_delta_t # add 0.5 to round integer tlen = int(1.0 / self.delta_f / delta_t + 0.5) flen = int(tlen / 2 + 1) if flen < len(self): raise ValueError("The value of delta_t (%s) would be " "undersampled. Maximum delta_t " "is %s." % (delta_t, nat_delta_t)) if not delta_t: tmp = self else: tmp = FrequencySeries(zeros(flen, dtype=self.dtype), delta_f=self.delta_f, epoch=self.epoch) tmp[:len(self)] = self[:] f = TimeSeries(zeros(tlen, dtype=real_same_precision_as(self)), delta_t=delta_t) ifft(tmp, f) return f
[ "def", "to_timeseries", "(", "self", ",", "delta_t", "=", "None", ")", ":", "from", "pycbc", ".", "fft", "import", "ifft", "from", "pycbc", ".", "types", "import", "TimeSeries", ",", "real_same_precision_as", "nat_delta_t", "=", "1.0", "/", "(", "(", "len"...
Return the Fourier transform of this time series. Note that this assumes even length time series! Parameters ---------- delta_t : {None, float}, optional The time resolution of the returned series. By default the resolution is determined by length and delta_f of this frequency series. Returns ------- TimeSeries: The inverse fourier transform of this frequency series.
[ "Return", "the", "Fourier", "transform", "of", "this", "time", "series", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/types/frequencyseries.py#L432-L474
227,926
gwastro/pycbc
pycbc/types/frequencyseries.py
FrequencySeries.cyclic_time_shift
def cyclic_time_shift(self, dt): """Shift the data and timestamps by a given number of seconds Shift the data and timestamps in the time domain a given number of seconds. To just change the time stamps, do ts.start_time += dt. The time shift may be smaller than the intrinsic sample rate of the data. Note that data will be cycliclly rotated, so if you shift by 2 seconds, the final 2 seconds of your data will now be at the beginning of the data set. Parameters ---------- dt : float Amount of time to shift the vector. Returns ------- data : pycbc.types.FrequencySeries The time shifted frequency series. """ from pycbc.waveform import apply_fseries_time_shift data = apply_fseries_time_shift(self, dt) data.start_time = self.start_time - dt return data
python
def cyclic_time_shift(self, dt): from pycbc.waveform import apply_fseries_time_shift data = apply_fseries_time_shift(self, dt) data.start_time = self.start_time - dt return data
[ "def", "cyclic_time_shift", "(", "self", ",", "dt", ")", ":", "from", "pycbc", ".", "waveform", "import", "apply_fseries_time_shift", "data", "=", "apply_fseries_time_shift", "(", "self", ",", "dt", ")", "data", ".", "start_time", "=", "self", ".", "start_time...
Shift the data and timestamps by a given number of seconds Shift the data and timestamps in the time domain a given number of seconds. To just change the time stamps, do ts.start_time += dt. The time shift may be smaller than the intrinsic sample rate of the data. Note that data will be cycliclly rotated, so if you shift by 2 seconds, the final 2 seconds of your data will now be at the beginning of the data set. Parameters ---------- dt : float Amount of time to shift the vector. Returns ------- data : pycbc.types.FrequencySeries The time shifted frequency series.
[ "Shift", "the", "data", "and", "timestamps", "by", "a", "given", "number", "of", "seconds" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/types/frequencyseries.py#L477-L500
227,927
gwastro/pycbc
pycbc/tmpltbank/brute_force_methods.py
stack_xi_direction_brute
def stack_xi_direction_brute(xis, bestMasses, bestXis, direction_num, req_match, massRangeParams, metricParams, fUpper, scaleFactor=0.8, numIterations=3000): """ This function is used to assess the depth of the xi_space in a specified dimension at a specified point in the higher dimensions. It does this by iteratively throwing points at the space to find maxima and minima. Parameters ----------- xis : list or array Position in the xi space at which to assess the depth. This can be only a subset of the higher dimensions than that being sampled. bestMasses : list Contains [totalMass, eta, spin1z, spin2z]. Is a physical position mapped to xi coordinates in bestXis that is close to the xis point. This is aimed to give the code a starting point. bestXis : list Contains the position of bestMasses in the xi coordinate system. direction_num : int The dimension that you want to assess the depth of (0 = 1, 1 = 2 ...) req_match : float When considering points to assess the depth with, only consider points with a mismatch that is smaller than this with xis. massRangeParams : massRangeParameters instance Instance holding all the details of mass ranges and spin ranges. metricParams : metricParameters instance Structure holding all the options for construction of the metric and the eigenvalues, eigenvectors and covariance matrix needed to manipulate the space. fUpper : float The value of fUpper that was used when obtaining the xi_i coordinates. This lets us know how to rotate potential physical points into the correct xi_i space. This must be a key in metricParams.evals, metricParams.evecs and metricParams.evecsCV (ie. we must know how to do the transformation for the given value of fUpper) scaleFactor : float, optional (default = 0.8) The value of the scale factor that is used when calling pycbc.tmpltbank.get_mass_distribution. 
numIterations : int, optional (default = 3000) The number of times to make calls to get_mass_distribution when assessing the maximum/minimum of this parameter space. Making this smaller makes the code faster, but at the cost of accuracy. Returns -------- xi_min : float The minimal value of the specified dimension at the specified point in parameter space. xi_max : float The maximal value of the specified dimension at the specified point in parameter space. """ # Find minimum ximin = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, \ req_match, massRangeParams, metricParams, \ fUpper, find_minimum=True, \ scaleFactor=scaleFactor, \ numIterations=numIterations) # Find maximum ximax = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, \ req_match, massRangeParams, metricParams, \ fUpper, find_minimum=False, \ scaleFactor=scaleFactor, \ numIterations=numIterations) return ximin, ximax
python
def stack_xi_direction_brute(xis, bestMasses, bestXis, direction_num, req_match, massRangeParams, metricParams, fUpper, scaleFactor=0.8, numIterations=3000): # Find minimum ximin = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, \ req_match, massRangeParams, metricParams, \ fUpper, find_minimum=True, \ scaleFactor=scaleFactor, \ numIterations=numIterations) # Find maximum ximax = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, \ req_match, massRangeParams, metricParams, \ fUpper, find_minimum=False, \ scaleFactor=scaleFactor, \ numIterations=numIterations) return ximin, ximax
[ "def", "stack_xi_direction_brute", "(", "xis", ",", "bestMasses", ",", "bestXis", ",", "direction_num", ",", "req_match", ",", "massRangeParams", ",", "metricParams", ",", "fUpper", ",", "scaleFactor", "=", "0.8", ",", "numIterations", "=", "3000", ")", ":", "...
This function is used to assess the depth of the xi_space in a specified dimension at a specified point in the higher dimensions. It does this by iteratively throwing points at the space to find maxima and minima. Parameters ----------- xis : list or array Position in the xi space at which to assess the depth. This can be only a subset of the higher dimensions than that being sampled. bestMasses : list Contains [totalMass, eta, spin1z, spin2z]. Is a physical position mapped to xi coordinates in bestXis that is close to the xis point. This is aimed to give the code a starting point. bestXis : list Contains the position of bestMasses in the xi coordinate system. direction_num : int The dimension that you want to assess the depth of (0 = 1, 1 = 2 ...) req_match : float When considering points to assess the depth with, only consider points with a mismatch that is smaller than this with xis. massRangeParams : massRangeParameters instance Instance holding all the details of mass ranges and spin ranges. metricParams : metricParameters instance Structure holding all the options for construction of the metric and the eigenvalues, eigenvectors and covariance matrix needed to manipulate the space. fUpper : float The value of fUpper that was used when obtaining the xi_i coordinates. This lets us know how to rotate potential physical points into the correct xi_i space. This must be a key in metricParams.evals, metricParams.evecs and metricParams.evecsCV (ie. we must know how to do the transformation for the given value of fUpper) scaleFactor : float, optional (default = 0.8) The value of the scale factor that is used when calling pycbc.tmpltbank.get_mass_distribution. numIterations : int, optional (default = 3000) The number of times to make calls to get_mass_distribution when assessing the maximum/minimum of this parameter space. Making this smaller makes the code faster, but at the cost of accuracy. 
Returns -------- xi_min : float The minimal value of the specified dimension at the specified point in parameter space. xi_max : float The maximal value of the specified dimension at the specified point in parameter space.
[ "This", "function", "is", "used", "to", "assess", "the", "depth", "of", "the", "xi_space", "in", "a", "specified", "dimension", "at", "a", "specified", "point", "in", "the", "higher", "dimensions", ".", "It", "does", "this", "by", "iteratively", "throwing", ...
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/brute_force_methods.py#L349-L419
227,928
gwastro/pycbc
pycbc/tmpltbank/brute_force_methods.py
find_xi_extrema_brute
def find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, req_match, \ massRangeParams, metricParams, fUpper, \ find_minimum=False, scaleFactor=0.8, \ numIterations=3000): """ This function is used to find the largest or smallest value of the xi space in a specified dimension at a specified point in the higher dimensions. It does this by iteratively throwing points at the space to find extrema. Parameters ----------- xis : list or array Position in the xi space at which to assess the depth. This can be only a subset of the higher dimensions than that being sampled. bestMasses : list Contains [totalMass, eta, spin1z, spin2z]. Is a physical position mapped to xi coordinates in bestXis that is close to the xis point. This is aimed to give the code a starting point. bestXis : list Contains the position of bestMasses in the xi coordinate system. direction_num : int The dimension that you want to assess the depth of (0 = 1, 1 = 2 ...) req_match : float When considering points to assess the depth with, only consider points with a mismatch that is smaller than this with xis. massRangeParams : massRangeParameters instance Instance holding all the details of mass ranges and spin ranges. metricParams : metricParameters instance Structure holding all the options for construction of the metric and the eigenvalues, eigenvectors and covariance matrix needed to manipulate the space. fUpper : float The value of fUpper that was used when obtaining the xi_i coordinates. This lets us know how to rotate potential physical points into the correct xi_i space. This must be a key in metricParams.evals, metricParams.evecs and metricParams.evecsCV (ie. we must know how to do the transformation for the given value of fUpper) find_minimum : boolean, optional (default = False) If True, find the minimum value of the xi direction. If False find the maximum value. 
scaleFactor : float, optional (default = 0.8) The value of the scale factor that is used when calling pycbc.tmpltbank.get_mass_distribution. numIterations : int, optional (default = 3000) The number of times to make calls to get_mass_distribution when assessing the maximum/minimum of this parameter space. Making this smaller makes the code faster, but at the cost of accuracy. Returns -------- xi_extent : float The extremal value of the specified dimension at the specified point in parameter space. """ # Setup xi_size = len(xis) bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.) if find_minimum: xiextrema = 10000000000 else: xiextrema = -100000000000 for _ in range(numIterations): # Evaluate extrema of the xi direction specified totmass, eta, spin1z, spin2z, _, _, new_xis = \ get_mass_distribution([bestChirpmass,bestMasses[1],bestMasses[2], bestMasses[3]], scaleFactor, massRangeParams, metricParams, fUpper) cDist = (new_xis[0] - xis[0])**2 for j in range(1, xi_size): cDist += (new_xis[j] - xis[j])**2 redCDist = cDist[cDist < req_match] if len(redCDist): if not find_minimum: new_xis[direction_num][cDist > req_match] = -10000000 currXiExtrema = (new_xis[direction_num]).max() idx = (new_xis[direction_num]).argmax() else: new_xis[direction_num][cDist > req_match] = 10000000 currXiExtrema = (new_xis[direction_num]).min() idx = (new_xis[direction_num]).argmin() if ( ((not find_minimum) and (currXiExtrema > xiextrema)) or \ (find_minimum and (currXiExtrema < xiextrema)) ): xiextrema = currXiExtrema bestMasses[0] = totmass[idx] bestMasses[1] = eta[idx] bestMasses[2] = spin1z[idx] bestMasses[3] = spin2z[idx] bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.) return xiextrema
python
def find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, req_match, \ massRangeParams, metricParams, fUpper, \ find_minimum=False, scaleFactor=0.8, \ numIterations=3000): # Setup xi_size = len(xis) bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.) if find_minimum: xiextrema = 10000000000 else: xiextrema = -100000000000 for _ in range(numIterations): # Evaluate extrema of the xi direction specified totmass, eta, spin1z, spin2z, _, _, new_xis = \ get_mass_distribution([bestChirpmass,bestMasses[1],bestMasses[2], bestMasses[3]], scaleFactor, massRangeParams, metricParams, fUpper) cDist = (new_xis[0] - xis[0])**2 for j in range(1, xi_size): cDist += (new_xis[j] - xis[j])**2 redCDist = cDist[cDist < req_match] if len(redCDist): if not find_minimum: new_xis[direction_num][cDist > req_match] = -10000000 currXiExtrema = (new_xis[direction_num]).max() idx = (new_xis[direction_num]).argmax() else: new_xis[direction_num][cDist > req_match] = 10000000 currXiExtrema = (new_xis[direction_num]).min() idx = (new_xis[direction_num]).argmin() if ( ((not find_minimum) and (currXiExtrema > xiextrema)) or \ (find_minimum and (currXiExtrema < xiextrema)) ): xiextrema = currXiExtrema bestMasses[0] = totmass[idx] bestMasses[1] = eta[idx] bestMasses[2] = spin1z[idx] bestMasses[3] = spin2z[idx] bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.) return xiextrema
[ "def", "find_xi_extrema_brute", "(", "xis", ",", "bestMasses", ",", "bestXis", ",", "direction_num", ",", "req_match", ",", "massRangeParams", ",", "metricParams", ",", "fUpper", ",", "find_minimum", "=", "False", ",", "scaleFactor", "=", "0.8", ",", "numIterati...
This function is used to find the largest or smallest value of the xi space in a specified dimension at a specified point in the higher dimensions. It does this by iteratively throwing points at the space to find extrema. Parameters ----------- xis : list or array Position in the xi space at which to assess the depth. This can be only a subset of the higher dimensions than that being sampled. bestMasses : list Contains [totalMass, eta, spin1z, spin2z]. Is a physical position mapped to xi coordinates in bestXis that is close to the xis point. This is aimed to give the code a starting point. bestXis : list Contains the position of bestMasses in the xi coordinate system. direction_num : int The dimension that you want to assess the depth of (0 = 1, 1 = 2 ...) req_match : float When considering points to assess the depth with, only consider points with a mismatch that is smaller than this with xis. massRangeParams : massRangeParameters instance Instance holding all the details of mass ranges and spin ranges. metricParams : metricParameters instance Structure holding all the options for construction of the metric and the eigenvalues, eigenvectors and covariance matrix needed to manipulate the space. fUpper : float The value of fUpper that was used when obtaining the xi_i coordinates. This lets us know how to rotate potential physical points into the correct xi_i space. This must be a key in metricParams.evals, metricParams.evecs and metricParams.evecsCV (ie. we must know how to do the transformation for the given value of fUpper) find_minimum : boolean, optional (default = False) If True, find the minimum value of the xi direction. If False find the maximum value. scaleFactor : float, optional (default = 0.8) The value of the scale factor that is used when calling pycbc.tmpltbank.get_mass_distribution. numIterations : int, optional (default = 3000) The number of times to make calls to get_mass_distribution when assessing the maximum/minimum of this parameter space. 
Making this smaller makes the code faster, but at the cost of accuracy. Returns -------- xi_extent : float The extremal value of the specified dimension at the specified point in parameter space.
[ "This", "function", "is", "used", "to", "find", "the", "largest", "or", "smallest", "value", "of", "the", "xi", "space", "in", "a", "specified", "dimension", "at", "a", "specified", "point", "in", "the", "higher", "dimensions", ".", "It", "does", "this", ...
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/brute_force_methods.py#L421-L515
227,929
gwastro/pycbc
pycbc/pool.py
is_main_process
def is_main_process(): """ Check if this is the main control process and may handle one time tasks """ try: from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() return rank == 0 except (ImportError, ValueError, RuntimeError): return True
python
def is_main_process(): try: from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() return rank == 0 except (ImportError, ValueError, RuntimeError): return True
[ "def", "is_main_process", "(", ")", ":", "try", ":", "from", "mpi4py", "import", "MPI", "comm", "=", "MPI", ".", "COMM_WORLD", "rank", "=", "comm", ".", "Get_rank", "(", ")", "return", "rank", "==", "0", "except", "(", "ImportError", ",", "ValueError", ...
Check if this is the main control process and may handle one time tasks
[ "Check", "if", "this", "is", "the", "main", "control", "process", "and", "may", "handle", "one", "time", "tasks" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/pool.py#L11-L20
227,930
gwastro/pycbc
pycbc/pool.py
_lockstep_fcn
def _lockstep_fcn(values): """ Wrapper to ensure that all processes execute together """ numrequired, fcn, args = values with _process_lock: _numdone.value += 1 # yep this is an ugly busy loop, do something better please # when we care about the performance of this call and not just the # guarantee it provides (ok... maybe never) while 1: if _numdone.value == numrequired: return fcn(args)
python
def _lockstep_fcn(values): numrequired, fcn, args = values with _process_lock: _numdone.value += 1 # yep this is an ugly busy loop, do something better please # when we care about the performance of this call and not just the # guarantee it provides (ok... maybe never) while 1: if _numdone.value == numrequired: return fcn(args)
[ "def", "_lockstep_fcn", "(", "values", ")", ":", "numrequired", ",", "fcn", ",", "args", "=", "values", "with", "_process_lock", ":", "_numdone", ".", "value", "+=", "1", "# yep this is an ugly busy loop, do something better please", "# when we care about the performance ...
Wrapper to ensure that all processes execute together
[ "Wrapper", "to", "ensure", "that", "all", "processes", "execute", "together" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/pool.py#L31-L41
227,931
gwastro/pycbc
pycbc/pool.py
BroadcastPool.broadcast
def broadcast(self, fcn, args): """ Do a function call on every worker. Parameters ---------- fcn: funtion Function to call. args: tuple The arguments for Pool.map """ results = self.map(_lockstep_fcn, [(len(self), fcn, args)] * len(self)) _numdone.value = 0 return results
python
def broadcast(self, fcn, args): results = self.map(_lockstep_fcn, [(len(self), fcn, args)] * len(self)) _numdone.value = 0 return results
[ "def", "broadcast", "(", "self", ",", "fcn", ",", "args", ")", ":", "results", "=", "self", ".", "map", "(", "_lockstep_fcn", ",", "[", "(", "len", "(", "self", ")", ",", "fcn", ",", "args", ")", "]", "*", "len", "(", "self", ")", ")", "_numdon...
Do a function call on every worker. Parameters ---------- fcn: funtion Function to call. args: tuple The arguments for Pool.map
[ "Do", "a", "function", "call", "on", "every", "worker", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/pool.py#L62-L74
227,932
gwastro/pycbc
pycbc/pool.py
BroadcastPool.allmap
def allmap(self, fcn, args): """ Do a function call on every worker with different arguments Parameters ---------- fcn: funtion Function to call. args: tuple The arguments for Pool.map """ results = self.map(_lockstep_fcn, [(len(self), fcn, arg) for arg in args]) _numdone.value = 0 return results
python
def allmap(self, fcn, args): results = self.map(_lockstep_fcn, [(len(self), fcn, arg) for arg in args]) _numdone.value = 0 return results
[ "def", "allmap", "(", "self", ",", "fcn", ",", "args", ")", ":", "results", "=", "self", ".", "map", "(", "_lockstep_fcn", ",", "[", "(", "len", "(", "self", ")", ",", "fcn", ",", "arg", ")", "for", "arg", "in", "args", "]", ")", "_numdone", "....
Do a function call on every worker with different arguments Parameters ---------- fcn: funtion Function to call. args: tuple The arguments for Pool.map
[ "Do", "a", "function", "call", "on", "every", "worker", "with", "different", "arguments" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/pool.py#L76-L89
227,933
gwastro/pycbc
pycbc/pool.py
BroadcastPool.map
def map(self, func, items, chunksize=None): """ Catch keyboard interuppts to allow the pool to exit cleanly. Parameters ---------- func: function Function to call items: list of tuples Arguments to pass chunksize: int, Optional Number of calls for each process to handle at once """ results = self.map_async(func, items, chunksize) while True: try: return results.get(1800) except TimeoutError: pass except KeyboardInterrupt: self.terminate() self.join() raise KeyboardInterrupt
python
def map(self, func, items, chunksize=None): results = self.map_async(func, items, chunksize) while True: try: return results.get(1800) except TimeoutError: pass except KeyboardInterrupt: self.terminate() self.join() raise KeyboardInterrupt
[ "def", "map", "(", "self", ",", "func", ",", "items", ",", "chunksize", "=", "None", ")", ":", "results", "=", "self", ".", "map_async", "(", "func", ",", "items", ",", "chunksize", ")", "while", "True", ":", "try", ":", "return", "results", ".", "...
Catch keyboard interuppts to allow the pool to exit cleanly. Parameters ---------- func: function Function to call items: list of tuples Arguments to pass chunksize: int, Optional Number of calls for each process to handle at once
[ "Catch", "keyboard", "interuppts", "to", "allow", "the", "pool", "to", "exit", "cleanly", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/pool.py#L91-L112
227,934
gwastro/pycbc
pycbc/filter/resample.py
lfilter
def lfilter(coefficients, timeseries): """ Apply filter coefficients to a time series Parameters ---------- coefficients: numpy.ndarray Filter coefficients to apply timeseries: numpy.ndarray Time series to be filtered. Returns ------- tseries: numpy.ndarray filtered array """ from pycbc.filter import correlate # If there aren't many points just use the default scipy method if len(timeseries) < 2**7: if hasattr(timeseries, 'numpy'): timeseries = timeseries.numpy() series = scipy.signal.lfilter(coefficients, 1.0, timeseries) return series else: cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype) cseries.resize(len(timeseries)) cseries.roll(len(timeseries) - len(coefficients) + 1) timeseries = Array(timeseries, copy=False) flen = len(cseries) / 2 + 1 ftype = complex_same_precision_as(timeseries) cfreq = zeros(flen, dtype=ftype) tfreq = zeros(flen, dtype=ftype) fft(Array(cseries), cfreq) fft(Array(timeseries), tfreq) cout = zeros(flen, ftype) out = zeros(len(timeseries), dtype=timeseries) correlate(cfreq, tfreq, cout) ifft(cout, out) return out.numpy() / len(out)
python
def lfilter(coefficients, timeseries): from pycbc.filter import correlate # If there aren't many points just use the default scipy method if len(timeseries) < 2**7: if hasattr(timeseries, 'numpy'): timeseries = timeseries.numpy() series = scipy.signal.lfilter(coefficients, 1.0, timeseries) return series else: cseries = (Array(coefficients[::-1] * 1)).astype(timeseries.dtype) cseries.resize(len(timeseries)) cseries.roll(len(timeseries) - len(coefficients) + 1) timeseries = Array(timeseries, copy=False) flen = len(cseries) / 2 + 1 ftype = complex_same_precision_as(timeseries) cfreq = zeros(flen, dtype=ftype) tfreq = zeros(flen, dtype=ftype) fft(Array(cseries), cfreq) fft(Array(timeseries), tfreq) cout = zeros(flen, ftype) out = zeros(len(timeseries), dtype=timeseries) correlate(cfreq, tfreq, cout) ifft(cout, out) return out.numpy() / len(out)
[ "def", "lfilter", "(", "coefficients", ",", "timeseries", ")", ":", "from", "pycbc", ".", "filter", "import", "correlate", "# If there aren't many points just use the default scipy method", "if", "len", "(", "timeseries", ")", "<", "2", "**", "7", ":", "if", "hasa...
Apply filter coefficients to a time series Parameters ---------- coefficients: numpy.ndarray Filter coefficients to apply timeseries: numpy.ndarray Time series to be filtered. Returns ------- tseries: numpy.ndarray filtered array
[ "Apply", "filter", "coefficients", "to", "a", "time", "series" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/filter/resample.py#L34-L78
227,935
gwastro/pycbc
pycbc/filter/resample.py
resample_to_delta_t
def resample_to_delta_t(timeseries, delta_t, method='butterworth'): """Resmple the time_series to delta_t Resamples the TimeSeries instance time_series to the given time step, delta_t. Only powers of two and real valued time series are supported at this time. Additional restrictions may apply to particular filter methods. Parameters ---------- time_series: TimeSeries The time series to be resampled delta_t: float The desired time step Returns ------- Time Series: TimeSeries A TimeSeries that has been resampled to delta_t. Raises ------ TypeError: time_series is not an instance of TimeSeries. TypeError: time_series is not real valued Examples -------- >>> h_plus_sampled = resample_to_delta_t(h_plus, 1.0/2048) """ if not isinstance(timeseries,TimeSeries): raise TypeError("Can only resample time series") if timeseries.kind is not 'real': raise TypeError("Time series must be real") if timeseries.delta_t == delta_t: return timeseries * 1 if method == 'butterworth': lal_data = timeseries.lal() _resample_func[timeseries.dtype](lal_data, delta_t) data = lal_data.data.data elif method == 'ldas': factor = int(delta_t / timeseries.delta_t) numtaps = factor * 20 + 1 # The kaiser window has been testing using the LDAS implementation # and is in the same configuration as used in the original lalinspiral filter_coefficients = scipy.signal.firwin(numtaps, 1.0 / factor, window=('kaiser', 5)) # apply the filter and decimate data = fir_zero_filter(filter_coefficients, timeseries)[::factor] else: raise ValueError('Invalid resampling method: %s' % method) ts = TimeSeries(data, delta_t = delta_t, dtype=timeseries.dtype, epoch=timeseries._epoch) # From the construction of the LDAS FIR filter there will be 10 corrupted samples # explanation here http://software.ligo.org/docs/lalsuite/lal/group___resample_time_series__c.html ts.corrupted_samples = 10 return ts
python
def resample_to_delta_t(timeseries, delta_t, method='butterworth'): if not isinstance(timeseries,TimeSeries): raise TypeError("Can only resample time series") if timeseries.kind is not 'real': raise TypeError("Time series must be real") if timeseries.delta_t == delta_t: return timeseries * 1 if method == 'butterworth': lal_data = timeseries.lal() _resample_func[timeseries.dtype](lal_data, delta_t) data = lal_data.data.data elif method == 'ldas': factor = int(delta_t / timeseries.delta_t) numtaps = factor * 20 + 1 # The kaiser window has been testing using the LDAS implementation # and is in the same configuration as used in the original lalinspiral filter_coefficients = scipy.signal.firwin(numtaps, 1.0 / factor, window=('kaiser', 5)) # apply the filter and decimate data = fir_zero_filter(filter_coefficients, timeseries)[::factor] else: raise ValueError('Invalid resampling method: %s' % method) ts = TimeSeries(data, delta_t = delta_t, dtype=timeseries.dtype, epoch=timeseries._epoch) # From the construction of the LDAS FIR filter there will be 10 corrupted samples # explanation here http://software.ligo.org/docs/lalsuite/lal/group___resample_time_series__c.html ts.corrupted_samples = 10 return ts
[ "def", "resample_to_delta_t", "(", "timeseries", ",", "delta_t", ",", "method", "=", "'butterworth'", ")", ":", "if", "not", "isinstance", "(", "timeseries", ",", "TimeSeries", ")", ":", "raise", "TypeError", "(", "\"Can only resample time series\"", ")", "if", ...
Resmple the time_series to delta_t Resamples the TimeSeries instance time_series to the given time step, delta_t. Only powers of two and real valued time series are supported at this time. Additional restrictions may apply to particular filter methods. Parameters ---------- time_series: TimeSeries The time series to be resampled delta_t: float The desired time step Returns ------- Time Series: TimeSeries A TimeSeries that has been resampled to delta_t. Raises ------ TypeError: time_series is not an instance of TimeSeries. TypeError: time_series is not real valued Examples -------- >>> h_plus_sampled = resample_to_delta_t(h_plus, 1.0/2048)
[ "Resmple", "the", "time_series", "to", "delta_t" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/filter/resample.py#L107-L175
227,936
gwastro/pycbc
pycbc/filter/resample.py
highpass
def highpass(timeseries, frequency, filter_order=8, attenuation=0.1): """Return a new timeseries that is highpassed. Return a new time series that is highpassed above the `frequency`. Parameters ---------- Time Series: TimeSeries The time series to be high-passed. frequency: float The frequency below which is suppressed. filter_order: {8, int}, optional The order of the filter to use when high-passing the time series. attenuation: {0.1, float}, optional The attenuation of the filter. Returns ------- Time Series: TimeSeries A new TimeSeries that has been high-passed. Raises ------ TypeError: time_series is not an instance of TimeSeries. TypeError: time_series is not real valued """ if not isinstance(timeseries, TimeSeries): raise TypeError("Can only resample time series") if timeseries.kind is not 'real': raise TypeError("Time series must be real") lal_data = timeseries.lal() _highpass_func[timeseries.dtype](lal_data, frequency, 1-attenuation, filter_order) return TimeSeries(lal_data.data.data, delta_t = lal_data.deltaT, dtype=timeseries.dtype, epoch=timeseries._epoch)
python
def highpass(timeseries, frequency, filter_order=8, attenuation=0.1): if not isinstance(timeseries, TimeSeries): raise TypeError("Can only resample time series") if timeseries.kind is not 'real': raise TypeError("Time series must be real") lal_data = timeseries.lal() _highpass_func[timeseries.dtype](lal_data, frequency, 1-attenuation, filter_order) return TimeSeries(lal_data.data.data, delta_t = lal_data.deltaT, dtype=timeseries.dtype, epoch=timeseries._epoch)
[ "def", "highpass", "(", "timeseries", ",", "frequency", ",", "filter_order", "=", "8", ",", "attenuation", "=", "0.1", ")", ":", "if", "not", "isinstance", "(", "timeseries", ",", "TimeSeries", ")", ":", "raise", "TypeError", "(", "\"Can only resample time ser...
Return a new timeseries that is highpassed. Return a new time series that is highpassed above the `frequency`. Parameters ---------- Time Series: TimeSeries The time series to be high-passed. frequency: float The frequency below which is suppressed. filter_order: {8, int}, optional The order of the filter to use when high-passing the time series. attenuation: {0.1, float}, optional The attenuation of the filter. Returns ------- Time Series: TimeSeries A new TimeSeries that has been high-passed. Raises ------ TypeError: time_series is not an instance of TimeSeries. TypeError: time_series is not real valued
[ "Return", "a", "new", "timeseries", "that", "is", "highpassed", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/filter/resample.py#L252-L293
227,937
gwastro/pycbc
pycbc/filter/resample.py
interpolate_complex_frequency
def interpolate_complex_frequency(series, delta_f, zeros_offset=0, side='right'): """Interpolate complex frequency series to desired delta_f. Return a new complex frequency series that has been interpolated to the desired delta_f. Parameters ---------- series : FrequencySeries Frequency series to be interpolated. delta_f : float The desired delta_f of the output zeros_offset : optional, {0, int} Number of sample to delay the start of the zero padding side : optional, {'right', str} The side of the vector to zero pad Returns ------- interpolated series : FrequencySeries A new FrequencySeries that has been interpolated. """ new_n = int( (len(series)-1) * series.delta_f / delta_f + 1) old_N = int( (len(series)-1) * 2 ) new_N = int( (new_n - 1) * 2 ) time_series = TimeSeries(zeros(old_N), delta_t =1.0/(series.delta_f*old_N), dtype=real_same_precision_as(series)) ifft(series, time_series) time_series.roll(-zeros_offset) time_series.resize(new_N) if side == 'left': time_series.roll(zeros_offset + new_N - old_N) elif side == 'right': time_series.roll(zeros_offset) out_series = FrequencySeries(zeros(new_n), epoch=series.epoch, delta_f=delta_f, dtype=series.dtype) fft(time_series, out_series) return out_series
python
def interpolate_complex_frequency(series, delta_f, zeros_offset=0, side='right'): new_n = int( (len(series)-1) * series.delta_f / delta_f + 1) old_N = int( (len(series)-1) * 2 ) new_N = int( (new_n - 1) * 2 ) time_series = TimeSeries(zeros(old_N), delta_t =1.0/(series.delta_f*old_N), dtype=real_same_precision_as(series)) ifft(series, time_series) time_series.roll(-zeros_offset) time_series.resize(new_N) if side == 'left': time_series.roll(zeros_offset + new_N - old_N) elif side == 'right': time_series.roll(zeros_offset) out_series = FrequencySeries(zeros(new_n), epoch=series.epoch, delta_f=delta_f, dtype=series.dtype) fft(time_series, out_series) return out_series
[ "def", "interpolate_complex_frequency", "(", "series", ",", "delta_f", ",", "zeros_offset", "=", "0", ",", "side", "=", "'right'", ")", ":", "new_n", "=", "int", "(", "(", "len", "(", "series", ")", "-", "1", ")", "*", "series", ".", "delta_f", "/", ...
Interpolate complex frequency series to desired delta_f. Return a new complex frequency series that has been interpolated to the desired delta_f. Parameters ---------- series : FrequencySeries Frequency series to be interpolated. delta_f : float The desired delta_f of the output zeros_offset : optional, {0, int} Number of sample to delay the start of the zero padding side : optional, {'right', str} The side of the vector to zero pad Returns ------- interpolated series : FrequencySeries A new FrequencySeries that has been interpolated.
[ "Interpolate", "complex", "frequency", "series", "to", "desired", "delta_f", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/filter/resample.py#L295-L337
227,938
gwastro/pycbc
pycbc/events/coinc_rate.py
multiifo_noise_coinc_rate
def multiifo_noise_coinc_rate(rates, slop): """ Calculate the expected rate of noise coincidences for multiple detectors Parameters ---------- rates: dict Dictionary keyed on ifo string Value is a sequence of single-detector trigger rates, units assumed to be Hz slop: float time added to maximum time-of-flight between detectors to account for timing error Returns ------- expected_coinc_rates: dict Dictionary keyed on the ifo combination string Value is expected coincidence rate in the combination, units Hz """ ifos = numpy.array(sorted(rates.keys())) rates_raw = list(rates[ifo] for ifo in ifos) expected_coinc_rates = {} # Calculate coincidence for all-ifo combination # multiply product of trigger rates by the overlap time allowed_area = multiifo_noise_coincident_area(ifos, slop) rateprod = [numpy.prod(rs) for rs in zip(*rates_raw)] ifostring = ' '.join(ifos) expected_coinc_rates[ifostring] = allowed_area * numpy.array(rateprod) # if more than one possible coincidence type exists, # calculate coincidence for subsets through recursion if len(ifos) > 2: # Calculate rate for each 'miss-one-out' detector combination subsets = itertools.combinations(ifos, len(ifos) - 1) for subset in subsets: rates_subset = {} for ifo in subset: rates_subset[ifo] = rates[ifo] sub_coinc_rates = multiifo_noise_coinc_rate(rates_subset, slop) # add these sub-coincidences to the overall dictionary for sub_coinc in sub_coinc_rates: expected_coinc_rates[sub_coinc] = sub_coinc_rates[sub_coinc] return expected_coinc_rates
python
def multiifo_noise_coinc_rate(rates, slop): ifos = numpy.array(sorted(rates.keys())) rates_raw = list(rates[ifo] for ifo in ifos) expected_coinc_rates = {} # Calculate coincidence for all-ifo combination # multiply product of trigger rates by the overlap time allowed_area = multiifo_noise_coincident_area(ifos, slop) rateprod = [numpy.prod(rs) for rs in zip(*rates_raw)] ifostring = ' '.join(ifos) expected_coinc_rates[ifostring] = allowed_area * numpy.array(rateprod) # if more than one possible coincidence type exists, # calculate coincidence for subsets through recursion if len(ifos) > 2: # Calculate rate for each 'miss-one-out' detector combination subsets = itertools.combinations(ifos, len(ifos) - 1) for subset in subsets: rates_subset = {} for ifo in subset: rates_subset[ifo] = rates[ifo] sub_coinc_rates = multiifo_noise_coinc_rate(rates_subset, slop) # add these sub-coincidences to the overall dictionary for sub_coinc in sub_coinc_rates: expected_coinc_rates[sub_coinc] = sub_coinc_rates[sub_coinc] return expected_coinc_rates
[ "def", "multiifo_noise_coinc_rate", "(", "rates", ",", "slop", ")", ":", "ifos", "=", "numpy", ".", "array", "(", "sorted", "(", "rates", ".", "keys", "(", ")", ")", ")", "rates_raw", "=", "list", "(", "rates", "[", "ifo", "]", "for", "ifo", "in", ...
Calculate the expected rate of noise coincidences for multiple detectors Parameters ---------- rates: dict Dictionary keyed on ifo string Value is a sequence of single-detector trigger rates, units assumed to be Hz slop: float time added to maximum time-of-flight between detectors to account for timing error Returns ------- expected_coinc_rates: dict Dictionary keyed on the ifo combination string Value is expected coincidence rate in the combination, units Hz
[ "Calculate", "the", "expected", "rate", "of", "noise", "coincidences", "for", "multiple", "detectors" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/events/coinc_rate.py#L17-L62
227,939
gwastro/pycbc
pycbc/events/coinc_rate.py
multiifo_noise_coincident_area
def multiifo_noise_coincident_area(ifos, slop): """ calculate the total extent of time offset between 2 detectors, or area of the 2d space of time offsets for 3 detectors, for which a coincidence can be generated Parameters ---------- ifos: list of strings list of interferometers slop: float extra time to add to maximum time-of-flight for timing error Returns ------- allowed_area: float area in units of seconds^(n_ifos-1) that coincident values can fall in """ # set up detector objects dets = {} for ifo in ifos: dets[ifo] = pycbc.detector.Detector(ifo) n_ifos = len(ifos) if n_ifos == 2: allowed_area = 2. * \ (dets[ifos[0]].light_travel_time_to_detector(dets[ifos[1]]) + slop) elif n_ifos == 3: tofs = numpy.zeros(n_ifos) ifo2_num = [] # calculate travel time between detectors (plus extra for timing error) # TO DO: allow for different timing errors between different detectors for i, ifo in enumerate(ifos): ifo2_num.append(int(numpy.mod(i + 1, n_ifos))) det0 = dets[ifo] det1 = dets[ifos[ifo2_num[i]]] tofs[i] = det0.light_travel_time_to_detector(det1) + slop # combine these to calculate allowed area allowed_area = 0 for i, _ in enumerate(ifos): allowed_area += 2 * tofs[i] * tofs[ifo2_num[i]] - tofs[i]**2 else: raise NotImplementedError("Not able to deal with more than 3 ifos") return allowed_area
python
def multiifo_noise_coincident_area(ifos, slop): # set up detector objects dets = {} for ifo in ifos: dets[ifo] = pycbc.detector.Detector(ifo) n_ifos = len(ifos) if n_ifos == 2: allowed_area = 2. * \ (dets[ifos[0]].light_travel_time_to_detector(dets[ifos[1]]) + slop) elif n_ifos == 3: tofs = numpy.zeros(n_ifos) ifo2_num = [] # calculate travel time between detectors (plus extra for timing error) # TO DO: allow for different timing errors between different detectors for i, ifo in enumerate(ifos): ifo2_num.append(int(numpy.mod(i + 1, n_ifos))) det0 = dets[ifo] det1 = dets[ifos[ifo2_num[i]]] tofs[i] = det0.light_travel_time_to_detector(det1) + slop # combine these to calculate allowed area allowed_area = 0 for i, _ in enumerate(ifos): allowed_area += 2 * tofs[i] * tofs[ifo2_num[i]] - tofs[i]**2 else: raise NotImplementedError("Not able to deal with more than 3 ifos") return allowed_area
[ "def", "multiifo_noise_coincident_area", "(", "ifos", ",", "slop", ")", ":", "# set up detector objects", "dets", "=", "{", "}", "for", "ifo", "in", "ifos", ":", "dets", "[", "ifo", "]", "=", "pycbc", ".", "detector", ".", "Detector", "(", "ifo", ")", "n...
calculate the total extent of time offset between 2 detectors, or area of the 2d space of time offsets for 3 detectors, for which a coincidence can be generated Parameters ---------- ifos: list of strings list of interferometers slop: float extra time to add to maximum time-of-flight for timing error Returns ------- allowed_area: float area in units of seconds^(n_ifos-1) that coincident values can fall in
[ "calculate", "the", "total", "extent", "of", "time", "offset", "between", "2", "detectors", "or", "area", "of", "the", "2d", "space", "of", "time", "offsets", "for", "3", "detectors", "for", "which", "a", "coincidence", "can", "be", "generated" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/events/coinc_rate.py#L65-L111
227,940
gwastro/pycbc
pycbc/events/coinc_rate.py
multiifo_signal_coincident_area
def multiifo_signal_coincident_area(ifos): """ Calculate the area in which signal time differences are physically allowed Parameters ---------- ifos: list of strings list of interferometers Returns ------- allowed_area: float area in units of seconds^(n_ifos-1) that coincident signals will occupy """ n_ifos = len(ifos) if n_ifos == 2: det0 = pycbc.detector.Detector(ifos[0]) det1 = pycbc.detector.Detector(ifos[1]) allowed_area = 2 * det0.light_travel_time_to_detector(det1) elif n_ifos == 3: dets = {} tofs = numpy.zeros(n_ifos) ifo2_num = [] # set up detector objects for ifo in ifos: dets[ifo] = pycbc.detector.Detector(ifo) # calculate travel time between detectors for i, ifo in enumerate(ifos): ifo2_num.append(int(numpy.mod(i + 1, n_ifos))) det0 = dets[ifo] det1 = dets[ifos[ifo2_num[i]]] tofs[i] = det0.light_travel_time_to_detector(det1) # calculate allowed area phi_12 = numpy.arccos((tofs[0]**2 + tofs[1]**2 - tofs[2]**2) / (2 * tofs[0] * tofs[1])) allowed_area = numpy.pi * tofs[0] * tofs[1] * numpy.sin(phi_12) else: raise NotImplementedError("Not able to deal with more than 3 ifos") return allowed_area
python
def multiifo_signal_coincident_area(ifos): n_ifos = len(ifos) if n_ifos == 2: det0 = pycbc.detector.Detector(ifos[0]) det1 = pycbc.detector.Detector(ifos[1]) allowed_area = 2 * det0.light_travel_time_to_detector(det1) elif n_ifos == 3: dets = {} tofs = numpy.zeros(n_ifos) ifo2_num = [] # set up detector objects for ifo in ifos: dets[ifo] = pycbc.detector.Detector(ifo) # calculate travel time between detectors for i, ifo in enumerate(ifos): ifo2_num.append(int(numpy.mod(i + 1, n_ifos))) det0 = dets[ifo] det1 = dets[ifos[ifo2_num[i]]] tofs[i] = det0.light_travel_time_to_detector(det1) # calculate allowed area phi_12 = numpy.arccos((tofs[0]**2 + tofs[1]**2 - tofs[2]**2) / (2 * tofs[0] * tofs[1])) allowed_area = numpy.pi * tofs[0] * tofs[1] * numpy.sin(phi_12) else: raise NotImplementedError("Not able to deal with more than 3 ifos") return allowed_area
[ "def", "multiifo_signal_coincident_area", "(", "ifos", ")", ":", "n_ifos", "=", "len", "(", "ifos", ")", "if", "n_ifos", "==", "2", ":", "det0", "=", "pycbc", ".", "detector", ".", "Detector", "(", "ifos", "[", "0", "]", ")", "det1", "=", "pycbc", "....
Calculate the area in which signal time differences are physically allowed Parameters ---------- ifos: list of strings list of interferometers Returns ------- allowed_area: float area in units of seconds^(n_ifos-1) that coincident signals will occupy
[ "Calculate", "the", "area", "in", "which", "signal", "time", "differences", "are", "physically", "allowed" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/events/coinc_rate.py#L114-L156
227,941
gwastro/pycbc
pycbc/waveform/ringdown.py
lm_amps_phases
def lm_amps_phases(**kwargs): """ Take input_params and return dictionaries with amplitudes and phases of each overtone of a specific lm mode, checking that all of them are given. """ l, m = kwargs['l'], kwargs['m'] amps, phis = {}, {} # amp220 is always required, because the amplitudes of subdominant modes # are given as fractions of amp220. try: amps['220'] = kwargs['amp220'] except KeyError: raise ValueError('amp220 is always required') # Get amplitudes of subdominant modes and all phases for n in range(kwargs['nmodes']): # If it is the 22 mode, skip 220 if (l, m, n) != (2, 2, 0): try: amps['%d%d%d' %(l,m,n)] = kwargs['amp%d%d%d' %(l,m,n)] * amps['220'] except KeyError: raise ValueError('amp%d%d%d is required' %(l,m,n)) try: phis['%d%d%d' %(l,m,n)] = kwargs['phi%d%d%d' %(l,m,n)] except KeyError: raise ValueError('phi%d%d%d is required' %(l,m,n)) return amps, phis
python
def lm_amps_phases(**kwargs): l, m = kwargs['l'], kwargs['m'] amps, phis = {}, {} # amp220 is always required, because the amplitudes of subdominant modes # are given as fractions of amp220. try: amps['220'] = kwargs['amp220'] except KeyError: raise ValueError('amp220 is always required') # Get amplitudes of subdominant modes and all phases for n in range(kwargs['nmodes']): # If it is the 22 mode, skip 220 if (l, m, n) != (2, 2, 0): try: amps['%d%d%d' %(l,m,n)] = kwargs['amp%d%d%d' %(l,m,n)] * amps['220'] except KeyError: raise ValueError('amp%d%d%d is required' %(l,m,n)) try: phis['%d%d%d' %(l,m,n)] = kwargs['phi%d%d%d' %(l,m,n)] except KeyError: raise ValueError('phi%d%d%d is required' %(l,m,n)) return amps, phis
[ "def", "lm_amps_phases", "(", "*", "*", "kwargs", ")", ":", "l", ",", "m", "=", "kwargs", "[", "'l'", "]", ",", "kwargs", "[", "'m'", "]", "amps", ",", "phis", "=", "{", "}", ",", "{", "}", "# amp220 is always required, because the amplitudes of subdominan...
Take input_params and return dictionaries with amplitudes and phases of each overtone of a specific lm mode, checking that all of them are given.
[ "Take", "input_params", "and", "return", "dictionaries", "with", "amplitudes", "and", "phases", "of", "each", "overtone", "of", "a", "specific", "lm", "mode", "checking", "that", "all", "of", "them", "are", "given", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/ringdown.py#L68-L94
227,942
gwastro/pycbc
pycbc/waveform/ringdown.py
lm_freqs_taus
def lm_freqs_taus(**kwargs): """ Take input_params and return dictionaries with frequencies and damping times of each overtone of a specific lm mode, checking that all of them are given. """ lmns = kwargs['lmns'] freqs, taus = {}, {} for lmn in lmns: l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2]) for n in range(nmodes): try: freqs['%d%d%d' %(l,m,n)] = kwargs['f_%d%d%d' %(l,m,n)] except KeyError: raise ValueError('f_%d%d%d is required' %(l,m,n)) try: taus['%d%d%d' %(l,m,n)] = kwargs['tau_%d%d%d' %(l,m,n)] except KeyError: raise ValueError('tau_%d%d%d is required' %(l,m,n)) return freqs, taus
python
def lm_freqs_taus(**kwargs): lmns = kwargs['lmns'] freqs, taus = {}, {} for lmn in lmns: l, m, nmodes = int(lmn[0]), int(lmn[1]), int(lmn[2]) for n in range(nmodes): try: freqs['%d%d%d' %(l,m,n)] = kwargs['f_%d%d%d' %(l,m,n)] except KeyError: raise ValueError('f_%d%d%d is required' %(l,m,n)) try: taus['%d%d%d' %(l,m,n)] = kwargs['tau_%d%d%d' %(l,m,n)] except KeyError: raise ValueError('tau_%d%d%d is required' %(l,m,n)) return freqs, taus
[ "def", "lm_freqs_taus", "(", "*", "*", "kwargs", ")", ":", "lmns", "=", "kwargs", "[", "'lmns'", "]", "freqs", ",", "taus", "=", "{", "}", ",", "{", "}", "for", "lmn", "in", "lmns", ":", "l", ",", "m", ",", "nmodes", "=", "int", "(", "lmn", "...
Take input_params and return dictionaries with frequencies and damping times of each overtone of a specific lm mode, checking that all of them are given.
[ "Take", "input_params", "and", "return", "dictionaries", "with", "frequencies", "and", "damping", "times", "of", "each", "overtone", "of", "a", "specific", "lm", "mode", "checking", "that", "all", "of", "them", "are", "given", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/ringdown.py#L96-L116
227,943
gwastro/pycbc
pycbc/waveform/ringdown.py
qnm_freq_decay
def qnm_freq_decay(f_0, tau, decay): """Return the frequency at which the amplitude of the ringdown falls to decay of the peak amplitude. Parameters ---------- f_0 : float The ringdown-frequency, which gives the peak amplitude. tau : float The damping time of the sinusoid. decay: float The fraction of the peak amplitude. Returns ------- f_decay: float The frequency at which the amplitude of the frequency-domain ringdown falls to decay of the peak amplitude. """ q_0 = pi * f_0 * tau alpha = 1. / decay alpha_sq = 1. / decay / decay # Expression obtained analytically under the assumption # that 1./alpha_sq, q_0^2 >> 1 q_sq = (alpha_sq + 4*q_0*q_0 + alpha*numpy.sqrt(alpha_sq + 16*q_0*q_0)) / 4. return numpy.sqrt(q_sq) / pi / tau
python
def qnm_freq_decay(f_0, tau, decay): q_0 = pi * f_0 * tau alpha = 1. / decay alpha_sq = 1. / decay / decay # Expression obtained analytically under the assumption # that 1./alpha_sq, q_0^2 >> 1 q_sq = (alpha_sq + 4*q_0*q_0 + alpha*numpy.sqrt(alpha_sq + 16*q_0*q_0)) / 4. return numpy.sqrt(q_sq) / pi / tau
[ "def", "qnm_freq_decay", "(", "f_0", ",", "tau", ",", "decay", ")", ":", "q_0", "=", "pi", "*", "f_0", "*", "tau", "alpha", "=", "1.", "/", "decay", "alpha_sq", "=", "1.", "/", "decay", "/", "decay", "# Expression obtained analytically under the assumption",...
Return the frequency at which the amplitude of the ringdown falls to decay of the peak amplitude. Parameters ---------- f_0 : float The ringdown-frequency, which gives the peak amplitude. tau : float The damping time of the sinusoid. decay: float The fraction of the peak amplitude. Returns ------- f_decay: float The frequency at which the amplitude of the frequency-domain ringdown falls to decay of the peak amplitude.
[ "Return", "the", "frequency", "at", "which", "the", "amplitude", "of", "the", "ringdown", "falls", "to", "decay", "of", "the", "peak", "amplitude", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/ringdown.py#L140-L167
227,944
gwastro/pycbc
pycbc/waveform/ringdown.py
spher_harms
def spher_harms(l, m, inclination): """Return spherical harmonic polarizations """ # FIXME: we are using spin -2 weighted spherical harmonics for now, # when possible switch to spheroidal harmonics. Y_lm = lal.SpinWeightedSphericalHarmonic(inclination, 0., -2, l, m).real Y_lminusm = lal.SpinWeightedSphericalHarmonic(inclination, 0., -2, l, -m).real Y_plus = Y_lm + (-1)**l * Y_lminusm Y_cross = Y_lm - (-1)**l * Y_lminusm return Y_plus, Y_cross
python
def spher_harms(l, m, inclination): # FIXME: we are using spin -2 weighted spherical harmonics for now, # when possible switch to spheroidal harmonics. Y_lm = lal.SpinWeightedSphericalHarmonic(inclination, 0., -2, l, m).real Y_lminusm = lal.SpinWeightedSphericalHarmonic(inclination, 0., -2, l, -m).real Y_plus = Y_lm + (-1)**l * Y_lminusm Y_cross = Y_lm - (-1)**l * Y_lminusm return Y_plus, Y_cross
[ "def", "spher_harms", "(", "l", ",", "m", ",", "inclination", ")", ":", "# FIXME: we are using spin -2 weighted spherical harmonics for now,", "# when possible switch to spheroidal harmonics.", "Y_lm", "=", "lal", ".", "SpinWeightedSphericalHarmonic", "(", "inclination", ",", ...
Return spherical harmonic polarizations
[ "Return", "spherical", "harmonic", "polarizations" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/ringdown.py#L237-L248
227,945
gwastro/pycbc
pycbc/waveform/ringdown.py
apply_taper
def apply_taper(delta_t, taper, f_0, tau, amp, phi, l, m, inclination): """Return tapering window. """ # Times of tapering do not include t=0 taper_times = -numpy.arange(1, int(taper*tau/delta_t))[::-1] * delta_t Y_plus, Y_cross = spher_harms(l, m, inclination) taper_hp = amp * Y_plus * numpy.exp(10*taper_times/tau) * \ numpy.cos(two_pi*f_0*taper_times + phi) taper_hc = amp * Y_cross * numpy.exp(10*taper_times/tau) * \ numpy.sin(two_pi*f_0*taper_times + phi) return taper_hp, taper_hc
python
def apply_taper(delta_t, taper, f_0, tau, amp, phi, l, m, inclination): # Times of tapering do not include t=0 taper_times = -numpy.arange(1, int(taper*tau/delta_t))[::-1] * delta_t Y_plus, Y_cross = spher_harms(l, m, inclination) taper_hp = amp * Y_plus * numpy.exp(10*taper_times/tau) * \ numpy.cos(two_pi*f_0*taper_times + phi) taper_hc = amp * Y_cross * numpy.exp(10*taper_times/tau) * \ numpy.sin(two_pi*f_0*taper_times + phi) return taper_hp, taper_hc
[ "def", "apply_taper", "(", "delta_t", ",", "taper", ",", "f_0", ",", "tau", ",", "amp", ",", "phi", ",", "l", ",", "m", ",", "inclination", ")", ":", "# Times of tapering do not include t=0", "taper_times", "=", "-", "numpy", ".", "arange", "(", "1", ","...
Return tapering window.
[ "Return", "tapering", "window", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/ringdown.py#L264-L276
227,946
gwastro/pycbc
pycbc/waveform/ringdown.py
get_td_qnm
def get_td_qnm(template=None, taper=None, **kwargs): """Return a time domain damped sinusoid. Parameters ---------- template: object An object that has attached properties. This can be used to substitute for keyword arguments. A common example would be a row in an xml table. taper: {None, float}, optional Tapering at the beginning of the waveform with duration taper * tau. This option is recommended with timescales taper=1./2 or 1. for time-domain ringdown-only injections. The abrupt turn on of the ringdown can cause issues on the waveform when doing the fourier transform to the frequency domain. Setting taper will add a rapid ringup with timescale tau/10. f_0 : float The ringdown-frequency. tau : float The damping time of the sinusoid. amp : float The amplitude of the ringdown (constant for now). phi : float The initial phase of the ringdown. Should also include the information from the azimuthal angle (phi_0 + m*Phi) inclination : {None, float}, optional Inclination of the system in radians for the spherical harmonics. l : {2, int}, optional l mode for the spherical harmonics. Default is l=2. m : {2, int}, optional m mode for the spherical harmonics. Default is m=2. delta_t : {None, float}, optional The time step used to generate the ringdown. If None, it will be set to the inverse of the frequency at which the amplitude is 1/1000 of the peak amplitude. t_final : {None, float}, optional The ending time of the output time series. If None, it will be set to the time at which the amplitude is 1/1000 of the peak amplitude. Returns ------- hplus: TimeSeries The plus phase of the ringdown in time domain. hcross: TimeSeries The cross phase of the ringdown in time domain. 
""" input_params = props(template, qnm_required_args, **kwargs) f_0 = input_params.pop('f_0') tau = input_params.pop('tau') amp = input_params.pop('amp') phi = input_params.pop('phi') # the following may not be in input_params inc = input_params.pop('inclination', None) l = input_params.pop('l', 2) m = input_params.pop('m', 2) delta_t = input_params.pop('delta_t', None) t_final = input_params.pop('t_final', None) if not delta_t: delta_t = 1. / qnm_freq_decay(f_0, tau, 1./1000) if delta_t < min_dt: delta_t = min_dt if not t_final: t_final = qnm_time_decay(tau, 1./1000) kmax = int(t_final / delta_t) + 1 times = numpy.arange(kmax) * delta_t if inc is not None: Y_plus, Y_cross = spher_harms(l, m, inc) else: Y_plus, Y_cross = 1, 1 hplus = amp * Y_plus * numpy.exp(-times/tau) * \ numpy.cos(two_pi*f_0*times + phi) hcross = amp * Y_cross * numpy.exp(-times/tau) * \ numpy.sin(two_pi*f_0*times + phi) if taper and delta_t < taper*tau: taper_window = int(taper*tau/delta_t) kmax += taper_window outplus = TimeSeries(zeros(kmax), delta_t=delta_t) outcross = TimeSeries(zeros(kmax), delta_t=delta_t) # If size of tapering window is less than delta_t, do not apply taper. if not taper or delta_t > taper*tau: outplus.data[:kmax] = hplus outcross.data[:kmax] = hcross return outplus, outcross else: taper_hp, taper_hc = apply_taper(delta_t, taper, f_0, tau, amp, phi, l, m, inc) start = - taper * tau outplus.data[:taper_window] = taper_hp outplus.data[taper_window:] = hplus outcross.data[:taper_window] = taper_hc outcross.data[taper_window:] = hcross outplus._epoch, outcross._epoch = start, start return outplus, outcross
python
def get_td_qnm(template=None, taper=None, **kwargs): input_params = props(template, qnm_required_args, **kwargs) f_0 = input_params.pop('f_0') tau = input_params.pop('tau') amp = input_params.pop('amp') phi = input_params.pop('phi') # the following may not be in input_params inc = input_params.pop('inclination', None) l = input_params.pop('l', 2) m = input_params.pop('m', 2) delta_t = input_params.pop('delta_t', None) t_final = input_params.pop('t_final', None) if not delta_t: delta_t = 1. / qnm_freq_decay(f_0, tau, 1./1000) if delta_t < min_dt: delta_t = min_dt if not t_final: t_final = qnm_time_decay(tau, 1./1000) kmax = int(t_final / delta_t) + 1 times = numpy.arange(kmax) * delta_t if inc is not None: Y_plus, Y_cross = spher_harms(l, m, inc) else: Y_plus, Y_cross = 1, 1 hplus = amp * Y_plus * numpy.exp(-times/tau) * \ numpy.cos(two_pi*f_0*times + phi) hcross = amp * Y_cross * numpy.exp(-times/tau) * \ numpy.sin(two_pi*f_0*times + phi) if taper and delta_t < taper*tau: taper_window = int(taper*tau/delta_t) kmax += taper_window outplus = TimeSeries(zeros(kmax), delta_t=delta_t) outcross = TimeSeries(zeros(kmax), delta_t=delta_t) # If size of tapering window is less than delta_t, do not apply taper. if not taper or delta_t > taper*tau: outplus.data[:kmax] = hplus outcross.data[:kmax] = hcross return outplus, outcross else: taper_hp, taper_hc = apply_taper(delta_t, taper, f_0, tau, amp, phi, l, m, inc) start = - taper * tau outplus.data[:taper_window] = taper_hp outplus.data[taper_window:] = hplus outcross.data[:taper_window] = taper_hc outcross.data[taper_window:] = hcross outplus._epoch, outcross._epoch = start, start return outplus, outcross
[ "def", "get_td_qnm", "(", "template", "=", "None", ",", "taper", "=", "None", ",", "*", "*", "kwargs", ")", ":", "input_params", "=", "props", "(", "template", ",", "qnm_required_args", ",", "*", "*", "kwargs", ")", "f_0", "=", "input_params", ".", "po...
Return a time domain damped sinusoid. Parameters ---------- template: object An object that has attached properties. This can be used to substitute for keyword arguments. A common example would be a row in an xml table. taper: {None, float}, optional Tapering at the beginning of the waveform with duration taper * tau. This option is recommended with timescales taper=1./2 or 1. for time-domain ringdown-only injections. The abrupt turn on of the ringdown can cause issues on the waveform when doing the fourier transform to the frequency domain. Setting taper will add a rapid ringup with timescale tau/10. f_0 : float The ringdown-frequency. tau : float The damping time of the sinusoid. amp : float The amplitude of the ringdown (constant for now). phi : float The initial phase of the ringdown. Should also include the information from the azimuthal angle (phi_0 + m*Phi) inclination : {None, float}, optional Inclination of the system in radians for the spherical harmonics. l : {2, int}, optional l mode for the spherical harmonics. Default is l=2. m : {2, int}, optional m mode for the spherical harmonics. Default is m=2. delta_t : {None, float}, optional The time step used to generate the ringdown. If None, it will be set to the inverse of the frequency at which the amplitude is 1/1000 of the peak amplitude. t_final : {None, float}, optional The ending time of the output time series. If None, it will be set to the time at which the amplitude is 1/1000 of the peak amplitude. Returns ------- hplus: TimeSeries The plus phase of the ringdown in time domain. hcross: TimeSeries The cross phase of the ringdown in time domain.
[ "Return", "a", "time", "domain", "damped", "sinusoid", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/ringdown.py#L296-L399
227,947
gwastro/pycbc
pycbc/waveform/ringdown.py
get_fd_qnm
def get_fd_qnm(template=None, **kwargs): """Return a frequency domain damped sinusoid. Parameters ---------- template: object An object that has attached properties. This can be used to substitute for keyword arguments. A common example would be a row in an xml table. f_0 : float The ringdown-frequency. tau : float The damping time of the sinusoid. amp : float The amplitude of the ringdown (constant for now). phi : float The initial phase of the ringdown. Should also include the information from the azimuthal angle (phi_0 + m*Phi). inclination : {None, float}, optional Inclination of the system in radians for the spherical harmonics. l : {2, int}, optional l mode for the spherical harmonics. Default is l=2. m : {2, int}, optional m mode for the spherical harmonics. Default is m=2. t_0 : {0, float}, optional The starting time of the ringdown. delta_f : {None, float}, optional The frequency step used to generate the ringdown. If None, it will be set to the inverse of the time at which the amplitude is 1/1000 of the peak amplitude. f_lower: {None, float}, optional The starting frequency of the output frequency series. If None, it will be set to delta_f. f_final : {None, float}, optional The ending frequency of the output frequency series. If None, it will be set to the frequency at which the amplitude is 1/1000 of the peak amplitude. Returns ------- hplustilde: FrequencySeries The plus phase of the ringdown in frequency domain. hcrosstilde: FrequencySeries The cross phase of the ringdown in frequency domain. 
""" input_params = props(template, qnm_required_args, **kwargs) f_0 = input_params.pop('f_0') tau = input_params.pop('tau') amp = input_params.pop('amp') phi = input_params.pop('phi') # the following have defaults, and so will be populated t_0 = input_params.pop('t_0') # the following may not be in input_params inc = input_params.pop('inclination', None) l = input_params.pop('l', 2) m = input_params.pop('m', 2) delta_f = input_params.pop('delta_f', None) f_lower = input_params.pop('f_lower', None) f_final = input_params.pop('f_final', None) if not delta_f: delta_f = 1. / qnm_time_decay(tau, 1./1000) if not f_lower: f_lower = delta_f kmin = 0 else: kmin = int(f_lower / delta_f) if not f_final: f_final = qnm_freq_decay(f_0, tau, 1./1000) if f_final > max_freq: f_final = max_freq kmax = int(f_final / delta_f) + 1 freqs = numpy.arange(kmin, kmax)*delta_f if inc is not None: Y_plus, Y_cross = spher_harms(l, m, inc) else: Y_plus, Y_cross = 1, 1 denominator = 1 + (4j * pi * freqs * tau) - (4 * pi_sq * ( freqs*freqs - f_0*f_0) * tau*tau) norm = amp * tau / denominator if t_0 != 0: time_shift = numpy.exp(-1j * two_pi * freqs * t_0) norm *= time_shift # Analytical expression for the Fourier transform of the ringdown (damped sinusoid) hp_tilde = norm * Y_plus * ( (1 + 2j * pi * freqs * tau) * numpy.cos(phi) - two_pi * f_0 * tau * numpy.sin(phi) ) hc_tilde = norm * Y_cross * ( (1 + 2j * pi * freqs * tau) * numpy.sin(phi) + two_pi * f_0 * tau * numpy.cos(phi) ) outplus = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) outcross = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) outplus.data[kmin:kmax] = hp_tilde outcross.data[kmin:kmax] = hc_tilde return outplus, outcross
python
def get_fd_qnm(template=None, **kwargs): input_params = props(template, qnm_required_args, **kwargs) f_0 = input_params.pop('f_0') tau = input_params.pop('tau') amp = input_params.pop('amp') phi = input_params.pop('phi') # the following have defaults, and so will be populated t_0 = input_params.pop('t_0') # the following may not be in input_params inc = input_params.pop('inclination', None) l = input_params.pop('l', 2) m = input_params.pop('m', 2) delta_f = input_params.pop('delta_f', None) f_lower = input_params.pop('f_lower', None) f_final = input_params.pop('f_final', None) if not delta_f: delta_f = 1. / qnm_time_decay(tau, 1./1000) if not f_lower: f_lower = delta_f kmin = 0 else: kmin = int(f_lower / delta_f) if not f_final: f_final = qnm_freq_decay(f_0, tau, 1./1000) if f_final > max_freq: f_final = max_freq kmax = int(f_final / delta_f) + 1 freqs = numpy.arange(kmin, kmax)*delta_f if inc is not None: Y_plus, Y_cross = spher_harms(l, m, inc) else: Y_plus, Y_cross = 1, 1 denominator = 1 + (4j * pi * freqs * tau) - (4 * pi_sq * ( freqs*freqs - f_0*f_0) * tau*tau) norm = amp * tau / denominator if t_0 != 0: time_shift = numpy.exp(-1j * two_pi * freqs * t_0) norm *= time_shift # Analytical expression for the Fourier transform of the ringdown (damped sinusoid) hp_tilde = norm * Y_plus * ( (1 + 2j * pi * freqs * tau) * numpy.cos(phi) - two_pi * f_0 * tau * numpy.sin(phi) ) hc_tilde = norm * Y_cross * ( (1 + 2j * pi * freqs * tau) * numpy.sin(phi) + two_pi * f_0 * tau * numpy.cos(phi) ) outplus = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) outcross = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) outplus.data[kmin:kmax] = hp_tilde outcross.data[kmin:kmax] = hc_tilde return outplus, outcross
[ "def", "get_fd_qnm", "(", "template", "=", "None", ",", "*", "*", "kwargs", ")", ":", "input_params", "=", "props", "(", "template", ",", "qnm_required_args", ",", "*", "*", "kwargs", ")", "f_0", "=", "input_params", ".", "pop", "(", "'f_0'", ")", "tau...
Return a frequency domain damped sinusoid. Parameters ---------- template: object An object that has attached properties. This can be used to substitute for keyword arguments. A common example would be a row in an xml table. f_0 : float The ringdown-frequency. tau : float The damping time of the sinusoid. amp : float The amplitude of the ringdown (constant for now). phi : float The initial phase of the ringdown. Should also include the information from the azimuthal angle (phi_0 + m*Phi). inclination : {None, float}, optional Inclination of the system in radians for the spherical harmonics. l : {2, int}, optional l mode for the spherical harmonics. Default is l=2. m : {2, int}, optional m mode for the spherical harmonics. Default is m=2. t_0 : {0, float}, optional The starting time of the ringdown. delta_f : {None, float}, optional The frequency step used to generate the ringdown. If None, it will be set to the inverse of the time at which the amplitude is 1/1000 of the peak amplitude. f_lower: {None, float}, optional The starting frequency of the output frequency series. If None, it will be set to delta_f. f_final : {None, float}, optional The ending frequency of the output frequency series. If None, it will be set to the frequency at which the amplitude is 1/1000 of the peak amplitude. Returns ------- hplustilde: FrequencySeries The plus phase of the ringdown in frequency domain. hcrosstilde: FrequencySeries The cross phase of the ringdown in frequency domain.
[ "Return", "a", "frequency", "domain", "damped", "sinusoid", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/ringdown.py#L401-L498
227,948
gwastro/pycbc
pycbc/waveform/ringdown.py
get_td_lm
def get_td_lm(template=None, taper=None, **kwargs): """Return time domain lm mode with the given number of overtones. Parameters ---------- template: object An object that has attached properties. This can be used to substitute for keyword arguments. A common example would be a row in an xml table. taper: {None, float}, optional Tapering at the beginning of the waveform with duration taper * tau. This option is recommended with timescales taper=1./2 or 1. for time-domain ringdown-only injections. The abrupt turn on of the ringdown can cause issues on the waveform when doing the fourier transform to the frequency domain. Setting taper will add a rapid ringup with timescale tau/10. Each overtone will have a different taper depending on its tau, the final taper being the superposition of all the tapers. freqs : dict {lmn:f_lmn} Dictionary of the central frequencies for each overtone, as many as number of modes. taus : dict {lmn:tau_lmn} Dictionary of the damping times for each overtone, as many as number of modes. l : int l mode (lm modes available: 22, 21, 33, 44, 55). m : int m mode (lm modes available: 22, 21, 33, 44, 55). nmodes: int Number of overtones desired (maximum n=8) amp220 : float Amplitude of the fundamental 220 mode, needed for any lm. amplmn : float Fraction of the amplitude of the lmn overtone relative to the fundamental mode, as many as the number of subdominant modes. philmn : float Phase of the lmn overtone, as many as the number of modes. Should also include the information from the azimuthal angle (phi + m*Phi). inclination : {None, float}, optional Inclination of the system in radians for the spherical harmonics. delta_t : {None, float}, optional The time step used to generate the ringdown. If None, it will be set to the inverse of the frequency at which the amplitude is 1/1000 of the peak amplitude (the minimum of all modes). t_final : {None, float}, optional The ending time of the output time series. 
If None, it will be set to the time at which the amplitude is 1/1000 of the peak amplitude (the maximum of all modes). Returns ------- hplus: TimeSeries The plus phase of a lm mode with overtones (n) in time domain. hcross: TimeSeries The cross phase of a lm mode with overtones (n) in time domain. """ input_params = props(template, lm_required_args, **kwargs) # Get required args amps, phis = lm_amps_phases(**input_params) f_0 = input_params.pop('freqs') tau = input_params.pop('taus') inc = input_params.pop('inclination', None) l, m = input_params.pop('l'), input_params.pop('m') nmodes = input_params.pop('nmodes') if int(nmodes) == 0: raise ValueError('Number of overtones (nmodes) must be greater ' 'than zero.') # The following may not be in input_params delta_t = input_params.pop('delta_t', None) t_final = input_params.pop('t_final', None) if not delta_t: delta_t = lm_deltat(f_0, tau, ['%d%d%d' %(l,m,nmodes)]) if not t_final: t_final = lm_tfinal(tau, ['%d%d%d' %(l, m, nmodes)]) kmax = int(t_final / delta_t) + 1 # Different overtones will have different tapering window-size # Find maximum window size to create long enough output vector if taper: taper_window = int(taper*max(tau.values())/delta_t) kmax += taper_window outplus = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t) outcross = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t) if taper: start = - taper * max(tau.values()) outplus._epoch, outcross._epoch = start, start for n in range(nmodes): hplus, hcross = get_td_qnm(template=None, taper=taper, f_0=f_0['%d%d%d' %(l,m,n)], tau=tau['%d%d%d' %(l,m,n)], phi=phis['%d%d%d' %(l,m,n)], amp=amps['%d%d%d' %(l,m,n)], inclination=inc, l=l, m=m, delta_t=delta_t, t_final=t_final) if not taper: outplus.data += hplus.data outcross.data += hcross.data else: outplus = taper_shift(hplus, outplus) outcross = taper_shift(hcross, outcross) return outplus, outcross
python
def get_td_lm(template=None, taper=None, **kwargs): input_params = props(template, lm_required_args, **kwargs) # Get required args amps, phis = lm_amps_phases(**input_params) f_0 = input_params.pop('freqs') tau = input_params.pop('taus') inc = input_params.pop('inclination', None) l, m = input_params.pop('l'), input_params.pop('m') nmodes = input_params.pop('nmodes') if int(nmodes) == 0: raise ValueError('Number of overtones (nmodes) must be greater ' 'than zero.') # The following may not be in input_params delta_t = input_params.pop('delta_t', None) t_final = input_params.pop('t_final', None) if not delta_t: delta_t = lm_deltat(f_0, tau, ['%d%d%d' %(l,m,nmodes)]) if not t_final: t_final = lm_tfinal(tau, ['%d%d%d' %(l, m, nmodes)]) kmax = int(t_final / delta_t) + 1 # Different overtones will have different tapering window-size # Find maximum window size to create long enough output vector if taper: taper_window = int(taper*max(tau.values())/delta_t) kmax += taper_window outplus = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t) outcross = TimeSeries(zeros(kmax, dtype=float64), delta_t=delta_t) if taper: start = - taper * max(tau.values()) outplus._epoch, outcross._epoch = start, start for n in range(nmodes): hplus, hcross = get_td_qnm(template=None, taper=taper, f_0=f_0['%d%d%d' %(l,m,n)], tau=tau['%d%d%d' %(l,m,n)], phi=phis['%d%d%d' %(l,m,n)], amp=amps['%d%d%d' %(l,m,n)], inclination=inc, l=l, m=m, delta_t=delta_t, t_final=t_final) if not taper: outplus.data += hplus.data outcross.data += hcross.data else: outplus = taper_shift(hplus, outplus) outcross = taper_shift(hcross, outcross) return outplus, outcross
[ "def", "get_td_lm", "(", "template", "=", "None", ",", "taper", "=", "None", ",", "*", "*", "kwargs", ")", ":", "input_params", "=", "props", "(", "template", ",", "lm_required_args", ",", "*", "*", "kwargs", ")", "# Get required args", "amps", ",", "phi...
Return time domain lm mode with the given number of overtones. Parameters ---------- template: object An object that has attached properties. This can be used to substitute for keyword arguments. A common example would be a row in an xml table. taper: {None, float}, optional Tapering at the beginning of the waveform with duration taper * tau. This option is recommended with timescales taper=1./2 or 1. for time-domain ringdown-only injections. The abrupt turn on of the ringdown can cause issues on the waveform when doing the fourier transform to the frequency domain. Setting taper will add a rapid ringup with timescale tau/10. Each overtone will have a different taper depending on its tau, the final taper being the superposition of all the tapers. freqs : dict {lmn:f_lmn} Dictionary of the central frequencies for each overtone, as many as number of modes. taus : dict {lmn:tau_lmn} Dictionary of the damping times for each overtone, as many as number of modes. l : int l mode (lm modes available: 22, 21, 33, 44, 55). m : int m mode (lm modes available: 22, 21, 33, 44, 55). nmodes: int Number of overtones desired (maximum n=8) amp220 : float Amplitude of the fundamental 220 mode, needed for any lm. amplmn : float Fraction of the amplitude of the lmn overtone relative to the fundamental mode, as many as the number of subdominant modes. philmn : float Phase of the lmn overtone, as many as the number of modes. Should also include the information from the azimuthal angle (phi + m*Phi). inclination : {None, float}, optional Inclination of the system in radians for the spherical harmonics. delta_t : {None, float}, optional The time step used to generate the ringdown. If None, it will be set to the inverse of the frequency at which the amplitude is 1/1000 of the peak amplitude (the minimum of all modes). t_final : {None, float}, optional The ending time of the output time series. 
If None, it will be set to the time at which the amplitude is 1/1000 of the peak amplitude (the maximum of all modes). Returns ------- hplus: TimeSeries The plus phase of a lm mode with overtones (n) in time domain. hcross: TimeSeries The cross phase of a lm mode with overtones (n) in time domain.
[ "Return", "time", "domain", "lm", "mode", "with", "the", "given", "number", "of", "overtones", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/ringdown.py#L504-L609
227,949
gwastro/pycbc
pycbc/waveform/ringdown.py
get_fd_lm
def get_fd_lm(template=None, **kwargs): """Return frequency domain lm mode with a given number of overtones. Parameters ---------- template: object An object that has attached properties. This can be used to substitute for keyword arguments. A common example would be a row in an xml table. freqs : dict {lmn:f_lmn} Dictionary of the central frequencies for each overtone, as many as number of modes. taus : dict {lmn:tau_lmn} Dictionary of the damping times for each overtone, as many as number of modes. l : int l mode (lm modes available: 22, 21, 33, 44, 55). m : int m mode (lm modes available: 22, 21, 33, 44, 55). nmodes: int Number of overtones desired (maximum n=8) amplmn : float Amplitude of the lmn overtone, as many as the number of nmodes. philmn : float Phase of the lmn overtone, as many as the number of modes. Should also include the information from the azimuthal angle (phi + m*Phi). inclination : {None, float}, optional Inclination of the system in radians for the spherical harmonics. delta_f : {None, float}, optional The frequency step used to generate the ringdown. If None, it will be set to the inverse of the time at which the amplitude is 1/1000 of the peak amplitude (the minimum of all modes). f_lower: {None, float}, optional The starting frequency of the output frequency series. If None, it will be set to delta_f. f_final : {None, float}, optional The ending frequency of the output frequency series. If None, it will be set to the frequency at which the amplitude is 1/1000 of the peak amplitude (the maximum of all modes). Returns ------- hplustilde: FrequencySeries The plus phase of a lm mode with n overtones in frequency domain. hcrosstilde: FrequencySeries The cross phase of a lm mode with n overtones in frequency domain. 
""" input_params = props(template, lm_required_args, **kwargs) # Get required args amps, phis = lm_amps_phases(**input_params) f_0 = input_params.pop('freqs') tau = input_params.pop('taus') l, m = input_params.pop('l'), input_params.pop('m') inc = input_params.pop('inclination', None) nmodes = input_params.pop('nmodes') if int(nmodes) == 0: raise ValueError('Number of overtones (nmodes) must be greater ' 'than zero.') # The following may not be in input_params delta_f = input_params.pop('delta_f', None) f_lower = input_params.pop('f_lower', None) f_final = input_params.pop('f_final', None) if not delta_f: delta_f = lm_deltaf(tau, ['%d%d%d' %(l,m,nmodes)]) if not f_final: f_final = lm_ffinal(f_0, tau, ['%d%d%d' %(l, m, nmodes)]) kmax = int(f_final / delta_f) + 1 outplus = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) outcross = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) for n in range(nmodes): hplus, hcross = get_fd_qnm(template=None, f_0=f_0['%d%d%d' %(l,m,n)], tau=tau['%d%d%d' %(l,m,n)], amp=amps['%d%d%d' %(l,m,n)], phi=phis['%d%d%d' %(l,m,n)], inclination=inc, l=l, m=m, delta_f=delta_f, f_lower=f_lower, f_final=f_final) outplus.data += hplus.data outcross.data += hcross.data return outplus, outcross
python
def get_fd_lm(template=None, **kwargs): input_params = props(template, lm_required_args, **kwargs) # Get required args amps, phis = lm_amps_phases(**input_params) f_0 = input_params.pop('freqs') tau = input_params.pop('taus') l, m = input_params.pop('l'), input_params.pop('m') inc = input_params.pop('inclination', None) nmodes = input_params.pop('nmodes') if int(nmodes) == 0: raise ValueError('Number of overtones (nmodes) must be greater ' 'than zero.') # The following may not be in input_params delta_f = input_params.pop('delta_f', None) f_lower = input_params.pop('f_lower', None) f_final = input_params.pop('f_final', None) if not delta_f: delta_f = lm_deltaf(tau, ['%d%d%d' %(l,m,nmodes)]) if not f_final: f_final = lm_ffinal(f_0, tau, ['%d%d%d' %(l, m, nmodes)]) kmax = int(f_final / delta_f) + 1 outplus = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) outcross = FrequencySeries(zeros(kmax, dtype=complex128), delta_f=delta_f) for n in range(nmodes): hplus, hcross = get_fd_qnm(template=None, f_0=f_0['%d%d%d' %(l,m,n)], tau=tau['%d%d%d' %(l,m,n)], amp=amps['%d%d%d' %(l,m,n)], phi=phis['%d%d%d' %(l,m,n)], inclination=inc, l=l, m=m, delta_f=delta_f, f_lower=f_lower, f_final=f_final) outplus.data += hplus.data outcross.data += hcross.data return outplus, outcross
[ "def", "get_fd_lm", "(", "template", "=", "None", ",", "*", "*", "kwargs", ")", ":", "input_params", "=", "props", "(", "template", ",", "lm_required_args", ",", "*", "*", "kwargs", ")", "# Get required args", "amps", ",", "phis", "=", "lm_amps_phases", "(...
Return frequency domain lm mode with a given number of overtones. Parameters ---------- template: object An object that has attached properties. This can be used to substitute for keyword arguments. A common example would be a row in an xml table. freqs : dict {lmn:f_lmn} Dictionary of the central frequencies for each overtone, as many as number of modes. taus : dict {lmn:tau_lmn} Dictionary of the damping times for each overtone, as many as number of modes. l : int l mode (lm modes available: 22, 21, 33, 44, 55). m : int m mode (lm modes available: 22, 21, 33, 44, 55). nmodes: int Number of overtones desired (maximum n=8) amplmn : float Amplitude of the lmn overtone, as many as the number of nmodes. philmn : float Phase of the lmn overtone, as many as the number of modes. Should also include the information from the azimuthal angle (phi + m*Phi). inclination : {None, float}, optional Inclination of the system in radians for the spherical harmonics. delta_f : {None, float}, optional The frequency step used to generate the ringdown. If None, it will be set to the inverse of the time at which the amplitude is 1/1000 of the peak amplitude (the minimum of all modes). f_lower: {None, float}, optional The starting frequency of the output frequency series. If None, it will be set to delta_f. f_final : {None, float}, optional The ending frequency of the output frequency series. If None, it will be set to the frequency at which the amplitude is 1/1000 of the peak amplitude (the maximum of all modes). Returns ------- hplustilde: FrequencySeries The plus phase of a lm mode with n overtones in frequency domain. hcrosstilde: FrequencySeries The cross phase of a lm mode with n overtones in frequency domain.
[ "Return", "frequency", "domain", "lm", "mode", "with", "a", "given", "number", "of", "overtones", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/ringdown.py#L611-L694
227,950
gwastro/pycbc
pycbc/rate.py
normalize_pdf
def normalize_pdf(mu, pofmu): """ Takes a function pofmu defined at rate sample values mu and normalizes it to be a suitable pdf. Both mu and pofmu must be arrays or lists of the same length. """ if min(pofmu) < 0: raise ValueError("Probabilities cannot be negative, don't ask me to " "normalize a function with negative values!") if min(mu) < 0: raise ValueError("Rates cannot be negative, don't ask me to " "normalize a function over a negative domain!") dp = integral_element(mu, pofmu) return mu, pofmu/sum(dp)
python
def normalize_pdf(mu, pofmu): if min(pofmu) < 0: raise ValueError("Probabilities cannot be negative, don't ask me to " "normalize a function with negative values!") if min(mu) < 0: raise ValueError("Rates cannot be negative, don't ask me to " "normalize a function over a negative domain!") dp = integral_element(mu, pofmu) return mu, pofmu/sum(dp)
[ "def", "normalize_pdf", "(", "mu", ",", "pofmu", ")", ":", "if", "min", "(", "pofmu", ")", "<", "0", ":", "raise", "ValueError", "(", "\"Probabilities cannot be negative, don't ask me to \"", "\"normalize a function with negative values!\"", ")", "if", "min", "(", "...
Takes a function pofmu defined at rate sample values mu and normalizes it to be a suitable pdf. Both mu and pofmu must be arrays or lists of the same length.
[ "Takes", "a", "function", "pofmu", "defined", "at", "rate", "sample", "values", "mu", "and", "normalizes", "it", "to", "be", "a", "suitable", "pdf", ".", "Both", "mu", "and", "pofmu", "must", "be", "arrays", "or", "lists", "of", "the", "same", "length", ...
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/rate.py#L18-L32
227,951
gwastro/pycbc
pycbc/rate.py
compute_upper_limit
def compute_upper_limit(mu_in, post, alpha=0.9): """ Returns the upper limit mu_high of confidence level alpha for a posterior distribution post on the given parameter mu. The posterior need not be normalized. """ if 0 < alpha < 1: dp = integral_element(mu_in, post) high_idx = bisect.bisect_left(dp.cumsum() / dp.sum(), alpha) # if alpha is in (0,1] and post is non-negative, bisect_left # will always return an index in the range of mu since # post.cumsum()/post.sum() will always begin at 0 and end at 1 mu_high = mu_in[high_idx] elif alpha == 1: mu_high = numpy.max(mu_in[post > 0]) else: raise ValueError("Confidence level must be in (0,1].") return mu_high
python
def compute_upper_limit(mu_in, post, alpha=0.9): if 0 < alpha < 1: dp = integral_element(mu_in, post) high_idx = bisect.bisect_left(dp.cumsum() / dp.sum(), alpha) # if alpha is in (0,1] and post is non-negative, bisect_left # will always return an index in the range of mu since # post.cumsum()/post.sum() will always begin at 0 and end at 1 mu_high = mu_in[high_idx] elif alpha == 1: mu_high = numpy.max(mu_in[post > 0]) else: raise ValueError("Confidence level must be in (0,1].") return mu_high
[ "def", "compute_upper_limit", "(", "mu_in", ",", "post", ",", "alpha", "=", "0.9", ")", ":", "if", "0", "<", "alpha", "<", "1", ":", "dp", "=", "integral_element", "(", "mu_in", ",", "post", ")", "high_idx", "=", "bisect", ".", "bisect_left", "(", "d...
Returns the upper limit mu_high of confidence level alpha for a posterior distribution post on the given parameter mu. The posterior need not be normalized.
[ "Returns", "the", "upper", "limit", "mu_high", "of", "confidence", "level", "alpha", "for", "a", "posterior", "distribution", "post", "on", "the", "given", "parameter", "mu", ".", "The", "posterior", "need", "not", "be", "normalized", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/rate.py#L35-L53
227,952
gwastro/pycbc
pycbc/rate.py
compute_lower_limit
def compute_lower_limit(mu_in, post, alpha=0.9): """ Returns the lower limit mu_low of confidence level alpha for a posterior distribution post on the given parameter mu. The posterior need not be normalized. """ if 0 < alpha < 1: dp = integral_element(mu_in, post) low_idx = bisect.bisect_right(dp.cumsum() / dp.sum(), 1 - alpha) # if alpha is in [0,1) and post is non-negative, bisect_right # will always return an index in the range of mu since # post.cumsum()/post.sum() will always begin at 0 and end at 1 mu_low = mu_in[low_idx] elif alpha == 1: mu_low = numpy.min(mu_in[post > 0]) else: raise ValueError("Confidence level must be in (0,1].") return mu_low
python
def compute_lower_limit(mu_in, post, alpha=0.9): if 0 < alpha < 1: dp = integral_element(mu_in, post) low_idx = bisect.bisect_right(dp.cumsum() / dp.sum(), 1 - alpha) # if alpha is in [0,1) and post is non-negative, bisect_right # will always return an index in the range of mu since # post.cumsum()/post.sum() will always begin at 0 and end at 1 mu_low = mu_in[low_idx] elif alpha == 1: mu_low = numpy.min(mu_in[post > 0]) else: raise ValueError("Confidence level must be in (0,1].") return mu_low
[ "def", "compute_lower_limit", "(", "mu_in", ",", "post", ",", "alpha", "=", "0.9", ")", ":", "if", "0", "<", "alpha", "<", "1", ":", "dp", "=", "integral_element", "(", "mu_in", ",", "post", ")", "low_idx", "=", "bisect", ".", "bisect_right", "(", "d...
Returns the lower limit mu_low of confidence level alpha for a posterior distribution post on the given parameter mu. The posterior need not be normalized.
[ "Returns", "the", "lower", "limit", "mu_low", "of", "confidence", "level", "alpha", "for", "a", "posterior", "distribution", "post", "on", "the", "given", "parameter", "mu", ".", "The", "posterior", "need", "not", "be", "normalized", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/rate.py#L56-L74
227,953
gwastro/pycbc
pycbc/rate.py
hpd_coverage
def hpd_coverage(mu, pdf, thresh): ''' Integrates a pdf over mu taking only bins where the mean over the bin is above a given threshold This gives the coverage of the HPD interval for the given threshold. ''' dp = integral_element(mu, pdf) bin_mean = (pdf[1:] + pdf[:-1]) / 2. return dp[bin_mean > thresh].sum()
python
def hpd_coverage(mu, pdf, thresh): ''' Integrates a pdf over mu taking only bins where the mean over the bin is above a given threshold This gives the coverage of the HPD interval for the given threshold. ''' dp = integral_element(mu, pdf) bin_mean = (pdf[1:] + pdf[:-1]) / 2. return dp[bin_mean > thresh].sum()
[ "def", "hpd_coverage", "(", "mu", ",", "pdf", ",", "thresh", ")", ":", "dp", "=", "integral_element", "(", "mu", ",", "pdf", ")", "bin_mean", "=", "(", "pdf", "[", "1", ":", "]", "+", "pdf", "[", ":", "-", "1", "]", ")", "/", "2.", "return", ...
Integrates a pdf over mu taking only bins where the mean over the bin is above a given threshold This gives the coverage of the HPD interval for the given threshold.
[ "Integrates", "a", "pdf", "over", "mu", "taking", "only", "bins", "where", "the", "mean", "over", "the", "bin", "is", "above", "a", "given", "threshold", "This", "gives", "the", "coverage", "of", "the", "HPD", "interval", "for", "the", "given", "threshold"...
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/rate.py#L103-L113
227,954
gwastro/pycbc
pycbc/rate.py
hpd_threshold
def hpd_threshold(mu_in, post, alpha, tol): ''' For a PDF post over samples mu_in, find a density threshold such that the region having higher density has coverage of at least alpha, and less than alpha plus a given tolerance. ''' norm_post = normalize_pdf(mu_in, post) # initialize bisection search p_minus = 0.0 p_plus = max(post) while abs(hpd_coverage(mu_in, norm_post, p_minus) - hpd_coverage(mu_in, norm_post, p_plus)) >= tol: p_test = (p_minus + p_plus) / 2. if hpd_coverage(mu_in, post, p_test) >= alpha: # test value was too low or just right p_minus = p_test else: # test value was too high p_plus = p_test # p_minus never goes above the required threshold and p_plus never goes below # thus on exiting p_minus is at or below the required threshold and the # difference in coverage is within tolerance return p_minus
python
def hpd_threshold(mu_in, post, alpha, tol): ''' For a PDF post over samples mu_in, find a density threshold such that the region having higher density has coverage of at least alpha, and less than alpha plus a given tolerance. ''' norm_post = normalize_pdf(mu_in, post) # initialize bisection search p_minus = 0.0 p_plus = max(post) while abs(hpd_coverage(mu_in, norm_post, p_minus) - hpd_coverage(mu_in, norm_post, p_plus)) >= tol: p_test = (p_minus + p_plus) / 2. if hpd_coverage(mu_in, post, p_test) >= alpha: # test value was too low or just right p_minus = p_test else: # test value was too high p_plus = p_test # p_minus never goes above the required threshold and p_plus never goes below # thus on exiting p_minus is at or below the required threshold and the # difference in coverage is within tolerance return p_minus
[ "def", "hpd_threshold", "(", "mu_in", ",", "post", ",", "alpha", ",", "tol", ")", ":", "norm_post", "=", "normalize_pdf", "(", "mu_in", ",", "post", ")", "# initialize bisection search", "p_minus", "=", "0.0", "p_plus", "=", "max", "(", "post", ")", "while...
For a PDF post over samples mu_in, find a density threshold such that the region having higher density has coverage of at least alpha, and less than alpha plus a given tolerance.
[ "For", "a", "PDF", "post", "over", "samples", "mu_in", "find", "a", "density", "threshold", "such", "that", "the", "region", "having", "higher", "density", "has", "coverage", "of", "at", "least", "alpha", "and", "less", "than", "alpha", "plus", "a", "given...
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/rate.py#L116-L139
227,955
gwastro/pycbc
pycbc/rate.py
compute_volume_vs_mass
def compute_volume_vs_mass(found, missed, mass_bins, bin_type, dbins=None): """ Compute the average luminosity an experiment was sensitive to Assumes that luminosity is uniformly distributed in space. Input is the sets of found and missed injections. """ # mean and std estimate for luminosity volArray = bin_utils.BinnedArray(mass_bins) vol2Array = bin_utils.BinnedArray(mass_bins) # found/missed stats foundArray = bin_utils.BinnedArray(mass_bins) missedArray = bin_utils.BinnedArray(mass_bins) # compute the mean luminosity in each mass bin effvmass = [] errvmass = [] # 2D case first if bin_type == "Mass1_Mass2": for j, mc1 in enumerate(mass_bins.centres()[0]): for k, mc2 in enumerate(mass_bins.centres()[1]): newfound = filter_injections_by_mass( found, mass_bins, j, bin_type, k) newmissed = filter_injections_by_mass( missed, mass_bins, j, bin_type, k) foundArray[(mc1, mc2)] = len(newfound) missedArray[(mc1, mc2)] = len(newmissed) # compute the volume using this injection set meaneff, efferr, meanvol, volerr = mean_efficiency_volume( newfound, newmissed, dbins) effvmass.append(meaneff) errvmass.append(efferr) volArray[(mc1, mc2)] = meanvol vol2Array[(mc1, mc2)] = volerr return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass for j, mc in enumerate(mass_bins.centres()[0]): # filter out injections not in this mass bin newfound = filter_injections_by_mass(found, mass_bins, j, bin_type) newmissed = filter_injections_by_mass(missed, mass_bins, j, bin_type) foundArray[(mc, )] = len(newfound) missedArray[(mc, )] = len(newmissed) # compute the volume using this injection set meaneff, efferr, meanvol, volerr = mean_efficiency_volume( newfound, newmissed, dbins) effvmass.append(meaneff) errvmass.append(efferr) volArray[(mc, )] = meanvol vol2Array[(mc, )] = volerr return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass
python
def compute_volume_vs_mass(found, missed, mass_bins, bin_type, dbins=None): # mean and std estimate for luminosity volArray = bin_utils.BinnedArray(mass_bins) vol2Array = bin_utils.BinnedArray(mass_bins) # found/missed stats foundArray = bin_utils.BinnedArray(mass_bins) missedArray = bin_utils.BinnedArray(mass_bins) # compute the mean luminosity in each mass bin effvmass = [] errvmass = [] # 2D case first if bin_type == "Mass1_Mass2": for j, mc1 in enumerate(mass_bins.centres()[0]): for k, mc2 in enumerate(mass_bins.centres()[1]): newfound = filter_injections_by_mass( found, mass_bins, j, bin_type, k) newmissed = filter_injections_by_mass( missed, mass_bins, j, bin_type, k) foundArray[(mc1, mc2)] = len(newfound) missedArray[(mc1, mc2)] = len(newmissed) # compute the volume using this injection set meaneff, efferr, meanvol, volerr = mean_efficiency_volume( newfound, newmissed, dbins) effvmass.append(meaneff) errvmass.append(efferr) volArray[(mc1, mc2)] = meanvol vol2Array[(mc1, mc2)] = volerr return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass for j, mc in enumerate(mass_bins.centres()[0]): # filter out injections not in this mass bin newfound = filter_injections_by_mass(found, mass_bins, j, bin_type) newmissed = filter_injections_by_mass(missed, mass_bins, j, bin_type) foundArray[(mc, )] = len(newfound) missedArray[(mc, )] = len(newmissed) # compute the volume using this injection set meaneff, efferr, meanvol, volerr = mean_efficiency_volume( newfound, newmissed, dbins) effvmass.append(meaneff) errvmass.append(efferr) volArray[(mc, )] = meanvol vol2Array[(mc, )] = volerr return volArray, vol2Array, foundArray, missedArray, effvmass, errvmass
[ "def", "compute_volume_vs_mass", "(", "found", ",", "missed", ",", "mass_bins", ",", "bin_type", ",", "dbins", "=", "None", ")", ":", "# mean and std estimate for luminosity", "volArray", "=", "bin_utils", ".", "BinnedArray", "(", "mass_bins", ")", "vol2Array", "=...
Compute the average luminosity an experiment was sensitive to Assumes that luminosity is uniformly distributed in space. Input is the sets of found and missed injections.
[ "Compute", "the", "average", "luminosity", "an", "experiment", "was", "sensitive", "to" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/rate.py#L281-L338
227,956
gwastro/pycbc
pycbc/filter/autocorrelation.py
calculate_acf
def calculate_acf(data, delta_t=1.0, unbiased=False): r"""Calculates the one-sided autocorrelation function. Calculates the autocorrelation function (ACF) and returns the one-sided ACF. The ACF is defined as the autocovariance divided by the variance. The ACF can be estimated using .. math:: \hat{R}(k) = \frac{1}{n \sigma^{2}} \sum_{t=1}^{n-k} \left( X_{t} - \mu \right) \left( X_{t+k} - \mu \right) Where :math:`\hat{R}(k)` is the ACF, :math:`X_{t}` is the data series at time t, :math:`\mu` is the mean of :math:`X_{t}`, and :math:`\sigma^{2}` is the variance of :math:`X_{t}`. Parameters ----------- data : TimeSeries or numpy.array A TimeSeries or numpy.array of data. delta_t : float The time step of the data series if it is not a TimeSeries instance. unbiased : bool If True the normalization of the autocovariance function is n-k instead of n. This is called the unbiased estimation of the autocovariance. Note that this does not mean the ACF is unbiased. Returns ------- acf : numpy.array If data is a TimeSeries then acf will be a TimeSeries of the one-sided ACF. Else acf is a numpy.array. 
""" # if given a TimeSeries instance then get numpy.array if isinstance(data, TimeSeries): y = data.numpy() delta_t = data.delta_t else: y = data # Zero mean y = y - y.mean() ny_orig = len(y) npad = 1 while npad < 2*ny_orig: npad = npad << 1 ypad = numpy.zeros(npad) ypad[:ny_orig] = y # FFT data minus the mean fdata = TimeSeries(ypad, delta_t=delta_t).to_frequencyseries() # correlate # do not need to give the congjugate since correlate function does it cdata = FrequencySeries(zeros(len(fdata), dtype=fdata.dtype), delta_f=fdata.delta_f, copy=False) correlate(fdata, fdata, cdata) # IFFT correlated data to get unnormalized autocovariance time series acf = cdata.to_timeseries() acf = acf[:ny_orig] # normalize the autocovariance # note that dividing by acf[0] is the same as ( y.var() * len(acf) ) if unbiased: acf /= ( y.var() * numpy.arange(len(acf), 0, -1) ) else: acf /= acf[0] # return input datatype if isinstance(data, TimeSeries): return TimeSeries(acf, delta_t=delta_t) else: return acf
python
def calculate_acf(data, delta_t=1.0, unbiased=False): r"""Calculates the one-sided autocorrelation function. Calculates the autocorrelation function (ACF) and returns the one-sided ACF. The ACF is defined as the autocovariance divided by the variance. The ACF can be estimated using .. math:: \hat{R}(k) = \frac{1}{n \sigma^{2}} \sum_{t=1}^{n-k} \left( X_{t} - \mu \right) \left( X_{t+k} - \mu \right) Where :math:`\hat{R}(k)` is the ACF, :math:`X_{t}` is the data series at time t, :math:`\mu` is the mean of :math:`X_{t}`, and :math:`\sigma^{2}` is the variance of :math:`X_{t}`. Parameters ----------- data : TimeSeries or numpy.array A TimeSeries or numpy.array of data. delta_t : float The time step of the data series if it is not a TimeSeries instance. unbiased : bool If True the normalization of the autocovariance function is n-k instead of n. This is called the unbiased estimation of the autocovariance. Note that this does not mean the ACF is unbiased. Returns ------- acf : numpy.array If data is a TimeSeries then acf will be a TimeSeries of the one-sided ACF. Else acf is a numpy.array. 
""" # if given a TimeSeries instance then get numpy.array if isinstance(data, TimeSeries): y = data.numpy() delta_t = data.delta_t else: y = data # Zero mean y = y - y.mean() ny_orig = len(y) npad = 1 while npad < 2*ny_orig: npad = npad << 1 ypad = numpy.zeros(npad) ypad[:ny_orig] = y # FFT data minus the mean fdata = TimeSeries(ypad, delta_t=delta_t).to_frequencyseries() # correlate # do not need to give the congjugate since correlate function does it cdata = FrequencySeries(zeros(len(fdata), dtype=fdata.dtype), delta_f=fdata.delta_f, copy=False) correlate(fdata, fdata, cdata) # IFFT correlated data to get unnormalized autocovariance time series acf = cdata.to_timeseries() acf = acf[:ny_orig] # normalize the autocovariance # note that dividing by acf[0] is the same as ( y.var() * len(acf) ) if unbiased: acf /= ( y.var() * numpy.arange(len(acf), 0, -1) ) else: acf /= acf[0] # return input datatype if isinstance(data, TimeSeries): return TimeSeries(acf, delta_t=delta_t) else: return acf
[ "def", "calculate_acf", "(", "data", ",", "delta_t", "=", "1.0", ",", "unbiased", "=", "False", ")", ":", "# if given a TimeSeries instance then get numpy.array", "if", "isinstance", "(", "data", ",", "TimeSeries", ")", ":", "y", "=", "data", ".", "numpy", "("...
r"""Calculates the one-sided autocorrelation function. Calculates the autocorrelation function (ACF) and returns the one-sided ACF. The ACF is defined as the autocovariance divided by the variance. The ACF can be estimated using .. math:: \hat{R}(k) = \frac{1}{n \sigma^{2}} \sum_{t=1}^{n-k} \left( X_{t} - \mu \right) \left( X_{t+k} - \mu \right) Where :math:`\hat{R}(k)` is the ACF, :math:`X_{t}` is the data series at time t, :math:`\mu` is the mean of :math:`X_{t}`, and :math:`\sigma^{2}` is the variance of :math:`X_{t}`. Parameters ----------- data : TimeSeries or numpy.array A TimeSeries or numpy.array of data. delta_t : float The time step of the data series if it is not a TimeSeries instance. unbiased : bool If True the normalization of the autocovariance function is n-k instead of n. This is called the unbiased estimation of the autocovariance. Note that this does not mean the ACF is unbiased. Returns ------- acf : numpy.array If data is a TimeSeries then acf will be a TimeSeries of the one-sided ACF. Else acf is a numpy.array.
[ "r", "Calculates", "the", "one", "-", "sided", "autocorrelation", "function", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/filter/autocorrelation.py#L34-L108
227,957
gwastro/pycbc
pycbc/fft/fftw.py
insert_fft_options
def insert_fft_options(optgroup): """ Inserts the options that affect the behavior of this backend Parameters ---------- optgroup: fft_option OptionParser argument group whose options are extended """ optgroup.add_argument("--fftw-measure-level", help="Determines the measure level used in planning " "FFTW FFTs; allowed values are: " + str([0,1,2,3]), type=int, default=_default_measurelvl) optgroup.add_argument("--fftw-threads-backend", help="Give 'openmp', 'pthreads' or 'unthreaded' to specify which threaded FFTW to use", default=None) optgroup.add_argument("--fftw-input-float-wisdom-file", help="Filename from which to read single-precision wisdom", default=None) optgroup.add_argument("--fftw-input-double-wisdom-file", help="Filename from which to read double-precision wisdom", default=None) optgroup.add_argument("--fftw-output-float-wisdom-file", help="Filename to which to write single-precision wisdom", default=None) optgroup.add_argument("--fftw-output-double-wisdom-file", help="Filename to which to write double-precision wisdom", default=None) optgroup.add_argument("--fftw-import-system-wisdom", help = "If given, call fftw[f]_import_system_wisdom()", action = "store_true")
python
def insert_fft_options(optgroup): optgroup.add_argument("--fftw-measure-level", help="Determines the measure level used in planning " "FFTW FFTs; allowed values are: " + str([0,1,2,3]), type=int, default=_default_measurelvl) optgroup.add_argument("--fftw-threads-backend", help="Give 'openmp', 'pthreads' or 'unthreaded' to specify which threaded FFTW to use", default=None) optgroup.add_argument("--fftw-input-float-wisdom-file", help="Filename from which to read single-precision wisdom", default=None) optgroup.add_argument("--fftw-input-double-wisdom-file", help="Filename from which to read double-precision wisdom", default=None) optgroup.add_argument("--fftw-output-float-wisdom-file", help="Filename to which to write single-precision wisdom", default=None) optgroup.add_argument("--fftw-output-double-wisdom-file", help="Filename to which to write double-precision wisdom", default=None) optgroup.add_argument("--fftw-import-system-wisdom", help = "If given, call fftw[f]_import_system_wisdom()", action = "store_true")
[ "def", "insert_fft_options", "(", "optgroup", ")", ":", "optgroup", ".", "add_argument", "(", "\"--fftw-measure-level\"", ",", "help", "=", "\"Determines the measure level used in planning \"", "\"FFTW FFTs; allowed values are: \"", "+", "str", "(", "[", "0", ",", "1", ...
Inserts the options that affect the behavior of this backend Parameters ---------- optgroup: fft_option OptionParser argument group whose options are extended
[ "Inserts", "the", "options", "that", "affect", "the", "behavior", "of", "this", "backend" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/fft/fftw.py#L462-L492
227,958
gwastro/pycbc
pycbc/inference/entropy.py
kl
def kl(samples1, samples2, pdf1=False, pdf2=False, bins=30, hist_min=None, hist_max=None): """ Computes the Kullback-Leibler divergence for a single parameter from two distributions. Parameters ---------- samples1 : numpy.array Samples or probability density function (must also set `pdf1=True`). samples2 : numpy.array Samples or probability density function (must also set `pdf2=True`). pdf1 : bool Set to `True` if `samples1` is a probability density funtion already. pdf2 : bool Set to `True` if `samples2` is a probability density funtion already. bins : int Number of bins to use when calculating probability density function from a set of samples of the distribution. hist_min : numpy.float64 Minimum of the distributions' values to use. hist_max : numpy.float64 Maximum of the distributions' values to use. Returns ------- numpy.float64 The Kullback-Leibler divergence value. """ hist_range = (hist_min, hist_max) if not pdf1: samples1, _ = numpy.histogram(samples1, bins=bins, range=hist_range, normed=True) if not pdf2: samples2, _ = numpy.histogram(samples2, bins=bins, range=hist_range, normed=True) return stats.entropy(samples1, qk=samples2)
python
def kl(samples1, samples2, pdf1=False, pdf2=False, bins=30, hist_min=None, hist_max=None): hist_range = (hist_min, hist_max) if not pdf1: samples1, _ = numpy.histogram(samples1, bins=bins, range=hist_range, normed=True) if not pdf2: samples2, _ = numpy.histogram(samples2, bins=bins, range=hist_range, normed=True) return stats.entropy(samples1, qk=samples2)
[ "def", "kl", "(", "samples1", ",", "samples2", ",", "pdf1", "=", "False", ",", "pdf2", "=", "False", ",", "bins", "=", "30", ",", "hist_min", "=", "None", ",", "hist_max", "=", "None", ")", ":", "hist_range", "=", "(", "hist_min", ",", "hist_max", ...
Computes the Kullback-Leibler divergence for a single parameter from two distributions. Parameters ---------- samples1 : numpy.array Samples or probability density function (must also set `pdf1=True`). samples2 : numpy.array Samples or probability density function (must also set `pdf2=True`). pdf1 : bool Set to `True` if `samples1` is a probability density funtion already. pdf2 : bool Set to `True` if `samples2` is a probability density funtion already. bins : int Number of bins to use when calculating probability density function from a set of samples of the distribution. hist_min : numpy.float64 Minimum of the distributions' values to use. hist_max : numpy.float64 Maximum of the distributions' values to use. Returns ------- numpy.float64 The Kullback-Leibler divergence value.
[ "Computes", "the", "Kullback", "-", "Leibler", "divergence", "for", "a", "single", "parameter", "from", "two", "distributions", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/entropy.py#L10-L45
227,959
gwastro/pycbc
docs/_include/_dict_to_rst.py
rst_dict_table
def rst_dict_table(dict_, key_format=str, val_format=str, header=None, sort=True): """Returns an RST-formatted table of keys and values from a `dict` Parameters ---------- dict_ : dict data to display in table key_format : callable callable function with which to format keys val_format : callable callable function with which to format values header : None, tuple of str a 2-tuple of header for the two columns, or `None` to exclude a header line (default) sort : bool, optional Sort the dictionary keys alphabetically when writing the table. Examples -------- >>> a = {'key1': 'value1', 'key2': 'value2'} >>> print(rst_dict_table(a)) ==== ====== key1 value1 key2 value2 ==== ====== >>> print(rst_dict_table(a, key_format='``{}``'.format, ... val_format=':class:`{}`'.format, ... header=('Key', 'Value')) ======== =============== Key Value ======== =============== ``key1`` :class:`value1` ``key2`` :class:`value2` ======== =============== """ keys, values = zip(*dict_.items()) # apply formatting keys = map(key_format, keys) values = map(val_format, values) # work out longest elements in each column nckey = max(map(len, keys)) ncval = max(map(len, values)) if header: khead, vhead = header nckey = max(nckey, len(khead)) ncval = max(ncval, len(vhead)) # build table header line divider = "{} {}".format('='*nckey, '='*ncval) def row(key, val): fmt = '{{0:{0}s}} {{1}}'.format(nckey, ncval) return fmt.format(key, val) # build table of lines lines = [divider] if header: lines.extend((row(*header), divider)) params = zip(keys, values) if sort: params = sorted(params) for key, val in params: fmt = '{{0:{0}s}} {{1}}'.format(nckey, ncval) lines.append(fmt.format(key, val)) lines.append(divider) return '\n'.join(lines)
python
def rst_dict_table(dict_, key_format=str, val_format=str, header=None, sort=True): keys, values = zip(*dict_.items()) # apply formatting keys = map(key_format, keys) values = map(val_format, values) # work out longest elements in each column nckey = max(map(len, keys)) ncval = max(map(len, values)) if header: khead, vhead = header nckey = max(nckey, len(khead)) ncval = max(ncval, len(vhead)) # build table header line divider = "{} {}".format('='*nckey, '='*ncval) def row(key, val): fmt = '{{0:{0}s}} {{1}}'.format(nckey, ncval) return fmt.format(key, val) # build table of lines lines = [divider] if header: lines.extend((row(*header), divider)) params = zip(keys, values) if sort: params = sorted(params) for key, val in params: fmt = '{{0:{0}s}} {{1}}'.format(nckey, ncval) lines.append(fmt.format(key, val)) lines.append(divider) return '\n'.join(lines)
[ "def", "rst_dict_table", "(", "dict_", ",", "key_format", "=", "str", ",", "val_format", "=", "str", ",", "header", "=", "None", ",", "sort", "=", "True", ")", ":", "keys", ",", "values", "=", "zip", "(", "*", "dict_", ".", "items", "(", ")", ")", ...
Returns an RST-formatted table of keys and values from a `dict` Parameters ---------- dict_ : dict data to display in table key_format : callable callable function with which to format keys val_format : callable callable function with which to format values header : None, tuple of str a 2-tuple of header for the two columns, or `None` to exclude a header line (default) sort : bool, optional Sort the dictionary keys alphabetically when writing the table. Examples -------- >>> a = {'key1': 'value1', 'key2': 'value2'} >>> print(rst_dict_table(a)) ==== ====== key1 value1 key2 value2 ==== ====== >>> print(rst_dict_table(a, key_format='``{}``'.format, ... val_format=':class:`{}`'.format, ... header=('Key', 'Value')) ======== =============== Key Value ======== =============== ``key1`` :class:`value1` ``key2`` :class:`value2` ======== ===============
[ "Returns", "an", "RST", "-", "formatted", "table", "of", "keys", "and", "values", "from", "a", "dict" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/docs/_include/_dict_to_rst.py#L23-L92
227,960
gwastro/pycbc
pycbc/strain/__init__.py
read_model_from_config
def read_model_from_config(cp, ifo, section="calibration"): """Returns an instance of the calibration model specified in the given configuration file. Parameters ---------- cp : WorflowConfigParser An open config file to read. ifo : string The detector (H1, L1) whose model will be loaded. section : {"calibration", string} Section name from which to retrieve the model. Returns ------- instance An instance of the calibration model class. """ model = cp.get_opt_tag(section, "{}_model".format(ifo.lower()), None) recalibrator = models[model].from_config(cp, ifo.lower(), section) return recalibrator
python
def read_model_from_config(cp, ifo, section="calibration"): model = cp.get_opt_tag(section, "{}_model".format(ifo.lower()), None) recalibrator = models[model].from_config(cp, ifo.lower(), section) return recalibrator
[ "def", "read_model_from_config", "(", "cp", ",", "ifo", ",", "section", "=", "\"calibration\"", ")", ":", "model", "=", "cp", ".", "get_opt_tag", "(", "section", ",", "\"{}_model\"", ".", "format", "(", "ifo", ".", "lower", "(", ")", ")", ",", "None", ...
Returns an instance of the calibration model specified in the given configuration file. Parameters ---------- cp : WorflowConfigParser An open config file to read. ifo : string The detector (H1, L1) whose model will be loaded. section : {"calibration", string} Section name from which to retrieve the model. Returns ------- instance An instance of the calibration model class.
[ "Returns", "an", "instance", "of", "the", "calibration", "model", "specified", "in", "the", "given", "configuration", "file", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/strain/__init__.py#L18-L39
227,961
gwastro/pycbc
pycbc/strain/gate.py
_gates_from_cli
def _gates_from_cli(opts, gate_opt): """Parses the given `gate_opt` into something understandable by `strain.gate_data`. """ gates = {} if getattr(opts, gate_opt) is None: return gates for gate in getattr(opts, gate_opt): try: ifo, central_time, half_dur, taper_dur = gate.split(':') central_time = float(central_time) half_dur = float(half_dur) taper_dur = float(taper_dur) except ValueError: raise ValueError("--gate {} not formatted correctly; ".format( gate) + "see help") try: gates[ifo].append((central_time, half_dur, taper_dur)) except KeyError: gates[ifo] = [(central_time, half_dur, taper_dur)] return gates
python
def _gates_from_cli(opts, gate_opt): gates = {} if getattr(opts, gate_opt) is None: return gates for gate in getattr(opts, gate_opt): try: ifo, central_time, half_dur, taper_dur = gate.split(':') central_time = float(central_time) half_dur = float(half_dur) taper_dur = float(taper_dur) except ValueError: raise ValueError("--gate {} not formatted correctly; ".format( gate) + "see help") try: gates[ifo].append((central_time, half_dur, taper_dur)) except KeyError: gates[ifo] = [(central_time, half_dur, taper_dur)] return gates
[ "def", "_gates_from_cli", "(", "opts", ",", "gate_opt", ")", ":", "gates", "=", "{", "}", "if", "getattr", "(", "opts", ",", "gate_opt", ")", "is", "None", ":", "return", "gates", "for", "gate", "in", "getattr", "(", "opts", ",", "gate_opt", ")", ":"...
Parses the given `gate_opt` into something understandable by `strain.gate_data`.
[ "Parses", "the", "given", "gate_opt", "into", "something", "understandable", "by", "strain", ".", "gate_data", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/strain/gate.py#L21-L41
227,962
gwastro/pycbc
pycbc/strain/gate.py
apply_gates_to_td
def apply_gates_to_td(strain_dict, gates): """Applies the given dictionary of gates to the given dictionary of strain. Parameters ---------- strain_dict : dict Dictionary of time-domain strain, keyed by the ifos. gates : dict Dictionary of gates. Keys should be the ifo to apply the data to, values are a tuple giving the central time of the gate, the half duration, and the taper duration. Returns ------- dict Dictionary of time-domain strain with the gates applied. """ # copy data to new dictionary outdict = dict(strain_dict.items()) for ifo in gates: outdict[ifo] = strain.gate_data(outdict[ifo], gates[ifo]) return outdict
python
def apply_gates_to_td(strain_dict, gates): # copy data to new dictionary outdict = dict(strain_dict.items()) for ifo in gates: outdict[ifo] = strain.gate_data(outdict[ifo], gates[ifo]) return outdict
[ "def", "apply_gates_to_td", "(", "strain_dict", ",", "gates", ")", ":", "# copy data to new dictionary", "outdict", "=", "dict", "(", "strain_dict", ".", "items", "(", ")", ")", "for", "ifo", "in", "gates", ":", "outdict", "[", "ifo", "]", "=", "strain", "...
Applies the given dictionary of gates to the given dictionary of strain. Parameters ---------- strain_dict : dict Dictionary of time-domain strain, keyed by the ifos. gates : dict Dictionary of gates. Keys should be the ifo to apply the data to, values are a tuple giving the central time of the gate, the half duration, and the taper duration. Returns ------- dict Dictionary of time-domain strain with the gates applied.
[ "Applies", "the", "given", "dictionary", "of", "gates", "to", "the", "given", "dictionary", "of", "strain", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/strain/gate.py#L58-L80
227,963
gwastro/pycbc
pycbc/strain/gate.py
apply_gates_to_fd
def apply_gates_to_fd(stilde_dict, gates): """Applies the given dictionary of gates to the given dictionary of strain in the frequency domain. Gates are applied by IFFT-ing the strain data to the time domain, applying the gate, then FFT-ing back to the frequency domain. Parameters ---------- stilde_dict : dict Dictionary of frequency-domain strain, keyed by the ifos. gates : dict Dictionary of gates. Keys should be the ifo to apply the data to, values are a tuple giving the central time of the gate, the half duration, and the taper duration. Returns ------- dict Dictionary of frequency-domain strain with the gates applied. """ # copy data to new dictionary outdict = dict(stilde_dict.items()) # create a time-domin strain dictionary to apply the gates to strain_dict = dict([[ifo, outdict[ifo].to_timeseries()] for ifo in gates]) # apply gates and fft back to the frequency domain for ifo,d in apply_gates_to_td(strain_dict, gates).items(): outdict[ifo] = d.to_frequencyseries() return outdict
python
def apply_gates_to_fd(stilde_dict, gates): # copy data to new dictionary outdict = dict(stilde_dict.items()) # create a time-domin strain dictionary to apply the gates to strain_dict = dict([[ifo, outdict[ifo].to_timeseries()] for ifo in gates]) # apply gates and fft back to the frequency domain for ifo,d in apply_gates_to_td(strain_dict, gates).items(): outdict[ifo] = d.to_frequencyseries() return outdict
[ "def", "apply_gates_to_fd", "(", "stilde_dict", ",", "gates", ")", ":", "# copy data to new dictionary", "outdict", "=", "dict", "(", "stilde_dict", ".", "items", "(", ")", ")", "# create a time-domin strain dictionary to apply the gates to", "strain_dict", "=", "dict", ...
Applies the given dictionary of gates to the given dictionary of strain in the frequency domain. Gates are applied by IFFT-ing the strain data to the time domain, applying the gate, then FFT-ing back to the frequency domain. Parameters ---------- stilde_dict : dict Dictionary of frequency-domain strain, keyed by the ifos. gates : dict Dictionary of gates. Keys should be the ifo to apply the data to, values are a tuple giving the central time of the gate, the half duration, and the taper duration. Returns ------- dict Dictionary of frequency-domain strain with the gates applied.
[ "Applies", "the", "given", "dictionary", "of", "gates", "to", "the", "given", "dictionary", "of", "strain", "in", "the", "frequency", "domain", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/strain/gate.py#L83-L111
227,964
gwastro/pycbc
pycbc/strain/gate.py
add_gate_option_group
def add_gate_option_group(parser): """Adds the options needed to apply gates to data. Parameters ---------- parser : object ArgumentParser instance. """ gate_group = parser.add_argument_group("Options for gating data.") gate_group.add_argument("--gate", nargs="+", type=str, metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR", help="Apply one or more gates to the data before " "filtering.") gate_group.add_argument("--gate-overwhitened", action="store_true", help="Overwhiten data first, then apply the " "gates specified in --gate. Overwhitening " "allows for sharper tapers to be used, " "since lines are not blurred.") gate_group.add_argument("--psd-gate", nargs="+", type=str, metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR", help="Apply one or more gates to the data used " "for computing the PSD. Gates are applied " "prior to FFT-ing the data for PSD " "estimation.") return gate_group
python
def add_gate_option_group(parser): gate_group = parser.add_argument_group("Options for gating data.") gate_group.add_argument("--gate", nargs="+", type=str, metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR", help="Apply one or more gates to the data before " "filtering.") gate_group.add_argument("--gate-overwhitened", action="store_true", help="Overwhiten data first, then apply the " "gates specified in --gate. Overwhitening " "allows for sharper tapers to be used, " "since lines are not blurred.") gate_group.add_argument("--psd-gate", nargs="+", type=str, metavar="IFO:CENTRALTIME:HALFDUR:TAPERDUR", help="Apply one or more gates to the data used " "for computing the PSD. Gates are applied " "prior to FFT-ing the data for PSD " "estimation.") return gate_group
[ "def", "add_gate_option_group", "(", "parser", ")", ":", "gate_group", "=", "parser", ".", "add_argument_group", "(", "\"Options for gating data.\"", ")", "gate_group", ".", "add_argument", "(", "\"--gate\"", ",", "nargs", "=", "\"+\"", ",", "type", "=", "str", ...
Adds the options needed to apply gates to data. Parameters ---------- parser : object ArgumentParser instance.
[ "Adds", "the", "options", "needed", "to", "apply", "gates", "to", "data", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/strain/gate.py#L114-L139
227,965
gwastro/pycbc
pycbc/vetoes/chisq.py
power_chisq_at_points_from_precomputed
def power_chisq_at_points_from_precomputed(corr, snr, snr_norm, bins, indices): """Calculate the chisq timeseries from precomputed values for only select points. This function calculates the chisq at each point by explicitly time shifting and summing each bin. No FFT is involved. Parameters ---------- corr: FrequencySeries The product of the template and data in the frequency domain. snr: numpy.ndarray The unnormalized array of snr values at only the selected points in `indices`. snr_norm: float The normalization of the snr (EXPLAINME : refer to Findchirp paper?) bins: List of integers The edges of the equal power bins indices: Array The indices where we will calculate the chisq. These must be relative to the given `corr` series. Returns ------- chisq: Array An array containing only the chisq at the selected points. """ num_bins = len(bins) - 1 chisq = shift_sum(corr, indices, bins) # pylint:disable=assignment-from-no-return return (chisq * num_bins - (snr.conj() * snr).real) * (snr_norm ** 2.0)
python
def power_chisq_at_points_from_precomputed(corr, snr, snr_norm, bins, indices): num_bins = len(bins) - 1 chisq = shift_sum(corr, indices, bins) # pylint:disable=assignment-from-no-return return (chisq * num_bins - (snr.conj() * snr).real) * (snr_norm ** 2.0)
[ "def", "power_chisq_at_points_from_precomputed", "(", "corr", ",", "snr", ",", "snr_norm", ",", "bins", ",", "indices", ")", ":", "num_bins", "=", "len", "(", "bins", ")", "-", "1", "chisq", "=", "shift_sum", "(", "corr", ",", "indices", ",", "bins", ")"...
Calculate the chisq timeseries from precomputed values for only select points. This function calculates the chisq at each point by explicitly time shifting and summing each bin. No FFT is involved. Parameters ---------- corr: FrequencySeries The product of the template and data in the frequency domain. snr: numpy.ndarray The unnormalized array of snr values at only the selected points in `indices`. snr_norm: float The normalization of the snr (EXPLAINME : refer to Findchirp paper?) bins: List of integers The edges of the equal power bins indices: Array The indices where we will calculate the chisq. These must be relative to the given `corr` series. Returns ------- chisq: Array An array containing only the chisq at the selected points.
[ "Calculate", "the", "chisq", "timeseries", "from", "precomputed", "values", "for", "only", "select", "points", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/vetoes/chisq.py#L109-L136
227,966
gwastro/pycbc
pycbc/vetoes/chisq.py
power_chisq_from_precomputed
def power_chisq_from_precomputed(corr, snr, snr_norm, bins, indices=None, return_bins=False): """Calculate the chisq timeseries from precomputed values. This function calculates the chisq at all times by performing an inverse FFT of each bin. Parameters ---------- corr: FrequencySeries The produce of the template and data in the frequency domain. snr: TimeSeries The unnormalized snr time series. snr_norm: The snr normalization factor (true snr = snr * snr_norm) EXPLAINME - define 'true snr'? bins: List of integers The edges of the chisq bins. indices: {Array, None}, optional Index values into snr that indicate where to calculate chisq values. If none, calculate chisq for all possible indices. return_bins: {boolean, False}, optional Return a list of the SNRs for each chisq bin. Returns ------- chisq: TimeSeries """ # Get workspace memory global _q_l, _qtilde_l, _chisq_l bin_snrs = [] if _q_l is None or len(_q_l) != len(snr): q = zeros(len(snr), dtype=complex_same_precision_as(snr)) qtilde = zeros(len(snr), dtype=complex_same_precision_as(snr)) _q_l = q _qtilde_l = qtilde else: q = _q_l qtilde = _qtilde_l if indices is not None: snr = snr.take(indices) if _chisq_l is None or len(_chisq_l) < len(snr): chisq = zeros(len(snr), dtype=real_same_precision_as(snr)) _chisq_l = chisq else: chisq = _chisq_l[0:len(snr)] chisq.clear() num_bins = len(bins) - 1 for j in range(num_bins): k_min = int(bins[j]) k_max = int(bins[j+1]) qtilde[k_min:k_max] = corr[k_min:k_max] pycbc.fft.ifft(qtilde, q) qtilde[k_min:k_max].clear() if return_bins: bin_snrs.append(TimeSeries(q * snr_norm * num_bins ** 0.5, delta_t=snr.delta_t, epoch=snr.start_time)) if indices is not None: chisq_accum_bin(chisq, q.take(indices)) else: chisq_accum_bin(chisq, q) chisq = (chisq * num_bins - snr.squared_norm()) * (snr_norm ** 2.0) if indices is None: chisq = TimeSeries(chisq, delta_t=snr.delta_t, epoch=snr.start_time, copy=False) if return_bins: return chisq, bin_snrs else: return chisq
python
def power_chisq_from_precomputed(corr, snr, snr_norm, bins, indices=None, return_bins=False): # Get workspace memory global _q_l, _qtilde_l, _chisq_l bin_snrs = [] if _q_l is None or len(_q_l) != len(snr): q = zeros(len(snr), dtype=complex_same_precision_as(snr)) qtilde = zeros(len(snr), dtype=complex_same_precision_as(snr)) _q_l = q _qtilde_l = qtilde else: q = _q_l qtilde = _qtilde_l if indices is not None: snr = snr.take(indices) if _chisq_l is None or len(_chisq_l) < len(snr): chisq = zeros(len(snr), dtype=real_same_precision_as(snr)) _chisq_l = chisq else: chisq = _chisq_l[0:len(snr)] chisq.clear() num_bins = len(bins) - 1 for j in range(num_bins): k_min = int(bins[j]) k_max = int(bins[j+1]) qtilde[k_min:k_max] = corr[k_min:k_max] pycbc.fft.ifft(qtilde, q) qtilde[k_min:k_max].clear() if return_bins: bin_snrs.append(TimeSeries(q * snr_norm * num_bins ** 0.5, delta_t=snr.delta_t, epoch=snr.start_time)) if indices is not None: chisq_accum_bin(chisq, q.take(indices)) else: chisq_accum_bin(chisq, q) chisq = (chisq * num_bins - snr.squared_norm()) * (snr_norm ** 2.0) if indices is None: chisq = TimeSeries(chisq, delta_t=snr.delta_t, epoch=snr.start_time, copy=False) if return_bins: return chisq, bin_snrs else: return chisq
[ "def", "power_chisq_from_precomputed", "(", "corr", ",", "snr", ",", "snr_norm", ",", "bins", ",", "indices", "=", "None", ",", "return_bins", "=", "False", ")", ":", "# Get workspace memory", "global", "_q_l", ",", "_qtilde_l", ",", "_chisq_l", "bin_snrs", "=...
Calculate the chisq timeseries from precomputed values. This function calculates the chisq at all times by performing an inverse FFT of each bin. Parameters ---------- corr: FrequencySeries The produce of the template and data in the frequency domain. snr: TimeSeries The unnormalized snr time series. snr_norm: The snr normalization factor (true snr = snr * snr_norm) EXPLAINME - define 'true snr'? bins: List of integers The edges of the chisq bins. indices: {Array, None}, optional Index values into snr that indicate where to calculate chisq values. If none, calculate chisq for all possible indices. return_bins: {boolean, False}, optional Return a list of the SNRs for each chisq bin. Returns ------- chisq: TimeSeries
[ "Calculate", "the", "chisq", "timeseries", "from", "precomputed", "values", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/vetoes/chisq.py#L141-L220
227,967
gwastro/pycbc
pycbc/vetoes/chisq.py
power_chisq
def power_chisq(template, data, num_bins, psd, low_frequency_cutoff=None, high_frequency_cutoff=None, return_bins=False): """Calculate the chisq timeseries Parameters ---------- template: FrequencySeries or TimeSeries A time or frequency series that contains the filter template. data: FrequencySeries or TimeSeries A time or frequency series that contains the data to filter. The length must be commensurate with the template. (EXPLAINME - does this mean 'the same as' or something else?) num_bins: int The number of bins in the chisq. Note that the dof goes as 2*num_bins-2. psd: FrequencySeries The psd of the data. low_frequency_cutoff: {None, float}, optional The low frequency cutoff for the filter high_frequency_cutoff: {None, float}, optional The high frequency cutoff for the filter return_bins: {boolean, False}, optional Return a list of the individual chisq bins Returns ------- chisq: TimeSeries TimeSeries containing the chisq values for all times. """ htilde = make_frequency_series(template) stilde = make_frequency_series(data) bins = power_chisq_bins(htilde, num_bins, psd, low_frequency_cutoff, high_frequency_cutoff) corra = zeros((len(htilde)-1)*2, dtype=htilde.dtype) total_snr, corr, tnorm = matched_filter_core(htilde, stilde, psd, low_frequency_cutoff, high_frequency_cutoff, corr_out=corra) return power_chisq_from_precomputed(corr, total_snr, tnorm, bins, return_bins=return_bins)
python
def power_chisq(template, data, num_bins, psd, low_frequency_cutoff=None, high_frequency_cutoff=None, return_bins=False): htilde = make_frequency_series(template) stilde = make_frequency_series(data) bins = power_chisq_bins(htilde, num_bins, psd, low_frequency_cutoff, high_frequency_cutoff) corra = zeros((len(htilde)-1)*2, dtype=htilde.dtype) total_snr, corr, tnorm = matched_filter_core(htilde, stilde, psd, low_frequency_cutoff, high_frequency_cutoff, corr_out=corra) return power_chisq_from_precomputed(corr, total_snr, tnorm, bins, return_bins=return_bins)
[ "def", "power_chisq", "(", "template", ",", "data", ",", "num_bins", ",", "psd", ",", "low_frequency_cutoff", "=", "None", ",", "high_frequency_cutoff", "=", "None", ",", "return_bins", "=", "False", ")", ":", "htilde", "=", "make_frequency_series", "(", "temp...
Calculate the chisq timeseries Parameters ---------- template: FrequencySeries or TimeSeries A time or frequency series that contains the filter template. data: FrequencySeries or TimeSeries A time or frequency series that contains the data to filter. The length must be commensurate with the template. (EXPLAINME - does this mean 'the same as' or something else?) num_bins: int The number of bins in the chisq. Note that the dof goes as 2*num_bins-2. psd: FrequencySeries The psd of the data. low_frequency_cutoff: {None, float}, optional The low frequency cutoff for the filter high_frequency_cutoff: {None, float}, optional The high frequency cutoff for the filter return_bins: {boolean, False}, optional Return a list of the individual chisq bins Returns ------- chisq: TimeSeries TimeSeries containing the chisq values for all times.
[ "Calculate", "the", "chisq", "timeseries" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/vetoes/chisq.py#L259-L299
227,968
gwastro/pycbc
pycbc/vetoes/chisq.py
SingleDetSkyMaxPowerChisq.calculate_chisq_bins
def calculate_chisq_bins(self, template, psd): """ Obtain the chisq bins for this template and PSD. """ num_bins = int(self.parse_option(template, self.num_bins)) if hasattr(psd, 'sigmasq_vec') and \ template.approximant in psd.sigmasq_vec: kmin = int(template.f_lower / psd.delta_f) kmax = template.end_idx bins = power_chisq_bins_from_sigmasq_series( psd.sigmasq_vec[template.approximant], num_bins, kmin, kmax) else: bins = power_chisq_bins(template, num_bins, psd, template.f_lower) return bins
python
def calculate_chisq_bins(self, template, psd): num_bins = int(self.parse_option(template, self.num_bins)) if hasattr(psd, 'sigmasq_vec') and \ template.approximant in psd.sigmasq_vec: kmin = int(template.f_lower / psd.delta_f) kmax = template.end_idx bins = power_chisq_bins_from_sigmasq_series( psd.sigmasq_vec[template.approximant], num_bins, kmin, kmax) else: bins = power_chisq_bins(template, num_bins, psd, template.f_lower) return bins
[ "def", "calculate_chisq_bins", "(", "self", ",", "template", ",", "psd", ")", ":", "num_bins", "=", "int", "(", "self", ".", "parse_option", "(", "template", ",", "self", ".", "num_bins", ")", ")", "if", "hasattr", "(", "psd", ",", "'sigmasq_vec'", ")", ...
Obtain the chisq bins for this template and PSD.
[ "Obtain", "the", "chisq", "bins", "for", "this", "template", "and", "PSD", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/vetoes/chisq.py#L402-L414
227,969
gwastro/pycbc
docs/conf_std.py
build_includes
def build_includes(): """Creates rst files in the _include directory using the python scripts there. This will ignore any files in the _include directory that start with ``_``. """ print("Running scripts in _include:") cwd = os.getcwd() os.chdir('_include') pyfiles = glob.glob('*.py') for fn in pyfiles: if not fn.startswith('_'): print(' {}'.format(fn)) subprocess.check_output(['python', fn]) os.chdir(cwd)
python
def build_includes(): print("Running scripts in _include:") cwd = os.getcwd() os.chdir('_include') pyfiles = glob.glob('*.py') for fn in pyfiles: if not fn.startswith('_'): print(' {}'.format(fn)) subprocess.check_output(['python', fn]) os.chdir(cwd)
[ "def", "build_includes", "(", ")", ":", "print", "(", "\"Running scripts in _include:\"", ")", "cwd", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "'_include'", ")", "pyfiles", "=", "glob", ".", "glob", "(", "'*.py'", ")", "for", "fn", ...
Creates rst files in the _include directory using the python scripts there. This will ignore any files in the _include directory that start with ``_``.
[ "Creates", "rst", "files", "in", "the", "_include", "directory", "using", "the", "python", "scripts", "there", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/docs/conf_std.py#L267-L281
227,970
gwastro/pycbc
pycbc/boundaries.py
apply_cyclic
def apply_cyclic(value, bounds): """Given a value, applies cyclic boundary conditions between the minimum and maximum bounds. Parameters ---------- value : float The value to apply the cyclic conditions to. bounds : Bounds instance Boundaries to use for applying cyclic conditions. Returns ------- float The value after the cyclic bounds are applied. """ return (value - bounds._min) %(bounds._max - bounds._min) + bounds._min
python
def apply_cyclic(value, bounds): return (value - bounds._min) %(bounds._max - bounds._min) + bounds._min
[ "def", "apply_cyclic", "(", "value", ",", "bounds", ")", ":", "return", "(", "value", "-", "bounds", ".", "_min", ")", "%", "(", "bounds", ".", "_max", "-", "bounds", ".", "_min", ")", "+", "bounds", ".", "_min" ]
Given a value, applies cyclic boundary conditions between the minimum and maximum bounds. Parameters ---------- value : float The value to apply the cyclic conditions to. bounds : Bounds instance Boundaries to use for applying cyclic conditions. Returns ------- float The value after the cyclic bounds are applied.
[ "Given", "a", "value", "applies", "cyclic", "boundary", "conditions", "between", "the", "minimum", "and", "maximum", "bounds", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/boundaries.py#L110-L126
227,971
gwastro/pycbc
pycbc/boundaries.py
reflect_well
def reflect_well(value, bounds): """Given some boundaries, reflects the value until it falls within both boundaries. This is done iteratively, reflecting left off of the `boundaries.max`, then right off of the `boundaries.min`, etc. Parameters ---------- value : float The value to apply the reflected boundaries to. bounds : Bounds instance Boundaries to reflect between. Both `bounds.min` and `bounds.max` must be instances of `ReflectedBound`, otherwise an AttributeError is raised. Returns ------- float The value after being reflected between the two bounds. """ while value not in bounds: value = bounds._max.reflect_left(value) value = bounds._min.reflect_right(value) return value
python
def reflect_well(value, bounds): while value not in bounds: value = bounds._max.reflect_left(value) value = bounds._min.reflect_right(value) return value
[ "def", "reflect_well", "(", "value", ",", "bounds", ")", ":", "while", "value", "not", "in", "bounds", ":", "value", "=", "bounds", ".", "_max", ".", "reflect_left", "(", "value", ")", "value", "=", "bounds", ".", "_min", ".", "reflect_right", "(", "va...
Given some boundaries, reflects the value until it falls within both boundaries. This is done iteratively, reflecting left off of the `boundaries.max`, then right off of the `boundaries.min`, etc. Parameters ---------- value : float The value to apply the reflected boundaries to. bounds : Bounds instance Boundaries to reflect between. Both `bounds.min` and `bounds.max` must be instances of `ReflectedBound`, otherwise an AttributeError is raised. Returns ------- float The value after being reflected between the two bounds.
[ "Given", "some", "boundaries", "reflects", "the", "value", "until", "it", "falls", "within", "both", "boundaries", ".", "This", "is", "done", "iteratively", "reflecting", "left", "off", "of", "the", "boundaries", ".", "max", "then", "right", "off", "of", "th...
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/boundaries.py#L128-L150
227,972
gwastro/pycbc
pycbc/boundaries.py
ReflectedBound.reflect_left
def reflect_left(self, value): """Only reflects the value if is > self.""" if value > self: value = self.reflect(value) return value
python
def reflect_left(self, value): if value > self: value = self.reflect(value) return value
[ "def", "reflect_left", "(", "self", ",", "value", ")", ":", "if", "value", ">", "self", ":", "value", "=", "self", ".", "reflect", "(", "value", ")", "return", "value" ]
Only reflects the value if is > self.
[ "Only", "reflects", "the", "value", "if", "is", ">", "self", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/boundaries.py#L86-L90
227,973
gwastro/pycbc
pycbc/boundaries.py
ReflectedBound.reflect_right
def reflect_right(self, value): """Only reflects the value if is < self.""" if value < self: value = self.reflect(value) return value
python
def reflect_right(self, value): if value < self: value = self.reflect(value) return value
[ "def", "reflect_right", "(", "self", ",", "value", ")", ":", "if", "value", "<", "self", ":", "value", "=", "self", ".", "reflect", "(", "value", ")", "return", "value" ]
Only reflects the value if is < self.
[ "Only", "reflects", "the", "value", "if", "is", "<", "self", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/boundaries.py#L92-L96
227,974
gwastro/pycbc
pycbc/boundaries.py
Bounds.apply_conditions
def apply_conditions(self, value): """Applies any boundary conditions to the given value. The value is manipulated according based on the following conditions: * If `self.cyclic` is True then `value` is wrapped around to the minimum (maximum) bound if `value` is `>= self.max` (`< self.min`) bound. For example, if the minimum and maximum bounds are `0, 2*pi` and `value = 5*pi`, then the returned value will be `pi`. * If `self.min` is a reflected boundary then `value` will be reflected to the right if it is `< self.min`. For example, if `self.min = 10` and `value = 3`, then the returned value will be 17. * If `self.max` is a reflected boundary then `value` will be reflected to the left if it is `> self.max`. For example, if `self.max = 20` and `value = 27`, then the returned value will be 13. * If `self.min` and `self.max` are both reflected boundaries, then `value` will be reflected between the two boundaries until it falls within the bounds. The first reflection occurs off of the maximum boundary. For example, if `self.min = 10`, `self.max = 20`, and `value = 42`, the returned value will be 18 ( the first reflection yields -2, the second 22, and the last 18). * If neither bounds are reflected and cyclic is False, then the value is just returned as-is. Parameters ---------- value : float The value to apply the conditions to. Returns ------- float The value after the conditions are applied; see above for details. """ retval = value if self._cyclic: retval = apply_cyclic(value, self) retval = self._reflect(retval) if isinstance(retval, numpy.ndarray) and retval.size == 1: try: retval = retval[0] except IndexError: retval = float(retval) return retval
python
def apply_conditions(self, value): retval = value if self._cyclic: retval = apply_cyclic(value, self) retval = self._reflect(retval) if isinstance(retval, numpy.ndarray) and retval.size == 1: try: retval = retval[0] except IndexError: retval = float(retval) return retval
[ "def", "apply_conditions", "(", "self", ",", "value", ")", ":", "retval", "=", "value", "if", "self", ".", "_cyclic", ":", "retval", "=", "apply_cyclic", "(", "value", ",", "self", ")", "retval", "=", "self", ".", "_reflect", "(", "retval", ")", "if", ...
Applies any boundary conditions to the given value. The value is manipulated according based on the following conditions: * If `self.cyclic` is True then `value` is wrapped around to the minimum (maximum) bound if `value` is `>= self.max` (`< self.min`) bound. For example, if the minimum and maximum bounds are `0, 2*pi` and `value = 5*pi`, then the returned value will be `pi`. * If `self.min` is a reflected boundary then `value` will be reflected to the right if it is `< self.min`. For example, if `self.min = 10` and `value = 3`, then the returned value will be 17. * If `self.max` is a reflected boundary then `value` will be reflected to the left if it is `> self.max`. For example, if `self.max = 20` and `value = 27`, then the returned value will be 13. * If `self.min` and `self.max` are both reflected boundaries, then `value` will be reflected between the two boundaries until it falls within the bounds. The first reflection occurs off of the maximum boundary. For example, if `self.min = 10`, `self.max = 20`, and `value = 42`, the returned value will be 18 ( the first reflection yields -2, the second 22, and the last 18). * If neither bounds are reflected and cyclic is False, then the value is just returned as-is. Parameters ---------- value : float The value to apply the conditions to. Returns ------- float The value after the conditions are applied; see above for details.
[ "Applies", "any", "boundary", "conditions", "to", "the", "given", "value", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/boundaries.py#L359-L404
227,975
gwastro/pycbc
pycbc/dq.py
parse_veto_definer
def parse_veto_definer(veto_def_filename): """ Parse a veto definer file from the filename and return a dictionary indexed by ifo and veto definer category level. Parameters ---------- veto_def_filename: str The path to the veto definer file Returns: parsed_definition: dict Returns a dictionary first indexed by ifo, then category level, and finally a list of veto definitions. """ from glue.ligolw import table, lsctables, utils as ligolw_utils from glue.ligolw.ligolw import LIGOLWContentHandler as h lsctables.use_in(h) indoc = ligolw_utils.load_filename(veto_def_filename, False, contenthandler=h) veto_table = table.get_table(indoc, 'veto_definer') ifo = veto_table.getColumnByName('ifo') name = veto_table.getColumnByName('name') version = numpy.array(veto_table.getColumnByName('version')) category = numpy.array(veto_table.getColumnByName('category')) start = numpy.array(veto_table.getColumnByName('start_time')) end = numpy.array(veto_table.getColumnByName('end_time')) start_pad = numpy.array(veto_table.getColumnByName('start_pad')) end_pad = numpy.array(veto_table.getColumnByName('end_pad')) data = {} for i in range(len(veto_table)): if ifo[i] not in data: data[ifo[i]] = {} # The veto-definer categories are weird! Hardware injections are stored # in "3" and numbers above that are bumped up by one (although not # often used any more). So we remap 3 to H and anything above 3 to # N-1. 2 and 1 correspond to 2 and 1 (YAY!) if category[i] > 3: curr_cat = "CAT_{}".format(category[i]-1) elif category[i] == 3: curr_cat = "CAT_H" else: curr_cat = "CAT_{}".format(category[i]) if curr_cat not in data[ifo[i]]: data[ifo[i]][curr_cat] = [] veto_info = {'name': name[i], 'version': version[i], 'start': start[i], 'end': end[i], 'start_pad': start_pad[i], 'end_pad': end_pad[i], } data[ifo[i]][curr_cat].append(veto_info) return data
python
def parse_veto_definer(veto_def_filename): from glue.ligolw import table, lsctables, utils as ligolw_utils from glue.ligolw.ligolw import LIGOLWContentHandler as h lsctables.use_in(h) indoc = ligolw_utils.load_filename(veto_def_filename, False, contenthandler=h) veto_table = table.get_table(indoc, 'veto_definer') ifo = veto_table.getColumnByName('ifo') name = veto_table.getColumnByName('name') version = numpy.array(veto_table.getColumnByName('version')) category = numpy.array(veto_table.getColumnByName('category')) start = numpy.array(veto_table.getColumnByName('start_time')) end = numpy.array(veto_table.getColumnByName('end_time')) start_pad = numpy.array(veto_table.getColumnByName('start_pad')) end_pad = numpy.array(veto_table.getColumnByName('end_pad')) data = {} for i in range(len(veto_table)): if ifo[i] not in data: data[ifo[i]] = {} # The veto-definer categories are weird! Hardware injections are stored # in "3" and numbers above that are bumped up by one (although not # often used any more). So we remap 3 to H and anything above 3 to # N-1. 2 and 1 correspond to 2 and 1 (YAY!) if category[i] > 3: curr_cat = "CAT_{}".format(category[i]-1) elif category[i] == 3: curr_cat = "CAT_H" else: curr_cat = "CAT_{}".format(category[i]) if curr_cat not in data[ifo[i]]: data[ifo[i]][curr_cat] = [] veto_info = {'name': name[i], 'version': version[i], 'start': start[i], 'end': end[i], 'start_pad': start_pad[i], 'end_pad': end_pad[i], } data[ifo[i]][curr_cat].append(veto_info) return data
[ "def", "parse_veto_definer", "(", "veto_def_filename", ")", ":", "from", "glue", ".", "ligolw", "import", "table", ",", "lsctables", ",", "utils", "as", "ligolw_utils", "from", "glue", ".", "ligolw", ".", "ligolw", "import", "LIGOLWContentHandler", "as", "h", ...
Parse a veto definer file from the filename and return a dictionary indexed by ifo and veto definer category level. Parameters ---------- veto_def_filename: str The path to the veto definer file Returns: parsed_definition: dict Returns a dictionary first indexed by ifo, then category level, and finally a list of veto definitions.
[ "Parse", "a", "veto", "definer", "file", "from", "the", "filename", "and", "return", "a", "dictionary", "indexed", "by", "ifo", "and", "veto", "definer", "category", "level", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/dq.py#L34-L92
227,976
gwastro/pycbc
pycbc/dq.py
query_cumulative_flags
def query_cumulative_flags(ifo, segment_names, start_time, end_time, source='any', server="segments.ligo.org", veto_definer=None, bounds=None, padding=None, override_ifos=None, cache=False): """Return the times where any flag is active Parameters ---------- ifo: string or dict The interferometer to query (H1, L1). If a dict, an element for each flag name must be provided. segment_name: list of strings The status flag to query from LOSC. start_time: int The starting gps time to begin querying from LOSC end_time: int The end gps time of the query source: str, Optional Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may also be given. The default is to try GWOSC first then try dqsegdb. server: str, Optional The server path. Only used with dqsegdb atm. veto_definer: str, Optional The path to a veto definer to define groups of flags which themselves define a set of segments. bounds: dict, Optional Dict containing start end tuples keyed by the flag name which indicated places which should have a distinct time period to be active. padding: dict, Optional Dict keyed by the flag name. Each element is a tuple (start_pad, end_pad) which indicates how to change the segment boundaries. override_ifos: dict, Optional A dict keyed by flag_name to override the ifo option on a per flag basis. 
Returns --------- segments: glue.segments.segmentlist List of segments """ total_segs = segmentlist([]) for flag_name in segment_names: ifo_name = ifo if override_ifos is not None and flag_name in override_ifos: ifo_name = override_ifos[flag_name] segs = query_flag(ifo_name, flag_name, start_time, end_time, source=source, server=server, veto_definer=veto_definer, cache=cache) if padding and flag_name in padding: s, e = padding[flag_name] segs2 = segmentlist([]) for seg in segs: segs2.append(segment(seg[0] + s, seg[1] + e)) segs = segs2 if bounds is not None and flag_name in bounds: s, e = bounds[flag_name] valid = segmentlist([segment([s, e])]) segs = (segs & valid).coalesce() total_segs = (total_segs + segs).coalesce() return total_segs
python
def query_cumulative_flags(ifo, segment_names, start_time, end_time, source='any', server="segments.ligo.org", veto_definer=None, bounds=None, padding=None, override_ifos=None, cache=False): total_segs = segmentlist([]) for flag_name in segment_names: ifo_name = ifo if override_ifos is not None and flag_name in override_ifos: ifo_name = override_ifos[flag_name] segs = query_flag(ifo_name, flag_name, start_time, end_time, source=source, server=server, veto_definer=veto_definer, cache=cache) if padding and flag_name in padding: s, e = padding[flag_name] segs2 = segmentlist([]) for seg in segs: segs2.append(segment(seg[0] + s, seg[1] + e)) segs = segs2 if bounds is not None and flag_name in bounds: s, e = bounds[flag_name] valid = segmentlist([segment([s, e])]) segs = (segs & valid).coalesce() total_segs = (total_segs + segs).coalesce() return total_segs
[ "def", "query_cumulative_flags", "(", "ifo", ",", "segment_names", ",", "start_time", ",", "end_time", ",", "source", "=", "'any'", ",", "server", "=", "\"segments.ligo.org\"", ",", "veto_definer", "=", "None", ",", "bounds", "=", "None", ",", "padding", "=", ...
Return the times where any flag is active Parameters ---------- ifo: string or dict The interferometer to query (H1, L1). If a dict, an element for each flag name must be provided. segment_name: list of strings The status flag to query from LOSC. start_time: int The starting gps time to begin querying from LOSC end_time: int The end gps time of the query source: str, Optional Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may also be given. The default is to try GWOSC first then try dqsegdb. server: str, Optional The server path. Only used with dqsegdb atm. veto_definer: str, Optional The path to a veto definer to define groups of flags which themselves define a set of segments. bounds: dict, Optional Dict containing start end tuples keyed by the flag name which indicated places which should have a distinct time period to be active. padding: dict, Optional Dict keyed by the flag name. Each element is a tuple (start_pad, end_pad) which indicates how to change the segment boundaries. override_ifos: dict, Optional A dict keyed by flag_name to override the ifo option on a per flag basis. Returns --------- segments: glue.segments.segmentlist List of segments
[ "Return", "the", "times", "where", "any", "flag", "is", "active" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/dq.py#L227-L295
227,977
gwastro/pycbc
pycbc/dq.py
parse_flag_str
def parse_flag_str(flag_str): """ Parse a dq flag query string Parameters ---------- flag_str: str String needing to be parsed Returns ------- flags: list of strings List of reduced name strings which can be passed to lower level query commands signs: dict Dict of bools indicated if this should add positively to the segmentlist ifos: dict Ifo specified for the given flag bounds: dict The boundary of a given flag padding: dict Any padding that should be applied to the segments in for a given flag. """ flags = flag_str.replace(' ', '').strip().split(',') signs = {} ifos = {} bounds = {} padding = {} bflags = [] for flag in flags: # Check if the flag should add or subtract time sign = flag[0] == '+' flag = flag[1:] ifo = pad = bound = None # Check for non-default IFO if len(flag.split(':')[0]) == 2: ifo = flag.split(':')[0] flag = flag[3:] # Check for padding options if '<' in flag: popt = flag.split('<')[1].split('>')[0] spad, epad = popt.split(':') pad = (float(spad), float(epad)) flag = flag.replace(popt, '').replace('<>', '') # Check if there are bounds on the flag if '[' in flag: bopt = flag.split('[')[1].split(']')[0] start, end = bopt.split(':') bound = (int(start), int(end)) flag = flag.replace(bopt, '').replace('[]', '') if ifo: ifos[flag] = ifo if pad: padding[flag] = pad if bound: bounds[flag] = bound bflags.append(flag) signs[flag] = sign return bflags, signs, ifos, bounds, padding
python
def parse_flag_str(flag_str): flags = flag_str.replace(' ', '').strip().split(',') signs = {} ifos = {} bounds = {} padding = {} bflags = [] for flag in flags: # Check if the flag should add or subtract time sign = flag[0] == '+' flag = flag[1:] ifo = pad = bound = None # Check for non-default IFO if len(flag.split(':')[0]) == 2: ifo = flag.split(':')[0] flag = flag[3:] # Check for padding options if '<' in flag: popt = flag.split('<')[1].split('>')[0] spad, epad = popt.split(':') pad = (float(spad), float(epad)) flag = flag.replace(popt, '').replace('<>', '') # Check if there are bounds on the flag if '[' in flag: bopt = flag.split('[')[1].split(']')[0] start, end = bopt.split(':') bound = (int(start), int(end)) flag = flag.replace(bopt, '').replace('[]', '') if ifo: ifos[flag] = ifo if pad: padding[flag] = pad if bound: bounds[flag] = bound bflags.append(flag) signs[flag] = sign return bflags, signs, ifos, bounds, padding
[ "def", "parse_flag_str", "(", "flag_str", ")", ":", "flags", "=", "flag_str", ".", "replace", "(", "' '", ",", "''", ")", ".", "strip", "(", ")", ".", "split", "(", "','", ")", "signs", "=", "{", "}", "ifos", "=", "{", "}", "bounds", "=", "{", ...
Parse a dq flag query string Parameters ---------- flag_str: str String needing to be parsed Returns ------- flags: list of strings List of reduced name strings which can be passed to lower level query commands signs: dict Dict of bools indicated if this should add positively to the segmentlist ifos: dict Ifo specified for the given flag bounds: dict The boundary of a given flag padding: dict Any padding that should be applied to the segments in for a given flag.
[ "Parse", "a", "dq", "flag", "query", "string" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/dq.py#L297-L363
227,978
gwastro/pycbc
pycbc/dq.py
query_str
def query_str(ifo, flag_str, start_time, end_time, server="segments.ligo.org", veto_definer=None): """ Query for flags based on a special str syntax Parameters ---------- ifo: str The ifo to be mainly quering for. (may be overriden in syntax) flag_str: str Specification of how to do the query. Ex. +H1:DATA:1<-8,8>[0,100000000] would return H1 time for the DATA available flag with version 1. It would then apply an 8 second padding and only return times within the chosen range 0,1000000000. start_time: int The start gps time. May be overriden for individual flags with the flag str bounds syntax end_time: int The end gps time. May be overriden for individual flags with the flag str bounds syntax Returns ------- segs: segmentlist A list of segments corresponding to the flag query string """ flags, sign, ifos, bounds, padding = parse_flag_str(flag_str) up = [f for f in flags if sign[f]] down = [f for f in flags if not sign[f]] if len(up) + len(down) != len(flags): raise ValueError('Not all flags could be parsed, check +/- prefix') segs = query_cumulative_flags(ifo, up, start_time, end_time, server=server, veto_definer=veto_definer, bounds=bounds, padding=padding, override_ifos=ifos) mseg = query_cumulative_flags(ifo, down, start_time, end_time, server=server, veto_definer=veto_definer, bounds=bounds, padding=padding, override_ifos=ifos) segs = (segs - mseg).coalesce() return segs
python
def query_str(ifo, flag_str, start_time, end_time, server="segments.ligo.org", veto_definer=None): flags, sign, ifos, bounds, padding = parse_flag_str(flag_str) up = [f for f in flags if sign[f]] down = [f for f in flags if not sign[f]] if len(up) + len(down) != len(flags): raise ValueError('Not all flags could be parsed, check +/- prefix') segs = query_cumulative_flags(ifo, up, start_time, end_time, server=server, veto_definer=veto_definer, bounds=bounds, padding=padding, override_ifos=ifos) mseg = query_cumulative_flags(ifo, down, start_time, end_time, server=server, veto_definer=veto_definer, bounds=bounds, padding=padding, override_ifos=ifos) segs = (segs - mseg).coalesce() return segs
[ "def", "query_str", "(", "ifo", ",", "flag_str", ",", "start_time", ",", "end_time", ",", "server", "=", "\"segments.ligo.org\"", ",", "veto_definer", "=", "None", ")", ":", "flags", ",", "sign", ",", "ifos", ",", "bounds", ",", "padding", "=", "parse_flag...
Query for flags based on a special str syntax Parameters ---------- ifo: str The ifo to be mainly quering for. (may be overriden in syntax) flag_str: str Specification of how to do the query. Ex. +H1:DATA:1<-8,8>[0,100000000] would return H1 time for the DATA available flag with version 1. It would then apply an 8 second padding and only return times within the chosen range 0,1000000000. start_time: int The start gps time. May be overriden for individual flags with the flag str bounds syntax end_time: int The end gps time. May be overriden for individual flags with the flag str bounds syntax Returns ------- segs: segmentlist A list of segments corresponding to the flag query string
[ "Query", "for", "flags", "based", "on", "a", "special", "str", "syntax" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/dq.py#L365-L411
227,979
gwastro/pycbc
pycbc/inference/models/base_data.py
BaseDataModel.data
def data(self, data): """Store a copy of the data.""" self._data = {det: d.copy() for (det, d) in data.items()}
python
def data(self, data): self._data = {det: d.copy() for (det, d) in data.items()}
[ "def", "data", "(", "self", ",", "data", ")", ":", "self", ".", "_data", "=", "{", "det", ":", "d", ".", "copy", "(", ")", "for", "(", "det", ",", "d", ")", "in", "data", ".", "items", "(", ")", "}" ]
Store a copy of the data.
[ "Store", "a", "copy", "of", "the", "data", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/models/base_data.py#L90-L92
227,980
gwastro/pycbc
pycbc/inference/models/base_data.py
BaseDataModel.logplr
def logplr(self): """Returns the log of the prior-weighted likelihood ratio at the current parameter values. The logprior is calculated first. If the logprior returns ``-inf`` (possibly indicating a non-physical point), then ``loglr`` is not called. """ logp = self.logprior if logp == -numpy.inf: return logp else: return logp + self.loglr
python
def logplr(self): logp = self.logprior if logp == -numpy.inf: return logp else: return logp + self.loglr
[ "def", "logplr", "(", "self", ")", ":", "logp", "=", "self", ".", "logprior", "if", "logp", "==", "-", "numpy", ".", "inf", ":", "return", "logp", "else", ":", "return", "logp", "+", "self", ".", "loglr" ]
Returns the log of the prior-weighted likelihood ratio at the current parameter values. The logprior is calculated first. If the logprior returns ``-inf`` (possibly indicating a non-physical point), then ``loglr`` is not called.
[ "Returns", "the", "log", "of", "the", "prior", "-", "weighted", "likelihood", "ratio", "at", "the", "current", "parameter", "values", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/models/base_data.py#L130-L142
227,981
gwastro/pycbc
pycbc/inference/models/base_data.py
BaseDataModel.write_metadata
def write_metadata(self, fp): """Adds data to the metadata that's written. Parameters ---------- fp : pycbc.inference.io.BaseInferenceFile instance The inference file to write to. """ super(BaseDataModel, self).write_metadata(fp) fp.write_stilde(self.data)
python
def write_metadata(self, fp): super(BaseDataModel, self).write_metadata(fp) fp.write_stilde(self.data)
[ "def", "write_metadata", "(", "self", ",", "fp", ")", ":", "super", "(", "BaseDataModel", ",", "self", ")", ".", "write_metadata", "(", "fp", ")", "fp", ".", "write_stilde", "(", "self", ".", "data", ")" ]
Adds data to the metadata that's written. Parameters ---------- fp : pycbc.inference.io.BaseInferenceFile instance The inference file to write to.
[ "Adds", "data", "to", "the", "metadata", "that", "s", "written", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/models/base_data.py#L149-L158
227,982
gwastro/pycbc
pycbc/waveform/utils_cuda.py
apply_fseries_time_shift
def apply_fseries_time_shift(htilde, dt, kmin=0, copy=True): """Shifts a frequency domain waveform in time. The waveform is assumed to be sampled at equal frequency intervals. """ if htilde.precision != 'single': raise NotImplementedError("CUDA version of apply_fseries_time_shift only supports single precision") if copy: out = htilde.copy() else: out = htilde kmin = numpy.int32(kmin) kmax = numpy.int32(len(htilde)) nb = int(numpy.ceil(kmax / nt_float)) if nb > 1024: raise ValueError("More than 1024 blocks not supported yet") phi = numpy.float32(-2 * numpy.pi * dt * htilde.delta_f) fseries_ts_fn.prepared_call((nb, 1), (nt, 1, 1), out.data.gpudata, phi, kmin, kmax) if copy: htilde = FrequencySeries(out, delta_f=htilde.delta_f, epoch=htilde.epoch, copy=False) return htilde
python
def apply_fseries_time_shift(htilde, dt, kmin=0, copy=True): if htilde.precision != 'single': raise NotImplementedError("CUDA version of apply_fseries_time_shift only supports single precision") if copy: out = htilde.copy() else: out = htilde kmin = numpy.int32(kmin) kmax = numpy.int32(len(htilde)) nb = int(numpy.ceil(kmax / nt_float)) if nb > 1024: raise ValueError("More than 1024 blocks not supported yet") phi = numpy.float32(-2 * numpy.pi * dt * htilde.delta_f) fseries_ts_fn.prepared_call((nb, 1), (nt, 1, 1), out.data.gpudata, phi, kmin, kmax) if copy: htilde = FrequencySeries(out, delta_f=htilde.delta_f, epoch=htilde.epoch, copy=False) return htilde
[ "def", "apply_fseries_time_shift", "(", "htilde", ",", "dt", ",", "kmin", "=", "0", ",", "copy", "=", "True", ")", ":", "if", "htilde", ".", "precision", "!=", "'single'", ":", "raise", "NotImplementedError", "(", "\"CUDA version of apply_fseries_time_shift only s...
Shifts a frequency domain waveform in time. The waveform is assumed to be sampled at equal frequency intervals.
[ "Shifts", "a", "frequency", "domain", "waveform", "in", "time", ".", "The", "waveform", "is", "assumed", "to", "be", "sampled", "at", "equal", "frequency", "intervals", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/waveform/utils_cuda.py#L81-L104
227,983
gwastro/pycbc
pycbc/distributions/qnm.py
UniformF0Tau.from_config
def from_config(cls, cp, section, variable_args): """Initialize this class from a config file. Bounds on ``f0``, ``tau``, ``final_mass`` and ``final_spin`` should be specified by providing ``min-{param}`` and ``max-{param}``. If the ``f0`` or ``tau`` param should be renamed, ``rdfreq`` and ``damping_time`` should be provided; these must match ``variable_args``. If ``rdfreq`` and ``damping_time`` are not provided, ``variable_args`` are expected to be ``f0`` and ``tau``. Only ``min/max-f0`` and ``min/max-tau`` need to be provided. Example: .. code-block:: ini [{section}-f0+tau] name = uniform_f0_tau min-f0 = 10 max-f0 = 2048 min-tau = 0.0001 max-tau = 0.010 min-final_mass = 10 Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser WorkflowConfigParser instance to read. section : str The name of the section to read. variable_args : str The name of the variable args. These should be separated by ``pycbc.VARARGS_DELIM``. Returns ------- UniformF0Tau : This class initialized with the parameters provided in the config file. 
""" tag = variable_args variable_args = set(variable_args.split(pycbc.VARARGS_DELIM)) # get f0 and tau f0 = bounded.get_param_bounds_from_config(cp, section, tag, 'f0') tau = bounded.get_param_bounds_from_config(cp, section, tag, 'tau') # see if f0 and tau should be renamed if cp.has_option_tag(section, 'rdfreq', tag): rdfreq = cp.get_opt_tag(section, 'rdfreq', tag) else: rdfreq = 'f0' if cp.has_option_tag(section, 'damping_time', tag): damping_time = cp.get_opt_tag(section, 'damping_time', tag) else: damping_time = 'tau' # check that they match whats in the variable args if not variable_args == set([rdfreq, damping_time]): raise ValueError("variable args do not match rdfreq and " "damping_time names") # get the final mass and spin values, if provided final_mass = bounded.get_param_bounds_from_config( cp, section, tag, 'final_mass') final_spin = bounded.get_param_bounds_from_config( cp, section, tag, 'final_spin') extra_opts = {} if cp.has_option_tag(section, 'norm_tolerance', tag): extra_opts['norm_tolerance'] = float( cp.get_opt_tag(section, 'norm_tolerance', tag)) if cp.has_option_tag(section, 'norm_seed', tag): extra_opts['norm_seed'] = int( cp.get_opt_tag(section, 'norm_seed', tag)) return cls(f0=f0, tau=tau, final_mass=final_mass, final_spin=final_spin, rdfreq=rdfreq, damping_time=damping_time, **extra_opts)
python
def from_config(cls, cp, section, variable_args): tag = variable_args variable_args = set(variable_args.split(pycbc.VARARGS_DELIM)) # get f0 and tau f0 = bounded.get_param_bounds_from_config(cp, section, tag, 'f0') tau = bounded.get_param_bounds_from_config(cp, section, tag, 'tau') # see if f0 and tau should be renamed if cp.has_option_tag(section, 'rdfreq', tag): rdfreq = cp.get_opt_tag(section, 'rdfreq', tag) else: rdfreq = 'f0' if cp.has_option_tag(section, 'damping_time', tag): damping_time = cp.get_opt_tag(section, 'damping_time', tag) else: damping_time = 'tau' # check that they match whats in the variable args if not variable_args == set([rdfreq, damping_time]): raise ValueError("variable args do not match rdfreq and " "damping_time names") # get the final mass and spin values, if provided final_mass = bounded.get_param_bounds_from_config( cp, section, tag, 'final_mass') final_spin = bounded.get_param_bounds_from_config( cp, section, tag, 'final_spin') extra_opts = {} if cp.has_option_tag(section, 'norm_tolerance', tag): extra_opts['norm_tolerance'] = float( cp.get_opt_tag(section, 'norm_tolerance', tag)) if cp.has_option_tag(section, 'norm_seed', tag): extra_opts['norm_seed'] = int( cp.get_opt_tag(section, 'norm_seed', tag)) return cls(f0=f0, tau=tau, final_mass=final_mass, final_spin=final_spin, rdfreq=rdfreq, damping_time=damping_time, **extra_opts)
[ "def", "from_config", "(", "cls", ",", "cp", ",", "section", ",", "variable_args", ")", ":", "tag", "=", "variable_args", "variable_args", "=", "set", "(", "variable_args", ".", "split", "(", "pycbc", ".", "VARARGS_DELIM", ")", ")", "# get f0 and tau", "f0",...
Initialize this class from a config file. Bounds on ``f0``, ``tau``, ``final_mass`` and ``final_spin`` should be specified by providing ``min-{param}`` and ``max-{param}``. If the ``f0`` or ``tau`` param should be renamed, ``rdfreq`` and ``damping_time`` should be provided; these must match ``variable_args``. If ``rdfreq`` and ``damping_time`` are not provided, ``variable_args`` are expected to be ``f0`` and ``tau``. Only ``min/max-f0`` and ``min/max-tau`` need to be provided. Example: .. code-block:: ini [{section}-f0+tau] name = uniform_f0_tau min-f0 = 10 max-f0 = 2048 min-tau = 0.0001 max-tau = 0.010 min-final_mass = 10 Parameters ---------- cp : pycbc.workflow.WorkflowConfigParser WorkflowConfigParser instance to read. section : str The name of the section to read. variable_args : str The name of the variable args. These should be separated by ``pycbc.VARARGS_DELIM``. Returns ------- UniformF0Tau : This class initialized with the parameters provided in the config file.
[ "Initialize", "this", "class", "from", "a", "config", "file", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/distributions/qnm.py#L187-L260
227,984
gwastro/pycbc
pycbc/distributions/angular.py
UniformSolidAngle.apply_boundary_conditions
def apply_boundary_conditions(self, **kwargs): """Maps the given values to be within the domain of the azimuthal and polar angles, before applying any other boundary conditions. Parameters ---------- \**kwargs : The keyword args must include values for both the azimuthal and polar angle, using the names they were initilialized with. For example, if `polar_angle='theta'` and `azimuthal_angle=`phi`, then the keyword args must be `theta={val1}, phi={val2}`. Returns ------- dict A dictionary of the parameter names and the conditioned values. """ polarval = kwargs[self._polar_angle] azval = kwargs[self._azimuthal_angle] # constrain each angle to its domain polarval = self._polardist._domain.apply_conditions(polarval) azval = self._azimuthaldist._domain.apply_conditions(azval) # apply any other boundary conditions polarval = self._bounds[self._polar_angle].apply_conditions(polarval) azval = self._bounds[self._azimuthal_angle].apply_conditions(azval) return {self._polar_angle: polarval, self._azimuthal_angle: azval}
python
def apply_boundary_conditions(self, **kwargs): polarval = kwargs[self._polar_angle] azval = kwargs[self._azimuthal_angle] # constrain each angle to its domain polarval = self._polardist._domain.apply_conditions(polarval) azval = self._azimuthaldist._domain.apply_conditions(azval) # apply any other boundary conditions polarval = self._bounds[self._polar_angle].apply_conditions(polarval) azval = self._bounds[self._azimuthal_angle].apply_conditions(azval) return {self._polar_angle: polarval, self._azimuthal_angle: azval}
[ "def", "apply_boundary_conditions", "(", "self", ",", "*", "*", "kwargs", ")", ":", "polarval", "=", "kwargs", "[", "self", ".", "_polar_angle", "]", "azval", "=", "kwargs", "[", "self", ".", "_azimuthal_angle", "]", "# constrain each angle to its domain", "pola...
Maps the given values to be within the domain of the azimuthal and polar angles, before applying any other boundary conditions. Parameters ---------- \**kwargs : The keyword args must include values for both the azimuthal and polar angle, using the names they were initilialized with. For example, if `polar_angle='theta'` and `azimuthal_angle=`phi`, then the keyword args must be `theta={val1}, phi={val2}`. Returns ------- dict A dictionary of the parameter names and the conditioned values.
[ "Maps", "the", "given", "values", "to", "be", "within", "the", "domain", "of", "the", "azimuthal", "and", "polar", "angles", "before", "applying", "any", "other", "boundary", "conditions", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/distributions/angular.py#L395-L420
227,985
gwastro/pycbc
pycbc/distributions/angular.py
UniformSolidAngle._pdf
def _pdf(self, **kwargs): """ Returns the pdf at the given angles. Parameters ---------- \**kwargs: The keyword arguments should specify the value for each angle, using the names of the polar and azimuthal angles as the keywords. Unrecognized arguments are ignored. Returns ------- float The value of the pdf at the given values. """ return self._polardist._pdf(**kwargs) * \ self._azimuthaldist._pdf(**kwargs)
python
def _pdf(self, **kwargs): return self._polardist._pdf(**kwargs) * \ self._azimuthaldist._pdf(**kwargs)
[ "def", "_pdf", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_polardist", ".", "_pdf", "(", "*", "*", "kwargs", ")", "*", "self", ".", "_azimuthaldist", ".", "_pdf", "(", "*", "*", "kwargs", ")" ]
Returns the pdf at the given angles. Parameters ---------- \**kwargs: The keyword arguments should specify the value for each angle, using the names of the polar and azimuthal angles as the keywords. Unrecognized arguments are ignored. Returns ------- float The value of the pdf at the given values.
[ "Returns", "the", "pdf", "at", "the", "given", "angles", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/distributions/angular.py#L423-L440
227,986
gwastro/pycbc
pycbc/distributions/angular.py
UniformSolidAngle._logpdf
def _logpdf(self, **kwargs): """ Returns the logpdf at the given angles. Parameters ---------- \**kwargs: The keyword arguments should specify the value for each angle, using the names of the polar and azimuthal angles as the keywords. Unrecognized arguments are ignored. Returns ------- float The value of the pdf at the given values. """ return self._polardist._logpdf(**kwargs) +\ self._azimuthaldist._logpdf(**kwargs)
python
def _logpdf(self, **kwargs): return self._polardist._logpdf(**kwargs) +\ self._azimuthaldist._logpdf(**kwargs)
[ "def", "_logpdf", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_polardist", ".", "_logpdf", "(", "*", "*", "kwargs", ")", "+", "self", ".", "_azimuthaldist", ".", "_logpdf", "(", "*", "*", "kwargs", ")" ]
Returns the logpdf at the given angles. Parameters ---------- \**kwargs: The keyword arguments should specify the value for each angle, using the names of the polar and azimuthal angles as the keywords. Unrecognized arguments are ignored. Returns ------- float The value of the pdf at the given values.
[ "Returns", "the", "logpdf", "at", "the", "given", "angles", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/distributions/angular.py#L443-L460
227,987
gwastro/pycbc
pycbc/results/scatter_histograms.py
create_axes_grid
def create_axes_grid(parameters, labels=None, height_ratios=None, width_ratios=None, no_diagonals=False): """Given a list of parameters, creates a figure with an axis for every possible combination of the parameters. Parameters ---------- parameters : list Names of the variables to be plotted. labels : {None, dict}, optional A dictionary of parameters -> parameter labels. height_ratios : {None, list}, optional Set the height ratios of the axes; see `matplotlib.gridspec.GridSpec` for details. width_ratios : {None, list}, optional Set the width ratios of the axes; see `matplotlib.gridspec.GridSpec` for details. no_diagonals : {False, bool}, optional Do not produce axes for the same parameter on both axes. Returns ------- fig : pyplot.figure The figure that was created. axis_dict : dict A dictionary mapping the parameter combinations to the axis and their location in the subplots grid; i.e., the key, values are: `{('param1', 'param2'): (pyplot.axes, row index, column index)}` """ if labels is None: labels = {p: p for p in parameters} elif any(p not in labels for p in parameters): raise ValueError("labels must be provided for all parameters") # Create figure with adequate size for number of parameters. ndim = len(parameters) if no_diagonals: ndim -= 1 if ndim < 3: fsize = (8, 7) else: fsize = (ndim*3 - 1, ndim*3 - 2) fig = pyplot.figure(figsize=fsize) # create the axis grid gs = gridspec.GridSpec(ndim, ndim, width_ratios=width_ratios, height_ratios=height_ratios, wspace=0.05, hspace=0.05) # create grid of axis numbers to easily create axes in the right locations axes = numpy.arange(ndim**2).reshape((ndim, ndim)) # Select possible combinations of plots and establish rows and columns. 
combos = list(itertools.combinations(parameters, 2)) # add the diagonals if not no_diagonals: combos += [(p, p) for p in parameters] # create the mapping between parameter combos and axes axis_dict = {} # cycle over all the axes, setting thing as needed for nrow in range(ndim): for ncolumn in range(ndim): ax = pyplot.subplot(gs[axes[nrow, ncolumn]]) # map to a parameter index px = parameters[ncolumn] if no_diagonals: py = parameters[nrow+1] else: py = parameters[nrow] if (px, py) in combos: axis_dict[px, py] = (ax, nrow, ncolumn) # x labels only on bottom if nrow + 1 == ndim: ax.set_xlabel('{}'.format(labels[px]), fontsize=18) else: pyplot.setp(ax.get_xticklabels(), visible=False) # y labels only on left if ncolumn == 0: ax.set_ylabel('{}'.format(labels[py]), fontsize=18) else: pyplot.setp(ax.get_yticklabels(), visible=False) else: # make non-used axes invisible ax.axis('off') return fig, axis_dict
python
def create_axes_grid(parameters, labels=None, height_ratios=None, width_ratios=None, no_diagonals=False): if labels is None: labels = {p: p for p in parameters} elif any(p not in labels for p in parameters): raise ValueError("labels must be provided for all parameters") # Create figure with adequate size for number of parameters. ndim = len(parameters) if no_diagonals: ndim -= 1 if ndim < 3: fsize = (8, 7) else: fsize = (ndim*3 - 1, ndim*3 - 2) fig = pyplot.figure(figsize=fsize) # create the axis grid gs = gridspec.GridSpec(ndim, ndim, width_ratios=width_ratios, height_ratios=height_ratios, wspace=0.05, hspace=0.05) # create grid of axis numbers to easily create axes in the right locations axes = numpy.arange(ndim**2).reshape((ndim, ndim)) # Select possible combinations of plots and establish rows and columns. combos = list(itertools.combinations(parameters, 2)) # add the diagonals if not no_diagonals: combos += [(p, p) for p in parameters] # create the mapping between parameter combos and axes axis_dict = {} # cycle over all the axes, setting thing as needed for nrow in range(ndim): for ncolumn in range(ndim): ax = pyplot.subplot(gs[axes[nrow, ncolumn]]) # map to a parameter index px = parameters[ncolumn] if no_diagonals: py = parameters[nrow+1] else: py = parameters[nrow] if (px, py) in combos: axis_dict[px, py] = (ax, nrow, ncolumn) # x labels only on bottom if nrow + 1 == ndim: ax.set_xlabel('{}'.format(labels[px]), fontsize=18) else: pyplot.setp(ax.get_xticklabels(), visible=False) # y labels only on left if ncolumn == 0: ax.set_ylabel('{}'.format(labels[py]), fontsize=18) else: pyplot.setp(ax.get_yticklabels(), visible=False) else: # make non-used axes invisible ax.axis('off') return fig, axis_dict
[ "def", "create_axes_grid", "(", "parameters", ",", "labels", "=", "None", ",", "height_ratios", "=", "None", ",", "width_ratios", "=", "None", ",", "no_diagonals", "=", "False", ")", ":", "if", "labels", "is", "None", ":", "labels", "=", "{", "p", ":", ...
Given a list of parameters, creates a figure with an axis for every possible combination of the parameters. Parameters ---------- parameters : list Names of the variables to be plotted. labels : {None, dict}, optional A dictionary of parameters -> parameter labels. height_ratios : {None, list}, optional Set the height ratios of the axes; see `matplotlib.gridspec.GridSpec` for details. width_ratios : {None, list}, optional Set the width ratios of the axes; see `matplotlib.gridspec.GridSpec` for details. no_diagonals : {False, bool}, optional Do not produce axes for the same parameter on both axes. Returns ------- fig : pyplot.figure The figure that was created. axis_dict : dict A dictionary mapping the parameter combinations to the axis and their location in the subplots grid; i.e., the key, values are: `{('param1', 'param2'): (pyplot.axes, row index, column index)}`
[ "Given", "a", "list", "of", "parameters", "creates", "a", "figure", "with", "an", "axis", "for", "every", "possible", "combination", "of", "the", "parameters", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/results/scatter_histograms.py#L52-L134
227,988
gwastro/pycbc
pycbc/results/scatter_histograms.py
construct_kde
def construct_kde(samples_array, use_kombine=False): """Constructs a KDE from the given samples. """ if use_kombine: try: import kombine except ImportError: raise ImportError("kombine is not installed.") # construct the kde if use_kombine: kde = kombine.clustered_kde.KDE(samples_array) else: kde = scipy.stats.gaussian_kde(samples_array.T) return kde
python
def construct_kde(samples_array, use_kombine=False): if use_kombine: try: import kombine except ImportError: raise ImportError("kombine is not installed.") # construct the kde if use_kombine: kde = kombine.clustered_kde.KDE(samples_array) else: kde = scipy.stats.gaussian_kde(samples_array.T) return kde
[ "def", "construct_kde", "(", "samples_array", ",", "use_kombine", "=", "False", ")", ":", "if", "use_kombine", ":", "try", ":", "import", "kombine", "except", "ImportError", ":", "raise", "ImportError", "(", "\"kombine is not installed.\"", ")", "# construct the kde...
Constructs a KDE from the given samples.
[ "Constructs", "a", "KDE", "from", "the", "given", "samples", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/results/scatter_histograms.py#L146-L159
227,989
gwastro/pycbc
pycbc/results/scatter_histograms.py
create_marginalized_hist
def create_marginalized_hist(ax, values, label, percentiles=None, color='k', fillcolor='gray', linecolor='navy', linestyle='-', title=True, expected_value=None, expected_color='red', rotated=False, plot_min=None, plot_max=None): """Plots a 1D marginalized histogram of the given param from the given samples. Parameters ---------- ax : pyplot.Axes The axes on which to draw the plot. values : array The parameter values to plot. label : str A label to use for the title. percentiles : {None, float or array} What percentiles to draw lines at. If None, will draw lines at `[5, 50, 95]` (i.e., the bounds on the upper 90th percentile and the median). color : {'k', string} What color to make the histogram; default is black. fillcolor : {'gray', string, or None} What color to fill the histogram with. Set to None to not fill the histogram. Default is 'gray'. linestyle : str, optional What line style to use for the histogram. Default is '-'. linecolor : {'navy', string} What color to use for the percentile lines. Default is 'navy'. title : bool, optional Add a title with a estimated value +/- uncertainty. The estimated value is the pecentile halfway between the max/min of ``percentiles``, while the uncertainty is given by the max/min of the ``percentiles``. If no percentiles are specified, defaults to quoting the median +/- 95/5 percentiles. rotated : {False, bool} Plot the histogram on the y-axis instead of the x. Default is False. plot_min : {None, float} The minimum value to plot. If None, will default to whatever `pyplot` creates. plot_max : {None, float} The maximum value to plot. If None, will default to whatever `pyplot` creates. scalefac : {1., float} Factor to scale the default font sizes by. Default is 1 (no scaling). 
""" if fillcolor is None: htype = 'step' else: htype = 'stepfilled' if rotated: orientation = 'horizontal' else: orientation = 'vertical' ax.hist(values, bins=50, histtype=htype, orientation=orientation, facecolor=fillcolor, edgecolor=color, ls=linestyle, lw=2, density=True) if percentiles is None: percentiles = [5., 50., 95.] if len(percentiles) > 0: plotp = numpy.percentile(values, percentiles) else: plotp = [] for val in plotp: if rotated: ax.axhline(y=val, ls='dashed', color=linecolor, lw=2, zorder=3) else: ax.axvline(x=val, ls='dashed', color=linecolor, lw=2, zorder=3) # plot expected if expected_value is not None: if rotated: ax.axhline(expected_value, color=expected_color, lw=1.5, zorder=2) else: ax.axvline(expected_value, color=expected_color, lw=1.5, zorder=2) if title: if len(percentiles) > 0: minp = min(percentiles) maxp = max(percentiles) medp = (maxp + minp) / 2. else: minp = 5 medp = 50 maxp = 95 values_min = numpy.percentile(values, minp) values_med = numpy.percentile(values, medp) values_max = numpy.percentile(values, maxp) negerror = values_med - values_min poserror = values_max - values_med fmt = '${0}$'.format(str_utils.format_value( values_med, negerror, plus_error=poserror)) if rotated: ax.yaxis.set_label_position("right") # sets colored title for marginal histogram set_marginal_histogram_title(ax, fmt, color, label=label, rotated=rotated) # Remove x-ticks ax.set_xticks([]) # turn off x-labels ax.set_xlabel('') # set limits ymin, ymax = ax.get_ylim() if plot_min is not None: ymin = plot_min if plot_max is not None: ymax = plot_max ax.set_ylim(ymin, ymax) else: # sets colored title for marginal histogram set_marginal_histogram_title(ax, fmt, color, label=label) # Remove y-ticks ax.set_yticks([]) # turn off y-label ax.set_ylabel('') # set limits xmin, xmax = ax.get_xlim() if plot_min is not None: xmin = plot_min if plot_max is not None: xmax = plot_max ax.set_xlim(xmin, xmax)
python
def create_marginalized_hist(ax, values, label, percentiles=None, color='k', fillcolor='gray', linecolor='navy', linestyle='-', title=True, expected_value=None, expected_color='red', rotated=False, plot_min=None, plot_max=None): if fillcolor is None: htype = 'step' else: htype = 'stepfilled' if rotated: orientation = 'horizontal' else: orientation = 'vertical' ax.hist(values, bins=50, histtype=htype, orientation=orientation, facecolor=fillcolor, edgecolor=color, ls=linestyle, lw=2, density=True) if percentiles is None: percentiles = [5., 50., 95.] if len(percentiles) > 0: plotp = numpy.percentile(values, percentiles) else: plotp = [] for val in plotp: if rotated: ax.axhline(y=val, ls='dashed', color=linecolor, lw=2, zorder=3) else: ax.axvline(x=val, ls='dashed', color=linecolor, lw=2, zorder=3) # plot expected if expected_value is not None: if rotated: ax.axhline(expected_value, color=expected_color, lw=1.5, zorder=2) else: ax.axvline(expected_value, color=expected_color, lw=1.5, zorder=2) if title: if len(percentiles) > 0: minp = min(percentiles) maxp = max(percentiles) medp = (maxp + minp) / 2. 
else: minp = 5 medp = 50 maxp = 95 values_min = numpy.percentile(values, minp) values_med = numpy.percentile(values, medp) values_max = numpy.percentile(values, maxp) negerror = values_med - values_min poserror = values_max - values_med fmt = '${0}$'.format(str_utils.format_value( values_med, negerror, plus_error=poserror)) if rotated: ax.yaxis.set_label_position("right") # sets colored title for marginal histogram set_marginal_histogram_title(ax, fmt, color, label=label, rotated=rotated) # Remove x-ticks ax.set_xticks([]) # turn off x-labels ax.set_xlabel('') # set limits ymin, ymax = ax.get_ylim() if plot_min is not None: ymin = plot_min if plot_max is not None: ymax = plot_max ax.set_ylim(ymin, ymax) else: # sets colored title for marginal histogram set_marginal_histogram_title(ax, fmt, color, label=label) # Remove y-ticks ax.set_yticks([]) # turn off y-label ax.set_ylabel('') # set limits xmin, xmax = ax.get_xlim() if plot_min is not None: xmin = plot_min if plot_max is not None: xmax = plot_max ax.set_xlim(xmin, xmax)
[ "def", "create_marginalized_hist", "(", "ax", ",", "values", ",", "label", ",", "percentiles", "=", "None", ",", "color", "=", "'k'", ",", "fillcolor", "=", "'gray'", ",", "linecolor", "=", "'navy'", ",", "linestyle", "=", "'-'", ",", "title", "=", "True...
Plots a 1D marginalized histogram of the given param from the given samples. Parameters ---------- ax : pyplot.Axes The axes on which to draw the plot. values : array The parameter values to plot. label : str A label to use for the title. percentiles : {None, float or array} What percentiles to draw lines at. If None, will draw lines at `[5, 50, 95]` (i.e., the bounds on the upper 90th percentile and the median). color : {'k', string} What color to make the histogram; default is black. fillcolor : {'gray', string, or None} What color to fill the histogram with. Set to None to not fill the histogram. Default is 'gray'. linestyle : str, optional What line style to use for the histogram. Default is '-'. linecolor : {'navy', string} What color to use for the percentile lines. Default is 'navy'. title : bool, optional Add a title with a estimated value +/- uncertainty. The estimated value is the pecentile halfway between the max/min of ``percentiles``, while the uncertainty is given by the max/min of the ``percentiles``. If no percentiles are specified, defaults to quoting the median +/- 95/5 percentiles. rotated : {False, bool} Plot the histogram on the y-axis instead of the x. Default is False. plot_min : {None, float} The minimum value to plot. If None, will default to whatever `pyplot` creates. plot_max : {None, float} The maximum value to plot. If None, will default to whatever `pyplot` creates. scalefac : {1., float} Factor to scale the default font sizes by. Default is 1 (no scaling).
[ "Plots", "a", "1D", "marginalized", "histogram", "of", "the", "given", "param", "from", "the", "given", "samples", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/results/scatter_histograms.py#L295-L420
227,990
gwastro/pycbc
pycbc/results/scatter_histograms.py
set_marginal_histogram_title
def set_marginal_histogram_title(ax, fmt, color, label=None, rotated=False): """ Sets the title of the marginal histograms. Parameters ---------- ax : Axes The `Axes` instance for the plot. fmt : str The string to add to the title. color : str The color of the text to add to the title. label : str If title does not exist, then include label at beginning of the string. rotated : bool If `True` then rotate the text 270 degrees for sideways title. """ # get rotation angle of the title rotation = 270 if rotated else 0 # get how much to displace title on axes xscale = 1.05 if rotated else 0.0 if rotated: yscale = 1.0 elif len(ax.get_figure().axes) > 1: yscale = 1.15 else: yscale = 1.05 # get class that packs text boxes vertical or horizonitally packer_class = offsetbox.VPacker if rotated else offsetbox.HPacker # if no title exists if not hasattr(ax, "title_boxes"): # create a text box title = "{} = {}".format(label, fmt) tbox1 = offsetbox.TextArea( title, textprops=dict(color=color, size=15, rotation=rotation, ha='left', va='bottom')) # save a list of text boxes as attribute for later ax.title_boxes = [tbox1] # pack text boxes ybox = packer_class(children=ax.title_boxes, align="bottom", pad=0, sep=5) # else append existing title else: # delete old title ax.title_anchor.remove() # add new text box to list tbox1 = offsetbox.TextArea( " {}".format(fmt), textprops=dict(color=color, size=15, rotation=rotation, ha='left', va='bottom')) ax.title_boxes = ax.title_boxes + [tbox1] # pack text boxes ybox = packer_class(children=ax.title_boxes, align="bottom", pad=0, sep=5) # add new title and keep reference to instance as an attribute anchored_ybox = offsetbox.AnchoredOffsetbox( loc=2, child=ybox, pad=0., frameon=False, bbox_to_anchor=(xscale, yscale), bbox_transform=ax.transAxes, borderpad=0.) ax.title_anchor = ax.add_artist(anchored_ybox)
python
def set_marginal_histogram_title(ax, fmt, color, label=None, rotated=False): # get rotation angle of the title rotation = 270 if rotated else 0 # get how much to displace title on axes xscale = 1.05 if rotated else 0.0 if rotated: yscale = 1.0 elif len(ax.get_figure().axes) > 1: yscale = 1.15 else: yscale = 1.05 # get class that packs text boxes vertical or horizonitally packer_class = offsetbox.VPacker if rotated else offsetbox.HPacker # if no title exists if not hasattr(ax, "title_boxes"): # create a text box title = "{} = {}".format(label, fmt) tbox1 = offsetbox.TextArea( title, textprops=dict(color=color, size=15, rotation=rotation, ha='left', va='bottom')) # save a list of text boxes as attribute for later ax.title_boxes = [tbox1] # pack text boxes ybox = packer_class(children=ax.title_boxes, align="bottom", pad=0, sep=5) # else append existing title else: # delete old title ax.title_anchor.remove() # add new text box to list tbox1 = offsetbox.TextArea( " {}".format(fmt), textprops=dict(color=color, size=15, rotation=rotation, ha='left', va='bottom')) ax.title_boxes = ax.title_boxes + [tbox1] # pack text boxes ybox = packer_class(children=ax.title_boxes, align="bottom", pad=0, sep=5) # add new title and keep reference to instance as an attribute anchored_ybox = offsetbox.AnchoredOffsetbox( loc=2, child=ybox, pad=0., frameon=False, bbox_to_anchor=(xscale, yscale), bbox_transform=ax.transAxes, borderpad=0.) ax.title_anchor = ax.add_artist(anchored_ybox)
[ "def", "set_marginal_histogram_title", "(", "ax", ",", "fmt", ",", "color", ",", "label", "=", "None", ",", "rotated", "=", "False", ")", ":", "# get rotation angle of the title", "rotation", "=", "270", "if", "rotated", "else", "0", "# get how much to displace ti...
Sets the title of the marginal histograms. Parameters ---------- ax : Axes The `Axes` instance for the plot. fmt : str The string to add to the title. color : str The color of the text to add to the title. label : str If title does not exist, then include label at beginning of the string. rotated : bool If `True` then rotate the text 270 degrees for sideways title.
[ "Sets", "the", "title", "of", "the", "marginal", "histograms", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/results/scatter_histograms.py#L423-L494
227,991
gwastro/pycbc
pycbc/results/scatter_histograms.py
remove_common_offset
def remove_common_offset(arr): """Given an array of data, removes a common offset > 1000, returning the removed value. """ offset = 0 isneg = (arr <= 0).all() # make sure all values have the same sign if isneg or (arr >= 0).all(): # only remove offset if the minimum and maximum values are the same # order of magintude and > O(1000) minpwr = numpy.log10(abs(arr).min()) maxpwr = numpy.log10(abs(arr).max()) if numpy.floor(minpwr) == numpy.floor(maxpwr) and minpwr > 3: offset = numpy.floor(10**minpwr) if isneg: offset *= -1 arr = arr - offset return arr, int(offset)
python
def remove_common_offset(arr): offset = 0 isneg = (arr <= 0).all() # make sure all values have the same sign if isneg or (arr >= 0).all(): # only remove offset if the minimum and maximum values are the same # order of magintude and > O(1000) minpwr = numpy.log10(abs(arr).min()) maxpwr = numpy.log10(abs(arr).max()) if numpy.floor(minpwr) == numpy.floor(maxpwr) and minpwr > 3: offset = numpy.floor(10**minpwr) if isneg: offset *= -1 arr = arr - offset return arr, int(offset)
[ "def", "remove_common_offset", "(", "arr", ")", ":", "offset", "=", "0", "isneg", "=", "(", "arr", "<=", "0", ")", ".", "all", "(", ")", "# make sure all values have the same sign", "if", "isneg", "or", "(", "arr", ">=", "0", ")", ".", "all", "(", ")",...
Given an array of data, removes a common offset > 1000, returning the removed value.
[ "Given", "an", "array", "of", "data", "removes", "a", "common", "offset", ">", "1000", "returning", "the", "removed", "value", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/results/scatter_histograms.py#L753-L770
227,992
gwastro/pycbc
pycbc/results/scatter_histograms.py
reduce_ticks
def reduce_ticks(ax, which, maxticks=3): """Given a pyplot axis, resamples its `which`-axis ticks such that are at most `maxticks` left. Parameters ---------- ax : axis The axis to adjust. which : {'x' | 'y'} Which axis to adjust. maxticks : {3, int} Maximum number of ticks to use. Returns ------- array An array of the selected ticks. """ ticks = getattr(ax, 'get_{}ticks'.format(which))() if len(ticks) > maxticks: # make sure the left/right value is not at the edge minax, maxax = getattr(ax, 'get_{}lim'.format(which))() dw = abs(maxax-minax)/10. start_idx, end_idx = 0, len(ticks) if ticks[0] < minax + dw: start_idx += 1 if ticks[-1] > maxax - dw: end_idx -= 1 # get reduction factor fac = int(len(ticks) / maxticks) ticks = ticks[start_idx:end_idx:fac] return ticks
python
def reduce_ticks(ax, which, maxticks=3): ticks = getattr(ax, 'get_{}ticks'.format(which))() if len(ticks) > maxticks: # make sure the left/right value is not at the edge minax, maxax = getattr(ax, 'get_{}lim'.format(which))() dw = abs(maxax-minax)/10. start_idx, end_idx = 0, len(ticks) if ticks[0] < minax + dw: start_idx += 1 if ticks[-1] > maxax - dw: end_idx -= 1 # get reduction factor fac = int(len(ticks) / maxticks) ticks = ticks[start_idx:end_idx:fac] return ticks
[ "def", "reduce_ticks", "(", "ax", ",", "which", ",", "maxticks", "=", "3", ")", ":", "ticks", "=", "getattr", "(", "ax", ",", "'get_{}ticks'", ".", "format", "(", "which", ")", ")", "(", ")", "if", "len", "(", "ticks", ")", ">", "maxticks", ":", ...
Given a pyplot axis, resamples its `which`-axis ticks such that are at most `maxticks` left. Parameters ---------- ax : axis The axis to adjust. which : {'x' | 'y'} Which axis to adjust. maxticks : {3, int} Maximum number of ticks to use. Returns ------- array An array of the selected ticks.
[ "Given", "a", "pyplot", "axis", "resamples", "its", "which", "-", "axis", "ticks", "such", "that", "are", "at", "most", "maxticks", "left", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/results/scatter_histograms.py#L773-L804
227,993
gwastro/pycbc
pycbc/bin_utils.py
BinnedRatios.logregularize
def logregularize(self, epsilon=2**-1074): """ Find bins in the denominator that are 0, and set them to 1, while setting the corresponding bin in the numerator to float epsilon. This has the effect of allowing the logarithm of the ratio array to be evaluated without error. """ self.numerator.array[self.denominator.array == 0] = epsilon self.denominator.array[self.denominator.array == 0] = 1 return self
python
def logregularize(self, epsilon=2**-1074): self.numerator.array[self.denominator.array == 0] = epsilon self.denominator.array[self.denominator.array == 0] = 1 return self
[ "def", "logregularize", "(", "self", ",", "epsilon", "=", "2", "**", "-", "1074", ")", ":", "self", ".", "numerator", ".", "array", "[", "self", ".", "denominator", ".", "array", "==", "0", "]", "=", "epsilon", "self", ".", "denominator", ".", "array...
Find bins in the denominator that are 0, and set them to 1, while setting the corresponding bin in the numerator to float epsilon. This has the effect of allowing the logarithm of the ratio array to be evaluated without error.
[ "Find", "bins", "in", "the", "denominator", "that", "are", "0", "and", "set", "them", "to", "1", "while", "setting", "the", "corresponding", "bin", "in", "the", "numerator", "to", "float", "epsilon", ".", "This", "has", "the", "effect", "of", "allowing", ...
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/bin_utils.py#L694-L703
227,994
gwastro/pycbc
pycbc/psd/estimate.py
median_bias
def median_bias(n): """Calculate the bias of the median average PSD computed from `n` segments. Parameters ---------- n : int Number of segments used in PSD estimation. Returns ------- ans : float Calculated bias. Raises ------ ValueError For non-integer or non-positive `n`. Notes ----- See arXiv:gr-qc/0509116 appendix B for details. """ if type(n) is not int or n <= 0: raise ValueError('n must be a positive integer') if n >= 1000: return numpy.log(2) ans = 1 for i in range(1, int((n - 1) / 2 + 1)): ans += 1.0 / (2*i + 1) - 1.0 / (2*i) return ans
python
def median_bias(n): if type(n) is not int or n <= 0: raise ValueError('n must be a positive integer') if n >= 1000: return numpy.log(2) ans = 1 for i in range(1, int((n - 1) / 2 + 1)): ans += 1.0 / (2*i + 1) - 1.0 / (2*i) return ans
[ "def", "median_bias", "(", "n", ")", ":", "if", "type", "(", "n", ")", "is", "not", "int", "or", "n", "<=", "0", ":", "raise", "ValueError", "(", "'n must be a positive integer'", ")", "if", "n", ">=", "1000", ":", "return", "numpy", ".", "log", "(",...
Calculate the bias of the median average PSD computed from `n` segments. Parameters ---------- n : int Number of segments used in PSD estimation. Returns ------- ans : float Calculated bias. Raises ------ ValueError For non-integer or non-positive `n`. Notes ----- See arXiv:gr-qc/0509116 appendix B for details.
[ "Calculate", "the", "bias", "of", "the", "median", "average", "PSD", "computed", "from", "n", "segments", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/psd/estimate.py#L26-L55
227,995
gwastro/pycbc
pycbc/psd/estimate.py
inverse_spectrum_truncation
def inverse_spectrum_truncation(psd, max_filter_len, low_frequency_cutoff=None, trunc_method=None): """Modify a PSD such that the impulse response associated with its inverse square root is no longer than `max_filter_len` time samples. In practice this corresponds to a coarse graining or smoothing of the PSD. Parameters ---------- psd : FrequencySeries PSD whose inverse spectrum is to be truncated. max_filter_len : int Maximum length of the time-domain filter in samples. low_frequency_cutoff : {None, int} Frequencies below `low_frequency_cutoff` are zeroed in the output. trunc_method : {None, 'hann'} Function used for truncating the time-domain filter. None produces a hard truncation at `max_filter_len`. Returns ------- psd : FrequencySeries PSD whose inverse spectrum has been truncated. Raises ------ ValueError For invalid types or values of `max_filter_len` and `low_frequency_cutoff`. Notes ----- See arXiv:gr-qc/0509116 for details. """ # sanity checks if type(max_filter_len) is not int or max_filter_len <= 0: raise ValueError('max_filter_len must be a positive integer') if low_frequency_cutoff is not None and low_frequency_cutoff < 0 \ or low_frequency_cutoff > psd.sample_frequencies[-1]: raise ValueError('low_frequency_cutoff must be within the bandwidth of the PSD') N = (len(psd)-1)*2 inv_asd = FrequencySeries((1. 
/ psd)**0.5, delta_f=psd.delta_f, \ dtype=complex_same_precision_as(psd)) inv_asd[0] = 0 inv_asd[N//2] = 0 q = TimeSeries(numpy.zeros(N), delta_t=(N / psd.delta_f), \ dtype=real_same_precision_as(psd)) if low_frequency_cutoff: kmin = int(low_frequency_cutoff / psd.delta_f) inv_asd[0:kmin] = 0 ifft(inv_asd, q) trunc_start = max_filter_len // 2 trunc_end = N - max_filter_len // 2 if trunc_end < trunc_start: raise ValueError('Invalid value in inverse_spectrum_truncation') if trunc_method == 'hann': trunc_window = Array(numpy.hanning(max_filter_len), dtype=q.dtype) q[0:trunc_start] *= trunc_window[max_filter_len//2:max_filter_len] q[trunc_end:N] *= trunc_window[0:max_filter_len//2] if trunc_start < trunc_end: q[trunc_start:trunc_end] = 0 psd_trunc = FrequencySeries(numpy.zeros(len(psd)), delta_f=psd.delta_f, \ dtype=complex_same_precision_as(psd)) fft(q, psd_trunc) psd_trunc *= psd_trunc.conj() psd_out = 1. / abs(psd_trunc) return psd_out
python
def inverse_spectrum_truncation(psd, max_filter_len, low_frequency_cutoff=None, trunc_method=None): # sanity checks if type(max_filter_len) is not int or max_filter_len <= 0: raise ValueError('max_filter_len must be a positive integer') if low_frequency_cutoff is not None and low_frequency_cutoff < 0 \ or low_frequency_cutoff > psd.sample_frequencies[-1]: raise ValueError('low_frequency_cutoff must be within the bandwidth of the PSD') N = (len(psd)-1)*2 inv_asd = FrequencySeries((1. / psd)**0.5, delta_f=psd.delta_f, \ dtype=complex_same_precision_as(psd)) inv_asd[0] = 0 inv_asd[N//2] = 0 q = TimeSeries(numpy.zeros(N), delta_t=(N / psd.delta_f), \ dtype=real_same_precision_as(psd)) if low_frequency_cutoff: kmin = int(low_frequency_cutoff / psd.delta_f) inv_asd[0:kmin] = 0 ifft(inv_asd, q) trunc_start = max_filter_len // 2 trunc_end = N - max_filter_len // 2 if trunc_end < trunc_start: raise ValueError('Invalid value in inverse_spectrum_truncation') if trunc_method == 'hann': trunc_window = Array(numpy.hanning(max_filter_len), dtype=q.dtype) q[0:trunc_start] *= trunc_window[max_filter_len//2:max_filter_len] q[trunc_end:N] *= trunc_window[0:max_filter_len//2] if trunc_start < trunc_end: q[trunc_start:trunc_end] = 0 psd_trunc = FrequencySeries(numpy.zeros(len(psd)), delta_f=psd.delta_f, \ dtype=complex_same_precision_as(psd)) fft(q, psd_trunc) psd_trunc *= psd_trunc.conj() psd_out = 1. / abs(psd_trunc) return psd_out
[ "def", "inverse_spectrum_truncation", "(", "psd", ",", "max_filter_len", ",", "low_frequency_cutoff", "=", "None", ",", "trunc_method", "=", "None", ")", ":", "# sanity checks", "if", "type", "(", "max_filter_len", ")", "is", "not", "int", "or", "max_filter_len", ...
Modify a PSD such that the impulse response associated with its inverse square root is no longer than `max_filter_len` time samples. In practice this corresponds to a coarse graining or smoothing of the PSD. Parameters ---------- psd : FrequencySeries PSD whose inverse spectrum is to be truncated. max_filter_len : int Maximum length of the time-domain filter in samples. low_frequency_cutoff : {None, int} Frequencies below `low_frequency_cutoff` are zeroed in the output. trunc_method : {None, 'hann'} Function used for truncating the time-domain filter. None produces a hard truncation at `max_filter_len`. Returns ------- psd : FrequencySeries PSD whose inverse spectrum has been truncated. Raises ------ ValueError For invalid types or values of `max_filter_len` and `low_frequency_cutoff`. Notes ----- See arXiv:gr-qc/0509116 for details.
[ "Modify", "a", "PSD", "such", "that", "the", "impulse", "response", "associated", "with", "its", "inverse", "square", "root", "is", "no", "longer", "than", "max_filter_len", "time", "samples", ".", "In", "practice", "this", "corresponds", "to", "a", "coarse", ...
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/psd/estimate.py#L187-L259
227,996
gwastro/pycbc
pycbc/population/rates_functions.py
process_full_data
def process_full_data(fname, rhomin, mass1, mass2, lo_mchirp, hi_mchirp): """Read the zero-lag and time-lag triggers identified by templates in a specified range of chirp mass. Parameters ---------- hdfile: File that stores all the triggers rhomin: float Minimum value of SNR threhold (will need including ifar) mass1: array First mass of the waveform in the template bank mass2: array Second mass of the waveform in the template bank lo_mchirp: float Minimum chirp mass for the template hi_mchirp: float Maximum chirp mass for the template Returns ------- dictionary containing foreground triggers and background information """ with h5py.File(fname, 'r') as bulk: id_bkg = bulk['background_exc/template_id'][:] id_fg = bulk['foreground/template_id'][:] mchirp_bkg = mchirp_from_mass1_mass2(mass1[id_bkg], mass2[id_bkg]) bound = np.sign((mchirp_bkg - lo_mchirp) * (hi_mchirp - mchirp_bkg)) idx_bkg = np.where(bound == 1) mchirp_fg = mchirp_from_mass1_mass2(mass1[id_fg], mass2[id_fg]) bound = np.sign((mchirp_fg - lo_mchirp) * (hi_mchirp - mchirp_fg)) idx_fg = np.where(bound == 1) zerolagstat = bulk['foreground/stat'][:][idx_fg] cstat_back_exc = bulk['background_exc/stat'][:][idx_bkg] dec_factors = bulk['background_exc/decimation_factor'][:][idx_bkg] return {'zerolagstat': zerolagstat[zerolagstat > rhomin], 'dec_factors': dec_factors[cstat_back_exc > rhomin], 'cstat_back_exc': cstat_back_exc[cstat_back_exc > rhomin]}
python
def process_full_data(fname, rhomin, mass1, mass2, lo_mchirp, hi_mchirp): with h5py.File(fname, 'r') as bulk: id_bkg = bulk['background_exc/template_id'][:] id_fg = bulk['foreground/template_id'][:] mchirp_bkg = mchirp_from_mass1_mass2(mass1[id_bkg], mass2[id_bkg]) bound = np.sign((mchirp_bkg - lo_mchirp) * (hi_mchirp - mchirp_bkg)) idx_bkg = np.where(bound == 1) mchirp_fg = mchirp_from_mass1_mass2(mass1[id_fg], mass2[id_fg]) bound = np.sign((mchirp_fg - lo_mchirp) * (hi_mchirp - mchirp_fg)) idx_fg = np.where(bound == 1) zerolagstat = bulk['foreground/stat'][:][idx_fg] cstat_back_exc = bulk['background_exc/stat'][:][idx_bkg] dec_factors = bulk['background_exc/decimation_factor'][:][idx_bkg] return {'zerolagstat': zerolagstat[zerolagstat > rhomin], 'dec_factors': dec_factors[cstat_back_exc > rhomin], 'cstat_back_exc': cstat_back_exc[cstat_back_exc > rhomin]}
[ "def", "process_full_data", "(", "fname", ",", "rhomin", ",", "mass1", ",", "mass2", ",", "lo_mchirp", ",", "hi_mchirp", ")", ":", "with", "h5py", ".", "File", "(", "fname", ",", "'r'", ")", "as", "bulk", ":", "id_bkg", "=", "bulk", "[", "'background_e...
Read the zero-lag and time-lag triggers identified by templates in a specified range of chirp mass. Parameters ---------- hdfile: File that stores all the triggers rhomin: float Minimum value of SNR threhold (will need including ifar) mass1: array First mass of the waveform in the template bank mass2: array Second mass of the waveform in the template bank lo_mchirp: float Minimum chirp mass for the template hi_mchirp: float Maximum chirp mass for the template Returns ------- dictionary containing foreground triggers and background information
[ "Read", "the", "zero", "-", "lag", "and", "time", "-", "lag", "triggers", "identified", "by", "templates", "in", "a", "specified", "range", "of", "chirp", "mass", "." ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/population/rates_functions.py#L13-L55
227,997
gwastro/pycbc
pycbc/population/rates_functions.py
save_bkg_falloff
def save_bkg_falloff(fname_statmap, fname_bank, path, rhomin, lo_mchirp, hi_mchirp): ''' Read the STATMAP files to derive snr falloff for the background events. Save the output to a txt file Bank file is also provided to restrict triggers to BBH templates. Parameters ---------- fname_statmap: string STATMAP file containing trigger information fname_bank: string File name of the template bank path: string Destination where txt file is saved rhomin: float Minimum value of SNR threhold (will need including ifar) lo_mchirp: float Minimum chirp mass for the template hi_mchirp: float Maximum chirp mass for template ''' with h5py.File(fname_bank, 'r') as bulk: mass1_bank = bulk['mass1'][:] mass2_bank = bulk['mass2'][:] full_data = process_full_data(fname_statmap, rhomin, mass1_bank, mass2_bank, lo_mchirp, hi_mchirp) max_bg_stat = np.max(full_data['cstat_back_exc']) bg_bins = np.linspace(rhomin, max_bg_stat, 76) bg_counts = np.histogram(full_data['cstat_back_exc'], weights=full_data['dec_factors'], bins=bg_bins)[0] zerolagstat = full_data['zerolagstat'] coincs = zerolagstat[zerolagstat >= rhomin] bkg = (bg_bins[:-1], bg_bins[1:], bg_counts) return bkg, coincs
python
def save_bkg_falloff(fname_statmap, fname_bank, path, rhomin, lo_mchirp, hi_mchirp): ''' Read the STATMAP files to derive snr falloff for the background events. Save the output to a txt file Bank file is also provided to restrict triggers to BBH templates. Parameters ---------- fname_statmap: string STATMAP file containing trigger information fname_bank: string File name of the template bank path: string Destination where txt file is saved rhomin: float Minimum value of SNR threhold (will need including ifar) lo_mchirp: float Minimum chirp mass for the template hi_mchirp: float Maximum chirp mass for template ''' with h5py.File(fname_bank, 'r') as bulk: mass1_bank = bulk['mass1'][:] mass2_bank = bulk['mass2'][:] full_data = process_full_data(fname_statmap, rhomin, mass1_bank, mass2_bank, lo_mchirp, hi_mchirp) max_bg_stat = np.max(full_data['cstat_back_exc']) bg_bins = np.linspace(rhomin, max_bg_stat, 76) bg_counts = np.histogram(full_data['cstat_back_exc'], weights=full_data['dec_factors'], bins=bg_bins)[0] zerolagstat = full_data['zerolagstat'] coincs = zerolagstat[zerolagstat >= rhomin] bkg = (bg_bins[:-1], bg_bins[1:], bg_counts) return bkg, coincs
[ "def", "save_bkg_falloff", "(", "fname_statmap", ",", "fname_bank", ",", "path", ",", "rhomin", ",", "lo_mchirp", ",", "hi_mchirp", ")", ":", "with", "h5py", ".", "File", "(", "fname_bank", ",", "'r'", ")", "as", "bulk", ":", "mass1_bank", "=", "bulk", "...
Read the STATMAP files to derive snr falloff for the background events. Save the output to a txt file Bank file is also provided to restrict triggers to BBH templates. Parameters ---------- fname_statmap: string STATMAP file containing trigger information fname_bank: string File name of the template bank path: string Destination where txt file is saved rhomin: float Minimum value of SNR threhold (will need including ifar) lo_mchirp: float Minimum chirp mass for the template hi_mchirp: float Maximum chirp mass for template
[ "Read", "the", "STATMAP", "files", "to", "derive", "snr", "falloff", "for", "the", "background", "events", ".", "Save", "the", "output", "to", "a", "txt", "file", "Bank", "file", "is", "also", "provided", "to", "restrict", "triggers", "to", "BBH", "templat...
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/population/rates_functions.py#L58-L95
227,998
gwastro/pycbc
pycbc/population/rates_functions.py
log_rho_bg
def log_rho_bg(trigs, bins, counts): ''' Calculate the log of background fall-off Parameters ---------- trigs: array SNR values of all the triggers bins: string bins for histogrammed triggers path: string counts for histogrammed triggers Returns ------- array ''' trigs = np.atleast_1d(trigs) N = sum(counts) assert np.all(trigs >= np.min(bins)), \ 'Trigger SNR values cannot all be below the lowest bin limit!' # If there are any triggers that are louder than the max bin, put one # fictitious count in a bin that extends from the limits of the slide # triggers out to the loudest trigger. # If there is no counts for a foreground trigger put a fictitious count # in the background bin if np.any(trigs >= np.max(bins)): N = N + 1 #log_plimit = -np.log(N) - np.log(np.max(trigs) - bins[-1]) CHECK IT log_rhos = [] for t in trigs: if t >= np.max(bins): log_rhos.append(-log(N)-log(np.max(trigs) - bins[-1])) else: i = bisect.bisect(bins, t) - 1 if counts[i] == 0: counts[i] = 1 log_rhos.append(log(counts[i]) - log(bins[i+1] - bins[i]) - log(N)) return np.array(log_rhos)
python
def log_rho_bg(trigs, bins, counts): ''' Calculate the log of background fall-off Parameters ---------- trigs: array SNR values of all the triggers bins: string bins for histogrammed triggers path: string counts for histogrammed triggers Returns ------- array ''' trigs = np.atleast_1d(trigs) N = sum(counts) assert np.all(trigs >= np.min(bins)), \ 'Trigger SNR values cannot all be below the lowest bin limit!' # If there are any triggers that are louder than the max bin, put one # fictitious count in a bin that extends from the limits of the slide # triggers out to the loudest trigger. # If there is no counts for a foreground trigger put a fictitious count # in the background bin if np.any(trigs >= np.max(bins)): N = N + 1 #log_plimit = -np.log(N) - np.log(np.max(trigs) - bins[-1]) CHECK IT log_rhos = [] for t in trigs: if t >= np.max(bins): log_rhos.append(-log(N)-log(np.max(trigs) - bins[-1])) else: i = bisect.bisect(bins, t) - 1 if counts[i] == 0: counts[i] = 1 log_rhos.append(log(counts[i]) - log(bins[i+1] - bins[i]) - log(N)) return np.array(log_rhos)
[ "def", "log_rho_bg", "(", "trigs", ",", "bins", ",", "counts", ")", ":", "trigs", "=", "np", ".", "atleast_1d", "(", "trigs", ")", "N", "=", "sum", "(", "counts", ")", "assert", "np", ".", "all", "(", "trigs", ">=", "np", ".", "min", "(", "bins",...
Calculate the log of background fall-off Parameters ---------- trigs: array SNR values of all the triggers bins: string bins for histogrammed triggers path: string counts for histogrammed triggers Returns ------- array
[ "Calculate", "the", "log", "of", "background", "fall", "-", "off" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/population/rates_functions.py#L97-L141
227,999
gwastro/pycbc
pycbc/population/rates_functions.py
fgmc
def fgmc(log_fg_ratios, mu_log_vt, sigma_log_vt, Rf, maxfg): ''' Function to fit the likelihood Fixme ''' Lb = np.random.uniform(0., maxfg, len(Rf)) pquit = 0 while pquit < 0.1: # quit when the posterior on Lf is very close to its prior nsamp = len(Lb) Rf_sel = np.random.choice(Rf, nsamp) vt = np.random.lognormal(mu_log_vt, sigma_log_vt, len(Rf_sel)) Lf = Rf_sel * vt log_Lf, log_Lb = log(Lf), log(Lb) plR = 0 for lfr in log_fg_ratios: plR += np.logaddexp(lfr + log_Lf, log_Lb) plR -= (Lf + Lb) plRn = plR - max(plR) idx = np.exp(plRn) > np.random.random(len(plRn)) pquit = ss.stats.ks_2samp(Lb, Lb[idx])[1] Lb = Lb[idx] return Rf_sel[idx], Lf[idx], Lb
python
def fgmc(log_fg_ratios, mu_log_vt, sigma_log_vt, Rf, maxfg): ''' Function to fit the likelihood Fixme ''' Lb = np.random.uniform(0., maxfg, len(Rf)) pquit = 0 while pquit < 0.1: # quit when the posterior on Lf is very close to its prior nsamp = len(Lb) Rf_sel = np.random.choice(Rf, nsamp) vt = np.random.lognormal(mu_log_vt, sigma_log_vt, len(Rf_sel)) Lf = Rf_sel * vt log_Lf, log_Lb = log(Lf), log(Lb) plR = 0 for lfr in log_fg_ratios: plR += np.logaddexp(lfr + log_Lf, log_Lb) plR -= (Lf + Lb) plRn = plR - max(plR) idx = np.exp(plRn) > np.random.random(len(plRn)) pquit = ss.stats.ks_2samp(Lb, Lb[idx])[1] Lb = Lb[idx] return Rf_sel[idx], Lf[idx], Lb
[ "def", "fgmc", "(", "log_fg_ratios", ",", "mu_log_vt", ",", "sigma_log_vt", ",", "Rf", ",", "maxfg", ")", ":", "Lb", "=", "np", ".", "random", ".", "uniform", "(", "0.", ",", "maxfg", ",", "len", "(", "Rf", ")", ")", "pquit", "=", "0", "while", "...
Function to fit the likelihood Fixme
[ "Function", "to", "fit", "the", "likelihood", "Fixme" ]
7a64cdd104d263f1b6ea0b01e6841837d05a4cb3
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/population/rates_functions.py#L156-L188