repo | file | code | file_length | avg_line_length | max_line_length | extension_type
|---|---|---|---|---|---|---|
pycbc
|
pycbc-master/pycbc/neutron_stars/__init__.py
|
import os.path
# Setup the directory with the NS equilibrium sequence(s)
NS_DATA_DIRECTORY = os.path.join(
os.path.dirname(__file__), 'ns_data')
NS_SEQUENCES = [
f.replace('equil_', '').replace('.dat', '')
for f in os.listdir(NS_DATA_DIRECTORY) if f.endswith('.dat')]
from pycbc.neutron_stars.eos_utils import *
from pycbc.neutron_stars.pg_isso_solver import *
| 373
| 36.4
| 65
|
py
|
pycbc
|
pycbc-master/pycbc/neutron_stars/pg_isso_solver.py
|
# Copyright (C) 2022 Francesco Pannarale, Andrew Williamson,
# Samuel Higginbotham
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Innermost Stable Spherical Orbit (ISSO) solver in the Perez-Giz (PG)
formalism. See `Stone, Loeb, Berger, PRD 87, 084053 (2013)`_.
.. _Stone, Loeb, Berger, PRD 87, 084053 (2013):
http://dx.doi.org/10.1103/PhysRevD.87.084053
"""
import numpy as np
from scipy.optimize import root_scalar
def ISCO_solution(chi, incl):
r"""Analytic solution of the innermost
stable circular orbit (ISCO) for the Kerr metric.
See eq. (2.21) of `Bardeen, Press & Teukolsky, ApJ 178, 347 (1972)`_.
.. _Bardeen, Press & Teukolsky, ApJ 178, 347 (1972):
https://articles.adsabs.harvard.edu/pdf/1972ApJ...178..347B
Parameters
-----------
chi: float
the BH dimensionless spin parameter
incl: float
inclination angle between the BH spin and the orbital angular
momentum in radians
Returns
----------
float
"""
chi2 = chi * chi
sgn = np.sign(np.cos(incl))
Z1 = 1 + np.cbrt(1 - chi2) * (np.cbrt(1 + chi) + np.cbrt(1 - chi))
Z2 = np.sqrt(3 * chi2 + Z1 * Z1)
return 3 + Z2 - sgn * np.sqrt((3 - Z1) * (3 + Z1 + 2 * Z2))
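# Quick sanity checks (sketch; these are the standard Kerr ISCO radii):
#   >>> ISCO_solution(0., 0.)       # Schwarzschild limit: 6.0
#   >>> ISCO_solution(1., 0.)       # extremal prograde equatorial: 1.0
#   >>> ISCO_solution(1., np.pi)    # extremal retrograde equatorial: 9.0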
def ISSO_eq_at_pole(r, chi):
r"""Polynomial that enables the calculation of the Kerr polar
(:math:`\iota = \pm \pi / 2`) innermost stable spherical orbit
(ISSO) radius via the roots of
.. math::
P(r) &= r^3 [r^2 (r - 6) + \chi^2 (3 r + 4)] \\
&\quad + \chi^4 [3 r (r - 2) + \chi^2] \, ,
where :math:`\chi` is the BH dimensionless spin parameter. Physical
solutions are between 6 and
:math:`1 + \sqrt{3} + \sqrt{3 + 2 \sqrt{3}}`.
Parameters
----------
r: float
the radial coordinate in BH mass units
chi: float
the BH dimensionless spin parameter
Returns
-------
float
"""
chi2 = chi * chi
return (
r**3 * (r**2 * (r - 6) + chi2 * (3 * r + 4))
+ chi2 * chi2 * (3 * r * (r - 2) + chi2))
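# The physical root of ISSO_eq_at_pole runs from r = 6 at chi = 0 down to
# r = 1 + sqrt(3) + sqrt(3 + 2*sqrt(3)) ~ 5.2745 at chi = 1 (sketch):
#   >>> ISSO_eq_at_pole(6., 0.)        # 0.0, i.e. r = 6 is the chi = 0 root
#   >>> ISSO_eq_at_pole(5.2745, 1.)    # ~0, near the extremal-spin root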
def ISSO_eq_at_pole_dr(r, chi):
"""Partial derivative of :func:`ISSO_eq_at_pole` with respect to r.
Parameters
----------
r: float
the radial coordinate in BH mass units
chi: float
the BH dimensionless spin parameter
Returns
-------
float
"""
chi2 = chi * chi
twlvchi2 = 12 * chi2
sxchi4 = 6 * chi2 * chi2
return (
6 * r**5 - 30 * r**4 + twlvchi2 * r**3 + twlvchi2 * r**2 + sxchi4 * r
- sxchi4)
def ISSO_eq_at_pole_dr2(r, chi):
"""Double partial derivative of :func:`ISSO_eq_at_pole` with
respect to r.
Parameters
----------
r: float
the radial coordinate in BH mass units
chi: float
the BH dimensionless spin parameter
Returns
-------
float
"""
chi2 = chi * chi
return (
30 * r**4 - 120 * r**3 + 36 * chi2 * r**2 + 24 * chi2 * r
+ 6 * chi2 * chi2)
def PG_ISSO_eq(r, chi, incl):
r"""Polynomial that enables the calculation of a generic innermost
stable spherical orbit (ISSO) radius via the roots in :math:`r` of
.. math::
S(r) &= r^8 Z(r) + \chi^2 (1 - \cos(\iota)^2) \\
&\quad * [\chi^2 (1 - \cos(\iota)^2) Y(r) - 2 r^4 X(r)]\,,
where
.. math::
X(r) &= \chi^2 (\chi^2 (3 \chi^2 + 4 r (2 r - 3)) \\
&\quad + r^2 (15 r (r - 4) + 28)) - 6 r^4 (r^2 - 4) \, ,
.. math::
Y(r) &= \chi^4 (\chi^4 + r^2 [7 r (3 r - 4) + 36]) \\
&\quad + 6 r (r - 2) \\
&\qquad * (\chi^6 + 2 r^3
[\chi^2 (3 r + 2) + 3 r^2 (r - 2)]) \, ,
and :math:`Z(r) = [r (r - 6)]^2 - \chi^2 [2 r (3 r + 14) - 9 \chi^2]` is the
ISCO polynomial, whose root is returned by :func:`ISCO_solution`. Physical
solutions are between the equatorial ISSO (i.e. the ISCO) radius
(:func:`ISCO_solution`) and the polar ISSO radius (the root of
:func:`ISSO_eq_at_pole`).
See `Stone, Loeb, Berger, PRD 87, 084053 (2013)`_.
.. _Stone, Loeb, Berger, PRD 87, 084053 (2013):
http://dx.doi.org/10.1103/PhysRevD.87.084053
Parameters
----------
r: float
the radial coordinate in BH mass units
chi: float
the BH dimensionless spin parameter
incl: float
inclination angle between the BH spin and the orbital angular
momentum in radians
Returns
-------
float
"""
chi2 = chi * chi
chi4 = chi2 * chi2
r2 = r * r
r4 = r2 * r2
three_r = 3 * r
r_minus_2 = r - 2
sin_incl2 = (np.sin(incl))**2
X = (
chi2 * (
chi2 * (3 * chi2 + 4 * r * (2 * r - 3))
+ r2 * (15 * r * (r - 4) + 28))
- 6 * r4 * (r2 - 4))
Y = (
chi4 * (chi4 + r2 * (7 * r * (three_r - 4) + 36))
+ 6 * r * r_minus_2 * (
chi4 * chi2 + 2 * r2 * r * (
chi2 * (three_r + 2) + 3 * r2 * r_minus_2)))
Z = (r * (r - 6))**2 - chi2 * (2 * r * (3 * r + 14) - 9 * chi2)
return r4 * r4 * Z + chi2 * sin_incl2 * (chi2 * sin_incl2 * Y - 2 * r4 * X)
def PG_ISSO_eq_dr(r, chi, incl):
"""Partial derivative of :func:`PG_ISSO_eq` with respect to r.
Parameters
----------
r: float
the radial coordinate in BH mass units
chi: float
the BH dimensionless spin parameter
incl: float
inclination angle between the BH spin and the orbital angular
momentum in radians
Returns
-------
float
"""
sini = np.sin(incl)
sin2i = sini * sini
sin4i = sin2i * sin2i
chi2 = chi * chi
chi4 = chi2 * chi2
chi6 = chi4 * chi2
chi8 = chi4 * chi4
chi10 = chi6 * chi4
return (
12 * r**11 - 132 * r**10
+ r**9 * (120 * chi2 * sin2i - 60 * chi2 + 360) - r**8 * 252 * chi2
+ 8 * r**7 * (
36 * chi4 * sin4i - 30 * chi4 * sin2i + 9 * chi4
- 48 * chi2 * sin2i)
+ 7 * r**6 * (120 * chi4 * sin2i - 144 * chi4 * sin4i)
+ 6 * r**5 * (
36 * chi6 * sin4i - 16 * chi6 * sin2i + 144 * chi4 * sin4i
- 56 * chi4 * sin2i)
+ r**4 * (120 * chi6 * sin2i - 240 * chi6 * sin4i)
+ r**3 * (84 * chi8 * sin4i - 24 * chi8 * sin2i - 192 * chi6 * sin4i)
- 84 * r**2 * chi8 * sin4i
+ r * (12 * chi10 * sin4i + 72 * chi8 * sin4i) - 12 * chi10 * sin4i)
def PG_ISSO_eq_dr2(r, chi, incl):
"""Second partial derivative of :func:`PG_ISSO_eq` with respect to
r.
Parameters
----------
r: float
the radial coordinate in BH mass units
chi: float
the BH dimensionless spin parameter
incl: float
inclination angle between the BH spin and the orbital angular
momentum in radians
Returns
-------
float
"""
sini = np.sin(incl)
sin2i = sini * sini
sin4i = sin2i * sin2i
chi2 = chi * chi
chi4 = chi2 * chi2
chi6 = chi4 * chi2
chi8 = chi4 * chi4
return (
132 * r**10 - 1320 * r**9
+ 90 * r**8 * (12 * chi2 * sin2i - 6 * chi2 + 36) - 2016 * chi2 * r**7
+ 56 * r**6 * (
36 * chi4 * sin4i - 30 * chi4 * sin2i + 9 * chi4
- 48 * chi2 * sin2i)
+ 42 * r**5 * (120 * chi4 * sin2i - 144 * chi4 * sin4i)
+ 30 * r**4 * (
36 * chi6 * sin4i - 16 * chi6 * sin2i + 144 * chi4 * sin4i
- 56 * chi4 * sin2i)
+ r**3 * (480 * chi6 * sin2i - 960 * chi6 * sin4i)
+ r**2 * (
252 * chi8 * sin4i - 72 * chi8 * sin2i - 576 * chi6 * sin4i)
- r * 168 * chi8 * sin4i
+ 12 * chi8 * chi2 * sin4i + 72 * chi8 * sin4i)
def PG_ISSO_solver(chi, incl):
"""Function that determines the radius of the innermost stable
spherical orbit (ISSO) for a Kerr BH and a generic inclination
angle between the BH spin and the orbital angular momentum.
This function finds the appropriate root of :func:`PG_ISSO_eq`.
Parameters
----------
chi: {float, array}
the BH dimensionless spin parameter
incl: {float, array}
the inclination angle between the BH spin and the orbital
angular momentum in radians
Returns
-------
solution: array
the radius of the orbit in BH mass units
"""
# Auxiliary variables
# Ensure 1-d array inputs; np.atleast_1d handles scalars safely, unlike
# np.array(..., copy=False) which fails on scalars under NumPy >= 2.0
chi = np.atleast_1d(chi)
incl = np.atleast_1d(incl)
chi = np.abs(chi)
# ISCO radius for the given spin magnitude
rISCO_limit = ISCO_solution(chi, incl)
# If the inclination is 0 or pi, just output the ISCO radius
equatorial = np.isclose(incl, 0) | np.isclose(incl, np.pi)
if all(equatorial):
return rISCO_limit
# ISSO radius for an inclination of pi/2
# Initial guess is based on the extrema of the polar ISSO radius equation,
# which are: r=6 (chi=0) and r=1+sqrt(3)+sqrt(3+2*sqrt(3))=5.274... (chi=1)
initial_guess = [5.27451056440629 if c > 0.5 else 6 for c in chi]
rISSO_at_pole_limit = np.array([
root_scalar(
ISSO_eq_at_pole, x0=g0, fprime=ISSO_eq_at_pole_dr,
fprime2=ISSO_eq_at_pole_dr2, args=(c,)).root
for g0, c in zip(initial_guess, chi)])
# If the inclination is pi/2, just output the ISSO radius at the pole(s)
polar = np.isclose(incl, 0.5*np.pi)
if all(polar):
return rISSO_at_pole_limit
# Otherwise, find the ISSO radius for a generic inclination
initial_hi = np.maximum(rISCO_limit, rISSO_at_pole_limit)
initial_lo = np.minimum(rISCO_limit, rISSO_at_pole_limit)
brackets = [
(bl, bh) if (c != 1 and PG_ISSO_eq(bl, c, inc) *
PG_ISSO_eq(bh, c, inc) < 0) else None
for bl, bh, c, inc in zip(initial_lo, initial_hi, chi, incl)]
solution = np.array([
root_scalar(
PG_ISSO_eq, x0=g0, fprime=PG_ISSO_eq_dr, bracket=bracket,
fprime2=PG_ISSO_eq_dr2, args=(c, inc), xtol=1e-12).root
for g0, bracket, c, inc in zip(initial_hi, brackets, chi, incl)])
oob = (solution < 1) | (solution > 9)
if any(oob):
solution = np.array([
root_scalar(
PG_ISSO_eq, x0=g0, fprime=PG_ISSO_eq_dr, bracket=bracket,
fprime2=PG_ISSO_eq_dr2, args=(c, inc)).root
if ob else sol for g0, bracket, c, inc, ob, sol
in zip(initial_lo, brackets, chi, incl, oob, solution)
])
oob = (solution < 1) | (solution > 9)
if any(oob):
raise RuntimeError('Unable to obtain some solutions!')
return solution
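# Illustrative usage sketch (the spin and inclination values below are
# arbitrary examples): the returned ISSO radii always lie between the
# equatorial (ISCO) and polar limits, i.e. within roughly 1 and 9 BH mass
# units.
#   >>> PG_ISSO_solver(np.array([0.5, 0.9]), np.array([np.pi / 4, np.pi / 3]))
#   # each element lies between ISCO_solution(chi, incl) and the
#   # corresponding polar ISSO radius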
| 11,046
| 30.383523
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/population/fgmc_functions.py
|
# Copyright (C) 2021 Thomas Dent
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
"""
A set of helper functions for evaluating event rates, densities etc.
See https://dcc.ligo.org/LIGO-T2100060/public for technical explanations
"""
from os.path import basename
import h5py
import bisect
from itertools import chain as it_chain, combinations as it_comb
import numpy as np
from pycbc import conversions as conv
from pycbc import events
from pycbc.events.coinc import mean_if_greater_than_zero as coinc_meanigz
from pycbc.events import triggers
def filter_bin_lo_hi(values, lo, hi):
in_bin = np.sign((values - lo) * (hi - values))
if np.any(in_bin == 0):
raise RuntimeError('Edge case! Bin edges', lo, hi,
'value(s)', values[in_bin == 0])
return in_bin == 1
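# Example (sketch): values strictly inside (lo, hi) map to True, values
# outside to False, and a value exactly on an edge raises RuntimeError:
#   >>> filter_bin_lo_hi(np.array([1.5, 3.0]), 1.0, 2.0)   # [True, False]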
def filter_tmplt_mchirp(bankf, lo_mchirp, hi_mchirp):
with h5py.File(bankf, 'r') as bank:
mchirp = conv.mchirp_from_mass1_mass2(bank['mass1'][:], bank['mass2'][:])
# Boolean over template id
return filter_bin_lo_hi(mchirp, lo_mchirp, hi_mchirp)
def read_full_data(fullf, rhomin, tmplt_filter=None):
"""Read the zero- and time-lagged triggers identified by a specific
set of templates.
Parameters
----------
fullf:
File that stores zerolag and slide triggers
rhomin: float
Ranking statistic threshold
tmplt_filter: array of Booleans
Filter over the array of templates stored in the bank file
Returns
-------
dictionary
containing foreground triggers and background information
"""
with h5py.File(fullf, 'r') as full_data:
# apply template filter
tid_bkg = full_data['background_exc/template_id'][:]
tid_fg = full_data['foreground/template_id'][:]
bkg_inbin = tmplt_filter[tid_bkg] # Boolean over bg events
fg_inbin = tmplt_filter[tid_fg] # Boolean over fg events
zerolagstat = full_data['foreground/stat'][:][fg_inbin]
zerolagifar = full_data['foreground/ifar'][:][fg_inbin]
# arbitrarily choose time from one of the ifos
zerolagtime = full_data['foreground/time1'][:][fg_inbin]
cstat_back_exc = full_data['background_exc/stat'][:][bkg_inbin]
dec_factors = full_data['background_exc/decimation_factor'][:][bkg_inbin]
# filter on stat value
above = zerolagstat > rhomin
back_above = cstat_back_exc > rhomin
return {'zerolagstat': zerolagstat[above],
'zerolagifar': zerolagifar[above],
'zerolagtime': zerolagtime[above],
'dec_factors': dec_factors[back_above],
'cstat_back_exc': cstat_back_exc[back_above],
'file_name': fullf}
def read_full_data_mchirp(fullf, bankf, rhomin, mc_lo, mc_hi):
tmp_filter = filter_tmplt_mchirp(bankf, mc_lo, mc_hi)
return read_full_data(fullf, rhomin, tmp_filter)
def log_rho_bg(trigs, counts, bins):
"""
trigs: zerolag event statistic values
counts: background histogram
bins: bin edges of the background histogram
Returns:
log of background PDF at the zerolag statistic values,
fractional uncertainty due to Poisson count (set to 100% for empty bins)
"""
trigs = np.atleast_1d(trigs)
if len(trigs) == 0: # corner case
return np.array([]), np.array([])
assert np.all(trigs >= np.min(bins)), "can't have triggers below bin lower limit"
N = sum(counts)
log_rhos = []
fracerr = []
# If any zerolag triggers that are louder than the max bin, put one
# fictitious count in a bin that extends from the limits of the slide triggers
# out to the loudest trigger.
if np.any(trigs >= np.max(bins)):
N = N + 1
for t in trigs:
if t >= np.max(bins):
# For a trigger louder than the max bin, put one fictitious count in
# a bin that extends from the limits of the slide triggers out to the
# loudest trigger. Fractional error is 100%
log_rhos.append(-np.log(N) - np.log(np.max(trigs) - bins[-1]))
fracerr.append(1.)
else:
i = bisect.bisect(bins, t) - 1
# If there are no counts for a foreground trigger put a fictitious
# count in the background bin
if counts[i] == 0:
counts[i] = 1
log_rhos.append(np.log(counts[i]) - np.log(bins[i+1] - bins[i])
- np.log(N))
fracerr.append(counts[i] ** -0.5)
return np.array(log_rhos), np.array(fracerr)
def log_rho_fg_analytic(trigs, rhomin):
# PDF of a rho^-4 distribution defined above the threshold rhomin
return np.log(3.) + 3. * np.log(rhomin) - 4 * np.log(trigs)
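# The rho**-4 model above is normalized over [rhomin, inf): the integral of
# 3 * rhomin**3 * rho**-4 from rhomin to infinity equals 1, so, for instance,
# exp(log_rho_fg_analytic(rhomin, rhomin)) == 3 / rhomin.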
def log_rho_fg(trigs, injstats, bins):
"""
trigs: zerolag event statistic values
injstats: injection event statistic values
bins: histogram bins
Returns:
log of signal PDF at the zerolag statistic values,
fractional uncertainty from Poisson count
"""
trigs = np.atleast_1d(trigs)
if len(trigs) == 0: # corner case
return np.array([]), np.array([])
assert np.min(trigs) >= np.min(bins)
# allow 'very loud' triggers
bmax = np.max(bins)
if np.max(trigs) >= bmax:
print('Replacing stat values lying above highest bin')
print(str(bmax))
trigs = np.where(trigs >= bmax, bmax - 1e-6, trigs)
assert np.max(trigs) < np.max(bins) # check it worked
counts, bins = np.histogram(injstats, bins)
N = sum(counts)
dens = counts / np.diff(bins) / float(N)
fracerr = counts ** -0.5
tinds = np.searchsorted(bins, trigs) - 1
return np.log(dens[tinds]), fracerr[tinds]
def get_start_dur(path):
fname = basename(path) # remove directory path
# file name is IFOS-DESCRIPTION-START-DURATION.type
pieces = fname.split('.')[0].split('-')
return pieces[2], pieces[3]
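# Example (sketch, with a hypothetical file name following the convention
# noted above):
#   >>> get_start_dur('/a/b/H1L1V1-STATMAP-1234567890-86400.hdf')
#   ('1234567890', '86400')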
def in_coinc_time_incl(f, cstring, test_times):
""" filter to all times where coincs of type given by cstring exist
"""
in_time = np.zeros(len(test_times))
starts = np.array(f['segments/%s/start' % cstring][:])
ends = np.array(f['segments/%s/end' % cstring][:])
idx_within_segment = events.indices_within_times(test_times, starts, ends)
in_time[idx_within_segment] = np.ones_like(idx_within_segment)
return in_time
# what to change for more/fewer ifos
_ifoset = ('H1', 'L1', 'V1')
# all combinations of ifos with length mincount or more
# each returned as a tuple in same order as ifos
def alltimes(ifos, mincount=1):
assert mincount <= len(ifos)
assert len(set(ifos)) == len(ifos) # can't work with duplicates
return it_chain.from_iterable(it_comb(ifos, r) for r in
np.arange(mincount, len(ifos) + 1))
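# Example (sketch) for the default 3-ifo set:
#   >>> list(alltimes(('H1', 'L1', 'V1'), mincount=2))
#   [('H1', 'L1'), ('H1', 'V1'), ('L1', 'V1'), ('H1', 'L1', 'V1')]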
_alltimes = frozenset(alltimes(_ifoset, mincount=1))
_alltimestring = frozenset([''.join(t) for t in _alltimes])
_allctimes = frozenset(alltimes(_ifoset, mincount=2))
def ifos_from_combo(ct):
# extract ifos in alphabetical order from a coinc time string
return sorted([ct[i:i + 2] for i in range(0, len(ct), 2)])
def type_in_time(ct, cty):
# returns True if the given coinc type can exist in the coinc time ct
return all(i in ct for i in cty)
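# Examples (sketch):
#   >>> ifos_from_combo('H1L1V1')       # ['H1', 'L1', 'V1']
#   >>> type_in_time('H1L1V1', 'H1L1')  # True: H1L1 coincs occur in H1L1V1 time
#   >>> type_in_time('H1L1', 'H1V1')    # False: V1 is not observing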
class EventRate(object):
def __init__(self, args, coinc_times, coinc_types=None, bin_param='mchirp',
bin_lo=None, bin_hi=None):
"""
coinc_times: iterable of strings indicating combinations of ifos operating
coinc_types: list of strings indicating coinc event types to be considered
"""
# allow for single-ifo time although not supported in pipeline yet
if hasattr(args, 'min_ifos'):
self.mincount = args.min_ifos
else:
self.mincount = 2
if hasattr(args, 'network') and sorted(args.network) != list(_ifoset):
self.ifos = sorted(args.network)
else:
self.ifos = _ifoset
self.allctimes = frozenset(alltimes(self.ifos, mincount=self.mincount))
self.allctimestring = frozenset([''.join(t) for t in self.allctimes])
for ct in coinc_times:
assert ct in list(self.allctimestring)
self.ctimes = coinc_times
if coinc_types is None:
# all types possible during given times
self.coinc_types = self.allctimestring
else:
# any coinc type must also be a time (?)
for ty in coinc_types:
assert ty in list(self.allctimes)
self.coinc_types = frozenset([''.join(t) for t in coinc_types])
if args.verbose:
print('Using', self.coinc_types, 'coincs in',
self.allctimestring, 'times')
self.args = args
self.thr = self.args.stat_threshold
self.bin_param = bin_param
self.lo = bin_lo
self.hi = bin_hi
self.bank = None
self.massspins = None
self.tpars = None
self.tmplt_filter = None
self.in_bin = None
self.incl_livetimes = {}
self.livetimes = {}
def add_bank(self, bank_file):
self.bank = bank_file
with h5py.File(self.bank, 'r') as b:
tids = np.arange(len(b['mass1'][:]))
# tuples of m1, m2, s1z, s2z in template id order
self.massspins = triggers.get_mass_spin(b, tids)
def filter_templates(self):
"""
calculate array of Booleans in template id order to filter events
"""
assert self.massspins is not None
assert self.lo is not None
assert self.hi is not None
if self.args.verbose:
print('Cutting on %s between %f - %f' %
(self.bin_param, self.lo, self.hi))
self.tpars = triggers.get_param(self.bin_param, None, *self.massspins)
self.in_bin = filter_bin_lo_hi(self.tpars, self.lo, self.hi)
def make_bins(self, maxval, choice='bg'):
# allow options to be strings describing bin formulae as well as floats?
# default to None so a missing option falls through to the error below
linbw = getattr(self.args, choice + '_bin_width', None)
logbw = getattr(self.args, choice + '_log_bin_width', None)
if linbw is not None:
n_bins = int((maxval - self.thr) / float(linbw))
bins = np.linspace(self.thr - 0.0001, maxval, n_bins + 1)
elif logbw is not None:
n_bins = int(np.log(maxval / self.thr) / float(logbw))
bins = np.logspace(np.log10(self.thr) - 0.0001, np.log10(maxval),
n_bins + 1)
else:
raise RuntimeError("Can't make bins without a %s bin width option!"
% choice)
if self.args.verbose:
print(str(n_bins) + ' ' + choice + ' stat bins')
return bins
def get_ctypes(self, tdict):
# tdict is a ifo -> trigger time dictionary
ifotimes = zip(*[tdict[i] for i in self.ifos])
cty = []
for ts in ifotimes:
# if an ifo doesn't participate, time is sentinel value -1
cty.append(''.join([i for i, t in zip(self.ifos, ts) if t > 0]))
# return is array of coinc types strings
return np.array(cty)
def moreifotimes(self, ctstring):
# get list of coinc times with more ifos than ctstring
allctime_moreifos = [ct for ct in self.allctimestring if
len(ct) > len(ctstring)]
# only return those when at least the same ifos are operating
ret = []
ifos = ifos_from_combo(ctstring)
for act in allctime_moreifos:
if all(i in act for i in ifos):
ret.append(act)
return ret
def in_coinc_time_excl(self, f, cstring, test_times):
""" filter to all times where exactly the ifos in cstring are observing
"""
if len(cstring) == max(len(s) for s in self.allctimestring):
# ctime string already uniquely specifies time
return in_coinc_time_incl(f, cstring, test_times)
in_time = in_coinc_time_incl(f, cstring, test_times)
# if 'more-ifo' coinc times exist, exclude them
for combo in self.moreifotimes(cstring):
in_moreifo_time = in_coinc_time_incl(f, combo, test_times)
# subtract one if in more-ifo time
in_time -= in_moreifo_time
# if subtraction yields anything other than 1, set to 0
np.putmask(in_time, in_time != 1, 0)
return in_time
def get_livetimes(self, fi):
with h5py.File(fi, 'r') as f:
for ct in self.ctimes:
# 'inclusive' time when at least the ifos specified by ct are on
fgt = conv.sec_to_year(f[ct].attrs['foreground_time'])
# index dict on chunk start time / coinc type
self.incl_livetimes[(get_start_dur(fi)[0], ct)] = fgt
# subtract times during which 1 more ifo was on,
# ie subtract H1L1* time from H1L1; subtract H1* time from H1; etc
for combo in self.moreifotimes(ct):
if len(combo) == len(ct) + 2:
fgt -= conv.sec_to_year(f[combo].attrs['foreground_time'])
# index dict on chunk start time / coinc time
self.livetimes[(get_start_dur(fi)[0], ct)] = fgt
class ForegroundEvents(EventRate):
def __init__(self, args, coinc_times, coinc_types=None, bin_param='mchirp',
bin_lo=None, bin_hi=None):
EventRate.__init__(self, args, coinc_times, coinc_types=coinc_types,
bin_param=bin_param, bin_lo=bin_lo, bin_hi=bin_hi)
self.thr = self.args.stat_threshold
# set of arrays in parallel containing zerolag event properties
self.starttimes = []
self.gpst = np.array([])
self.stat = np.array([])
self.ifar = np.array([])
self.masspars = np.array([])
self.start = np.array([])
self.ctime = np.array([], dtype=object) # allow unequal length strings
self.ctype = np.array([], dtype=object)
self.bg_pdf = np.array([])
self.sg_pdf = np.array([])
def add_zerolag(self, full_file):
start = get_start_dur(full_file)[0]
self.starttimes.append(start)
with h5py.File(full_file, 'r') as f:
# get stat values & threshold
_stats = f['foreground/stat'][:]
_keepstat = _stats > self.thr
# get templates & apply filter
_tids = f['foreground/template_id'][:]
# we need the template filter to have already been made
assert self.in_bin is not None
_keep = np.logical_and(_keepstat, self.in_bin[_tids])
massp = self.tpars[_tids][_keep] # filtered template params
# assign times and coinc types
_times = {}
for i in self.ifos:
_times[i] = f['foreground/' + i + '/time'][:][_keep]
# if an ifo doesn't participate, time is sentinel value -1
# event time is mean of remaining positive GPS times
meantimes = np.array([coinc_meanigz(ts)[0]
for ts in zip(*_times.values())])
_ctype = self.get_ctypes(_times)
if len(_ctype) == 0:
if self.args.verbose:
print('No events in ' + start)
return
# filter events
in_ctypes = np.array([cty in self.coinc_types for cty in _ctype])
meantimes = meantimes[in_ctypes]
# get coinc time as strings
# (strings may have different lengths)
_ctime = np.repeat(np.array([''], dtype=object), len(meantimes))
for ct in self.allctimestring:
intime = self.in_coinc_time_excl(f, ct, meantimes)
_ctime[intime == 1] = ct
if self.args.verbose:
print('Got %i events in %s time' % (len(_ctime[intime == 1]), ct))
# store
self.stat = np.append(self.stat, _stats[_keep][in_ctypes])
try: # injection analyses only have 'ifar_exc', not 'ifar'
self.ifar = np.append(self.ifar,
f['foreground/ifar'][:][_keep][in_ctypes])
except KeyError:
self.ifar = np.append(self.ifar,
f['foreground/ifar_exc'][:][_keep][in_ctypes])
self.gpst = np.append(self.gpst, meantimes)
self.masspars = np.append(self.masspars, massp)
self.start = np.append(self.start, int(start) *
np.ones_like(meantimes))
self.ctime = np.append(self.ctime, _ctime)
self.ctype = np.append(self.ctype, _ctype[in_ctypes])
def get_bg_pdf(self, bg_rate):
assert isinstance(bg_rate, BackgroundEventRate)
self.bg_pdf = np.zeros_like(self.stat) # initialize
# do the calculation by chunk / coinc time / coinc type
for st in self.starttimes:
for ct in self.allctimestring:
for cty in self.coinc_types:
if not type_in_time(ct, cty):
continue
_idx = np.logical_and((self.ctime == ct), (self.ctype == cty))
_idx = np.logical_and(_idx, (self.start == int(st)))
_vals = self.stat[_idx]
if len(_vals) == 0:
continue
# evaluate bg pdf for specific chunk, coinc time & type
_pdf = bg_rate.eval_pdf(st, ct, cty, _vals)
# store
self.bg_pdf[_idx] = _pdf
if self.args.verbose:
print('Found bg PDFs for ' + cty + ' coincs from ' + st)
def get_sg_pdf(self, sg_rate):
assert isinstance(sg_rate, SignalEventRate)
self.sg_pdf = np.zeros_like(self.stat)
for st in self.starttimes:
for ct in self.allctimestring:
for cty in self.coinc_types:
if not type_in_time(ct, cty):
continue
_idx = np.logical_and((self.ctime == ct), (self.ctype == cty))
_idx = np.logical_and(_idx, (self.start == int(st)))
_vals = self.stat[_idx]
if len(_vals) == 0:
continue
# norm of PDF is chunk-dependent so need the chunk start time
_pdf = sg_rate.eval_pdf(st, ct, cty, _vals)
# store
self.sg_pdf[_idx] = _pdf
if self.args.verbose:
print('Found sg PDFs for %s coincs in %s time from %s' %
(cty, ct, st))
class BackgroundEventRate(EventRate):
def __init__(self, args, coinc_times, coinc_types=None, bin_param='mchirp',
bin_lo=None, bin_hi=None):
EventRate.__init__(self, args, coinc_times, coinc_types=coinc_types,
bin_param=bin_param, bin_lo=bin_lo, bin_hi=bin_hi)
self.thr = self.args.stat_threshold
# BG values in dict indexed on tuple (chunk start, coinc type)
self.bg_vals = {}
self.bg_dec = {}
# BG livetimes
self.bg_livetimes = {}
# BG hist stored as bin heights / edges
self.bg_hist = {}
# Expected counts of BG events
self.exp_bg = {}
# Total expected BG count
self.norm = 0
def add_background(self, full_file):
start = get_start_dur(full_file)[0]
self.get_livetimes(full_file)
with h5py.File(full_file, 'r') as ff:
# get stat values and threshold
_bgstat = ff['background_exc/stat'][:]
_keepstat = _bgstat > self.thr
# get template ids and filter
_bgtid = ff['background_exc/template_id'][:]
# need the template filter to have already been made
assert self.in_bin is not None
_keep = np.logical_and(_keepstat, self.in_bin[_bgtid])
_bgstat = _bgstat[_keep]
_bgdec = ff['background_exc/decimation_factor'][:][_keep]
# assign coinc types
_times = {}
for i in self.ifos:
# NB times are time-shifted between ifos
_times[i] = ff['background_exc/' + i + '/time'][:][_keep]
_ctype = self.get_ctypes(_times)
for cty in self.coinc_types:
self.bg_vals[(start, cty)] = _bgstat[_ctype == cty]
self.bg_dec[(start, cty)] = _bgdec[_ctype == cty]
# get bg livetime for noise rate estimate
# - convert to years
self.bg_livetimes[(start, cty)] = conv.sec_to_year(
ff[cty].attrs['background_time_exc'])
# make histogram
bins = self.make_bins(np.max(_bgstat[_ctype == cty]), 'bg')
# hack to make larger bins for H1L1V1
if cty == 'H1L1V1':
if self.args.verbose:
print('Halving bg bins for triple bg hist')
bins = bins[::2].copy() # take every 2nd bin edge
self.bg_hist[(start, cty)] = \
np.histogram(_bgstat[_ctype == cty],
weights=_bgdec[_ctype == cty], bins=bins)
# get expected number of bg events for this chunk and coinc type
self.exp_bg[(start, cty)] = _bgdec[_ctype == cty].sum() * \
self.incl_livetimes[(start, cty)] / \
self.bg_livetimes[(start, cty)]
def plot_bg(self):
from matplotlib import pyplot as plt
for chunk_type, hist in self.bg_hist.items():
print('Plotting', chunk_type, 'background PDF ...')
xplot = np.linspace(self.thr, self.args.plot_max_stat, 500)
heights, bins = hist[0], hist[1]
logpdf, _ = log_rho_bg(xplot, heights, bins)
plt.plot(xplot, np.exp(logpdf))
# plot error bars at bin centres
lpdf, fracerr = log_rho_bg(0.5 * (bins[:-1] + bins[1:]), heights, bins)
plt.errorbar(0.5 * (bins[:-1] + bins[1:]), np.exp(lpdf),
yerr=np.exp(lpdf) * fracerr, fmt='none')
plt.semilogy()
plt.grid(True)
plt.xlim(xmax=self.args.plot_max_stat + 0.5)
plt.ylim(ymin=0.7 * np.exp(logpdf.min()))
plt.xlabel('Ranking statistic')
plt.ylabel('Background PDF')
plt.savefig(self.args.plot_dir + '%s-bg_pdf-%s' %
(chunk_type[1], chunk_type[0]) + '.png')
plt.close()
def get_norms(self):
for count in self.exp_bg.values():
self.norm += count
def eval_pdf(self, chunk, ctime, ctype, statvals):
# given statistic values all in the same data chunk and coinc type,
# evaluate the background pdf normalized over all chunks & types
assert self.norm > 0
chunk_type = (chunk, ctype)
# fraction of expected noise events in given chunk & coinc type
frac_chunk_type = self.exp_bg[chunk_type] / self.norm
# fraction of inj in specified chunk, coinc type *and* time
frac_in_time = self.livetimes[(chunk, ctime)] /\
self.incl_livetimes[chunk_type]
# unpack heights / bins from bg hist object
local_pdfs, _ = log_rho_bg(statvals, *self.bg_hist[chunk_type])
return local_pdfs + np.log(frac_chunk_type * frac_in_time)
class SignalEventRate(EventRate):
def __init__(self, args, coinc_times, coinc_types=None, bin_param='mchirp',
bin_lo=None, bin_hi=None):
EventRate.__init__(self, args, coinc_times, coinc_types=coinc_types,
bin_param=bin_param, bin_lo=bin_lo, bin_hi=bin_hi)
self.thr = self.args.stat_threshold
self.starts = [] # bookkeeping
# for the moment roll all inj chunks together
# but sort both by coinc time and coinc type
self.inj_vals = {} # dict indexed on tuple (coinc time, coinc type)
self.fg_bins = {}
self.norm = 0
def add_injections(self, inj_file, fg_file):
# fg_file only needed for coinc time info :/
self.starts.append(get_start_dur(inj_file)[0])
self.get_livetimes(inj_file)
with h5py.File(inj_file, 'r') as jf:
# get stat values and threshold
_injstat = jf['found_after_vetoes/stat'][:]
_keepstat = _injstat > self.thr
# get template ids and filter
_injtid = jf['found_after_vetoes/template_id'][:]
assert self.in_bin is not None
_keep = np.logical_and(_keepstat, self.in_bin[_injtid])
_injstat = _injstat[_keep]
# assign coinc types
_times = {}
for i in self.ifos:
_times[i] = jf['found_after_vetoes/' + i + '/time'][:][_keep]
meantimes = np.array([coinc_meanigz(ts)[0]
for ts in zip(*_times.values())])
_ctype = self.get_ctypes(_times)
# get coinc time as strings
# (strings may have different lengths)
_ctime = np.repeat(np.array([''], dtype=object), len(meantimes))
for ct in self.allctimestring:
# get coinc time info from segments in fg file
with h5py.File(fg_file, 'r') as fgf:
    intime = self.in_coinc_time_excl(fgf, ct, meantimes)
_ctime[intime == 1] = ct # do we need this?
if self.args.verbose:
print('Got %i ' % (intime == 1).sum() + 'inj in %s time' % ct)
# filter by coinc type and add to array
for cty in self.coinc_types:
if not type_in_time(ct, cty):
continue
my_vals = _injstat[np.logical_and(_ctype == cty, intime == 1)]
if self.args.verbose:
print('%d ' % len(my_vals) + 'are %s coincs' % cty)
if (ct, cty) not in self.inj_vals: # initialize
self.inj_vals[(ct, cty)] = np.array([])
if len(my_vals) > 0:
self.inj_vals[(ct, cty)] = \
np.append(self.inj_vals[(ct, cty)], my_vals)
del intime, my_vals
def make_all_bins(self):
for ct in self.allctimestring:
for cty in self.coinc_types:
if not type_in_time(ct, cty):
continue
vals = self.inj_vals[(ct, cty)]
# get norm of fg histogram by taking bins out to max injection stat
binmax = vals.max() * 1.01
self.fg_bins[(ct, cty)] = self.make_bins(binmax, 'inj')
def plot_inj(self):
from matplotlib import pyplot as plt
for ct in self.allctimestring:
for cty in self.coinc_types:
if not type_in_time(ct, cty):
continue
print('Plotting ' + cty + ' signal PDF in ' + ct + ' time ...')
samples = self.inj_vals[(ct, cty)]
bins = self.fg_bins[(ct, cty)]
xplot = np.logspace(np.log10(self.thr),
np.log10(samples.max()), 500)
logpdf, _ = log_rho_fg(xplot, samples, bins)
plt.plot(xplot, np.exp(logpdf))
# plot error bars at bin centres
lpdf, fracerr = log_rho_fg(0.5 * (bins[:-1] + bins[1:]),
samples, bins)
plt.errorbar(0.5 * (bins[:-1] + bins[1:]), np.exp(lpdf),
yerr=np.exp(lpdf) * fracerr, fmt='none')
plt.semilogy()
plt.grid(True)
# zoom in on the 'interesting' range
plt.xlim(xmin=self.thr, xmax=2. * self.args.plot_max_stat)
plt.ylim(ymin=0.7 * np.exp(logpdf.min()))
plt.title(r'%i injs plotted, \# of bins %i' %
(len(samples), len(bins) - 1))
plt.xlabel('Ranking statistic')
plt.ylabel('Signal PDF')
plt.savefig(self.args.plot_dir + '%s-fg_pdf-%s' % (ct, cty)
+ '.png')
plt.close()
def get_norms(self):
for vals in self.inj_vals.values():
# injections don't have weights/decimation
self.norm += float(len(vals))
def eval_pdf(self, chunk, ctime, ctype, statvals):
# given statistic values in the same chunk, coinc time and coinc type,
# evaluate the signal pdf normalized over all chunks, times and types
assert self.norm > 0
time_type = (ctime, ctype)
# fraction of inj in specified coinc time and type
frac_time_type = float(len(self.inj_vals[time_type])) / self.norm
# total livetime for specified coinc time
total_coinc_time = sum([self.livetimes[(ch, ctime)] for ch in self.starts])
# fraction of inj in specified chunk *and* coinc time/type
this_norm = frac_time_type * self.livetimes[(chunk, ctime)] / \
total_coinc_time
local_pdfs, _ = log_rho_fg(statvals, self.inj_vals[time_type],
self.fg_bins[time_type])
return local_pdfs + np.log(this_norm)
__all__ = ['filter_bin_lo_hi', 'filter_tmplt_mchirp', 'read_full_data',
'read_full_data_mchirp', 'log_rho_bg', 'log_rho_fg_analytic',
'log_rho_fg', 'get_start_dur', 'in_coinc_time_incl', 'alltimes',
'ifos_from_combo', 'type_in_time', 'EventRate', 'ForegroundEvents',
'BackgroundEventRate', 'SignalEventRate']
| 30,477
| 41.926761
| 86
|
py
|
pycbc
|
pycbc-master/pycbc/population/live_pastro.py
|
import logging
import h5py
import numpy
from lal import YRJUL_SI as lal_s_per_yr
from pycbc.tmpltbank import bank_conversions as bankconv
from pycbc.events import triggers
from . import fgmc_functions as fgmcfun
def check_template_param_bin_data(spec_json):
"""
Parameters
----------
spec_json: JSON dictionary-like object
Result of parsing json file containing static data
Returns
-------
spec_json: dictionary
"""
# Check the necessary data are present
assert 'param' in spec_json
assert 'bin_edges' in spec_json # should be a list of floats
assert 'sig_per_yr_binned' in spec_json # signal rate per bin (per year)
# Do the lengths of bin arrays match?
assert len(spec_json['bin_edges']) == \
len(spec_json['sig_per_yr_binned']) + 1
assert 'ref_bns_horizon' in spec_json # float
assert 'netsnr_thresh' in spec_json # float
return spec_json
def check_template_param_bin_farlim_data(spec_json):
"""
Parameters
----------
spec_json: JSON dictionary-like object
Result of parsing json file containing static data
Returns
-------
spec_json: dictionary
"""
# Standard template param bin checks
check_template_param_bin_data(spec_json)
# In addition, need limiting FAR and SNR values
assert 'limit_far' in spec_json
assert 'limit_snr' in spec_json
return spec_json
def read_template_bank_param(spec_d, bankf):
"""
Parameters
----------
spec_d: dictionary
Prerequisite data for p astro calc
bankf: string
Path to HDF5 template bank file
Returns
-------
bank_data: dictionary
Template counts binned over specified param
"""
with h5py.File(bankf, 'r') as bank:
# All the templates
tids = numpy.arange(len(bank['mass1']))
# Get param vals
logging.info('Getting %s values from bank', spec_d['param'])
parvals = bankconv.get_bank_property(spec_d['param'], bank, tids)
counts, edges = numpy.histogram(parvals, bins=spec_d['bin_edges'])
bank_data = {'bin_edges': edges, 'tcounts': counts, 'num_t': counts.sum()}
logging.info('Binned template counts: %s', counts)
return bank_data
def noise_density_from_far(far, exp_fac):
"""
Exponential model of noise rate density per time per (reweighted) SNR
b(rho) ~ k exp(-alpha * rho),
where alpha is the 'index', yields the relation
b(rho) = alpha * FAR(rho),
where FAR is the integral of b(rho) from rho to infinity.
E.g. fits to single-ifo noise typically yield alpha ~ 6
"""
return exp_fac * far
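# Example (sketch, assuming the typical single-ifo index alpha ~ 6):
#   >>> noise_density_from_far(1e-8, 6.)   # 6e-08 per second per unit SNR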
def trials_type(ntriggered, nactive):
"""
The trials factor previously applied to an individual event type FAR
For single triggers, the factor is the number of active ifos
For coincs, the factor is either 1 (in double time) or 6 (in triple time)
6 accounts for both the trials over coinc type and pvalue (non-)followup
%%% NOTE - ONLY VALID FOR 2- OR 3-IFO SEARCH %%%
"""
if ntriggered == 1:
return nactive
if ntriggered == 2 and nactive == 2:
return 1
if ntriggered == 2 and nactive == 3:
return 6
# All valid inputs are exhausted, throw an error
raise ValueError(f"I don't know what to do with {ntriggered} triggered and"
f" {nactive} active ifos!")
def signal_pdf_from_snr(netsnr, thresh):
""" FGMC approximate signal distribution ~ SNR ** -4
"""
return numpy.exp(fgmcfun.log_rho_fg_analytic(netsnr, thresh))
def signal_rate_rescale(horizons, ref_dhor):
"""
Compute a factor proportional to the rate of signals with given network SNR
to account for network sensitivity variation relative to a reference state
"""
# Combine sensitivities over ifos in a way analogous to network SNR
net_horizon = sum(hor ** 2. for hor in horizons.values()) ** 0.5
# signal rate is proportional to horizon distance cubed
return net_horizon ** 3. / ref_dhor ** 3.
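# Example (sketch, assumed horizon values): with horizons {'H1': 100., 'L1': 120.}
# the network horizon is sqrt(100**2 + 120**2) ~ 156.2 Mpc, so relative to a
# reference horizon of 150 Mpc the returned factor is (156.2 / 150.)**3 ~ 1.13.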
def signal_rate_trig_type(horizons, sens_ifos, trig_ifos):
"""
Compute a factor accounting for the fraction of signals seen as a given
trigger type
"""
# Single-ifo time
if len(sens_ifos) == 1:
assert len(trig_ifos) == 1
return 1.
# Single trigger in multi-ifo time
if len(trig_ifos) == 1:
# Sensitive volume scales with horizon^3
# Suppress horizon by sqrt(2) wrt coincs
return (horizons[trig_ifos[0]] / 2**0.5) ** 3. /\
sum([horizons[i] ** 3. for i in sens_ifos])
# Double coinc : volume determined by less sensitive ifo
# Compare to 2nd most sensitive ifo over the observing network
return sorted([horizons[i] for i in trig_ifos])[0] ** 3. /\
sorted([horizons[i] for i in sens_ifos])[-2] ** 3.
def template_param_bin_pa(padata, trdata, horizons):
"""
Parameters
----------
padata: PAstroData instance
Static information on p astro calculation
trdata: dictionary
Trigger properties
horizons: dictionary
BNS horizon distances keyed on ifo
Returns
-------
p_astro, p_terr: tuple of floats
"""
massspin = (trdata['mass1'], trdata['mass2'],
trdata['spin1z'], trdata['spin2z'])
trig_param = triggers.get_param(padata.spec['param'], None, *massspin)
# NB digitize gives '1' for first bin, '2' for second etc.
bind = numpy.digitize(trig_param, padata.bank['bin_edges']) - 1
logging.debug('Trigger %s is in bin %i', padata.spec['param'], bind)
# Get noise rate density
if 'bg_fac' not in padata.spec:
expfac = 6.
else:
expfac = padata.spec['bg_fac']
# FAR is in Hz, therefore convert to rate per year (per SNR)
dnoise = noise_density_from_far(trdata['far'], expfac) * lal_s_per_yr
logging.debug('FAR %.3g, noise density per yr per SNR %.3g',
trdata['far'], dnoise)
# Scale by fraction of templates in bin
dnoise *= padata.bank['tcounts'][bind] / padata.bank['num_t']
logging.debug('Noise density in bin %.3g', dnoise)
# Get signal rate density per year at given SNR
dsig = signal_pdf_from_snr(trdata['network_snr'],
padata.spec['netsnr_thresh'])
logging.debug('SNR %.3g, signal pdf %.3g', trdata['network_snr'], dsig)
dsig *= padata.spec['sig_per_yr_binned'][bind]
logging.debug('Signal density per yr per SNR in bin %.3g', dsig)
# Scale by network sensitivity accounting for BNS horizon distances
dsig *= signal_rate_rescale(horizons, padata.spec['ref_bns_horizon'])
logging.debug('After horizon rescaling %.3g', dsig)
p_astro = dsig / (dsig + dnoise)
logging.debug('p_astro %.4g', p_astro)
return p_astro, 1 - p_astro
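# Illustrative call sketch (all values below are assumed, not taken from a
# real analysis): trdata needs component masses and spins plus 'far' in Hz
# and 'network_snr'; horizons maps each ifo to its BNS horizon distance.
#   >>> trdata = {'mass1': 1.6, 'mass2': 1.3, 'spin1z': 0., 'spin2z': 0.,
#   ...           'far': 1e-9, 'network_snr': 12.}
#   >>> horizons = {'H1': 110., 'L1': 140.}
#   >>> # p_astro, p_terr = template_param_bin_pa(padata, trdata, horizons)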
def template_param_bin_types_pa(padata, trdata, horizons):
"""
Parameters
----------
padata: PAstroData instance
Static information on p astro calculation
trdata: dictionary
Trigger properties
horizons: dictionary
BNS horizon distances keyed on ifo
Returns
-------
p_astro, p_terr: tuple of floats
"""
massspin = (trdata['mass1'], trdata['mass2'],
trdata['spin1z'], trdata['spin2z'])
trig_param = triggers.get_param(padata.spec['param'], None, *massspin)
# NB digitize gives '1' for first bin, '2' for second etc.
bind = numpy.digitize(trig_param, padata.bank['bin_edges']) - 1
logging.debug('Trigger %s is in bin %i', padata.spec['param'], bind)
# Get noise rate density
if 'bg_fac' not in padata.spec:
expfac = 6.
else:
expfac = padata.spec['bg_fac']
# List of ifos over trigger threshold
tr_ifos = trdata['triggered']
# FAR is in Hz, therefore convert to rate per year (per SNR)
dnoise = noise_density_from_far(trdata['far'], expfac) * lal_s_per_yr
logging.debug('FAR %.3g, noise density per yr per SNR %.3g',
trdata['far'], dnoise)
# Scale by fraction of templates in bin
dnoise *= padata.bank['tcounts'][bind] / padata.bank['num_t']
logging.debug('Noise density in bin %.3g', dnoise)
# Back out trials factor to give noise density for triggered event type
dnoise /= float(trials_type(len(tr_ifos), len(trdata['sensitive'])))
logging.debug('Divide by previously applied trials factor: %.3g', dnoise)
# Get signal rate density per year at given SNR
dsig = signal_pdf_from_snr(trdata['network_snr'],
padata.spec['netsnr_thresh'])
logging.debug('SNR %.3g, signal pdf %.3g', trdata['network_snr'], dsig)
dsig *= padata.spec['sig_per_yr_binned'][bind]
logging.debug('Total signal density per yr per SNR in bin %.3g', dsig)
# Scale by network sensitivity accounting for BNS horizons
dsig *= signal_rate_rescale(horizons, padata.spec['ref_bns_horizon'])
logging.debug('After network horizon rescaling %.3g', dsig)
# Scale by relative signal rate in triggered ifos
dsig *= signal_rate_trig_type(horizons, trdata['sensitive'], tr_ifos)
logging.debug('After triggered ifo rate rescaling %.3g', dsig)
p_astro = dsig / (dsig + dnoise)
logging.debug('p_astro %.4g', p_astro)
return p_astro, 1 - p_astro
def template_param_bin_types_farlim_pa(padata, trdata, horizons):
"""
Parameters
----------
padata: PAstroData instance
Static information on p astro calculation
trdata: dictionary
Trigger properties
horizons: dictionary
BNS horizon distances keyed on ifo
Returns
-------
p_astro, p_terr: tuple of floats
"""
# If the network SNR and FAR indicate saturation of the FAR estimate,
# set them to specified fixed values
trdata = padata.apply_significance_limits(trdata)
# Now perform standard calculation with event types
return template_param_bin_types_pa(padata, trdata, horizons)
__all__ = [
"check_template_param_bin_data",
"read_template_bank_param",
"noise_density_from_far",
"signal_pdf_from_snr",
"signal_rate_rescale",
"signal_rate_trig_type",
"template_param_bin_pa",
"template_param_bin_types_pa",
"template_param_bin_types_farlim_pa",
]
| 10,265
| 33.682432
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/population/scale_injections.py
|
import numpy as np
from numpy import log
import copy, h5py
from scipy.interpolate import interp1d
from scipy.integrate import quad
from astropy.cosmology import WMAP9 as cosmo
from pycbc.conversions import mchirp_from_mass1_mass2 as m1m2tomch
_mch_BNS = 1.4/2**.2
_redshifts, _d_lum, _I = np.arange(0., 5., 0.01), [], []
_save_params = ['mass1', 'mass2', 'spin1z', 'spin2z', 'spin1y', 'spin2y',
'spin1x', 'spin2x', 'distance', 'end_time']
for zz in _redshifts:
_d_lum.append(cosmo.luminosity_distance(zz).value)
_dlum_interp = interp1d(_d_lum, _redshifts)
def read_injections(sim_files, m_dist, s_dist, d_dist):
''' Read all the injections from the provided files.
Each file must correspond to an individual injection set, i.e. not a file
that combines all the injections in a run.
Identifies the injection strategies, finds the parameter boundaries, and
collects the injections according to GPS time.
Parameters
----------
sim_files: list
List containing the names of the simulation files
m_dist: list
The mass distribution used in the simulation runs
s_dist: list
The spin distribution used in the simulation runs
d_dist: list
The distance distribution used in the simulation runs
Returns
-------
injections: dictionary
Contains the organized information about the injections
'''
injections = {}
min_d, max_d = 1e12, 0
nf = len(sim_files)
for i in range(nf):
key = str(i)
injections[key] = process_injections(sim_files[i])
injections[key]['file_name'] = sim_files[i]
injections[key]['m_dist'] = m_dist[i]
injections[key]['s_dist'] = s_dist[i]
injections[key]['d_dist'] = d_dist[i]
mass1, mass2 = injections[key]['mass1'], injections[key]['mass2']
distance = injections[key]['distance']
mchirp = m1m2tomch(mass1, mass2)
injections[key]['chirp_mass'] = mchirp
injections[key]['total_mass'] = mass1 + mass2
injections[key]['mtot_range'] = [min(mass1 + mass2), max(mass1 + mass2)]
injections[key]['m1_range'] = [min(mass1), max(mass1)]
injections[key]['m2_range'] = [min(mass2), max(mass2)]
injections[key]['d_range'] = [min(distance), max(distance)]
min_d, max_d = min(min_d, min(distance)), max(max_d, max(distance))
injections['z_range'] = [dlum_to_z(min_d), dlum_to_z(max_d)]
return injections
def estimate_vt(injections, mchirp_sampler, model_pdf, **kwargs):
#Try including ifar threshold
'''Based on injection strategy and the desired astro model estimate the injected volume.
Scale injections and estimate sensitive volume.
Parameters
----------
injections: dictionary
Dictionary obtained after reading injections from read_injections
mchirp_sampler: function
Sampler for producing chirp mass samples for the astro model.
model_pdf: function
The PDF for astro model in mass1-mass2-spin1z-spin2z space.
This is easily extendible to include precession
kwargs: key words
Inputs for thresholds and astrophysical models
Returns
-------
injection_chunks: dictionary
The input dictionary with VT and VT error included with the injections
'''
thr_var = kwargs.get('thr_var')
thr_val = kwargs.get('thr_val')
nsamples = 1000000 #Used to calculate injected astro volume
injections = copy.deepcopy(injections)
min_z, max_z = injections['z_range']
V = quad(contracted_dVdc, 0., max_z)[0]
z_astro = astro_redshifts(min_z, max_z, nsamples)
astro_lum_dist = cosmo.luminosity_distance(z_astro).value
mch_astro = np.array(mchirp_sampler(nsamples = nsamples, **kwargs))
mch_astro_det = mch_astro * (1. + z_astro)
idx_within = np.zeros(nsamples)
for key in injections.keys():
if key == 'z_range':
# 'z_range' is not an injection set; skip it here (and again below)
continue
mchirp = injections[key]['chirp_mass']
min_mchirp, max_mchirp = min(mchirp), max(mchirp)
distance = injections[key]['distance']
if injections[key]['d_dist'] == 'uniform':
d_min, d_max = min(distance), max(distance)
elif injections[key]['d_dist'] == 'dchirp':
d_fid_min = min(distance / (mchirp/_mch_BNS)**(5/6.))
d_fid_max = max(distance / (mchirp/_mch_BNS)**(5/6.))
d_min = d_fid_min * (mch_astro_det/_mch_BNS)**(5/6.)
d_max = d_fid_max * (mch_astro_det/_mch_BNS)**(5/6.)
bound = np.sign((max_mchirp-mch_astro_det)*(mch_astro_det-min_mchirp))
bound += np.sign((d_max - astro_lum_dist)*(astro_lum_dist - d_min))
idx = np.where(bound == 2)
idx_within[idx] = 1
inj_V0 = 4*np.pi*V*len(idx_within[idx_within == 1])/float(nsamples)
injections['inj_astro_vol'] = inj_V0
# Estimate the sensitive volume
z_range = injections['z_range']
V_min = quad(contracted_dVdc, 0., z_range[0])[0]
V_max = quad(contracted_dVdc, 0., z_range[1])[0]
thr_falloff, i_inj, i_det, i_det_sq = [], 0, 0, 0
gps_min, gps_max = 1e15, 0
keys = injections.keys()
for key in keys:
if key == 'z_range' or key == 'inj_astro_vol':
continue
data = injections[key]
distance = data['distance']
mass1, mass2 = data['mass1'], data['mass2']
spin1z, spin2z = data['spin1z'], data['spin2z']
mchirp = data['chirp_mass']
gps_min = min(gps_min, min(data['end_time']))
gps_max = max(gps_max, max(data['end_time']))
z_inj = dlum_to_z(distance)
m1_sc, m2_sc = mass1/(1 + z_inj), mass2/(1 + z_inj)
p_out = model_pdf(m1_sc, m2_sc, spin1z, spin2z)
p_out *= pdf_z_astro(z_inj, V_min, V_max)
p_in = 0
J = cosmo.luminosity_distance(z_inj + 0.0005).value
J -= cosmo.luminosity_distance(z_inj - 0.0005).value
J = abs(J)/0.001 # A quick way to get dD_l/dz
# Sum probability of injections from j-th set for all the strategies
for key2 in keys:
if key2 == 'z_range' or key2 == 'inj_astro_vol':
continue
dt_j = injections[key2]
dist_j = dt_j['distance']
m1_j, m2_j = dt_j['mass1'], dt_j['mass2']
s1x_2, s2x_2 = dt_j['spin1x'], dt_j['spin2x']
s1y_2, s2y_2 = dt_j['spin1y'], dt_j['spin2y']
s1z_2, s2z_2 = dt_j['spin1z'], dt_j['spin2z']
s1 = np.sqrt(s1x_2**2 + s1y_2**2 + s1z_2**2)
s2 = np.sqrt(s2x_2**2 + s2y_2**2 + s2z_2**2)
mch_j = dt_j['chirp_mass']
#Get probability density for injections in mass-distance space
if dt_j['m_dist'] == 'totalMass':
lomass = min(min(m1_j), min(m2_j))
himass = max(max(m1_j), max(m2_j))
lomass_2, himass_2 = lomass, himass
elif dt_j['m_dist'] == 'componentMass' or dt_j['m_dist'] == 'log':
lomass, himass = min(m1_j), max(m1_j)
lomass_2, himass_2 = min(m2_j), max(m2_j)
if dt_j['d_dist'] == 'dchirp':
l_dist = min(dist_j / (mch_j/_mch_BNS)**(5/6.))
h_dist = max(dist_j / (mch_j/_mch_BNS)**(5/6.))
elif dt_j['d_dist'] == 'uniform':
l_dist, h_dist = min(dist_j), max(dist_j)
mdist = dt_j['m_dist']
prob_mass = inj_mass_pdf(mdist, mass1, mass2,
lomass, himass, lomass_2, himass_2)
ddist = dt_j['d_dist']
prob_dist = inj_distance_pdf(ddist, distance, l_dist,
h_dist, mchirp)
hspin1, hspin2 = max(s1), max(s2)
prob_spin = inj_spin_pdf(dt_j['s_dist'], hspin1, spin1z)
prob_spin *= inj_spin_pdf(dt_j['s_dist'], hspin2, spin2z)
p_in += prob_mass * prob_dist * prob_spin * J * (1 + z_inj)**2
p_in[p_in == 0] = 1e12
p_out_in = p_out/p_in
i_inj += np.sum(p_out_in)
i_det += np.sum((p_out_in)[data[thr_var] > thr_val])
i_det_sq += np.sum((p_out_in)[data[thr_var] > thr_val]**2)
idx_thr = np.where(data[thr_var] > thr_val)
thrs = data[thr_var][idx_thr]
ratios = p_out_in[idx_thr]/max(p_out_in[idx_thr])
rndn = np.random.uniform(0, 1, len(ratios))
idx_ratio = np.where(ratios > rndn)
thr_falloff.append(thrs[idx_ratio])
inj_V0 = injections['inj_astro_vol']
injections['ninj'] = i_inj
injections['ndet'] = i_det
injections['ndetsq'] = i_det_sq
injections['VT'] = ((inj_V0*i_det/i_inj) * (gps_max - gps_min)/31557600)
injections['VT_err'] = injections['VT'] * np.sqrt(i_det_sq)/i_det
injections['thr_falloff'] = np.hstack(thr_falloff)
return injections
def process_injections(hdffile):
"""Function to read in the injection file and
extract the found injections and all injections
Parameters
----------
hdffile: hdf file
File for which injections are to be processed
Returns
-------
data: dictionary
Dictionary containing injection read from the input file
"""
data = {}
with h5py.File(hdffile, 'r') as inp:
found_index = inp['found_after_vetoes/injection_index'][:]
for param in _save_params:
data[param] = inp['injections/'+param][:]
ifar = np.zeros_like(data[_save_params[0]])
ifar[found_index] = inp['found_after_vetoes/ifar'][:]
data['ifar'] = ifar
stat = np.zeros_like(data[_save_params[0]])
stat[found_index] = inp['found_after_vetoes/stat'][:]
data['stat'] = stat
return data
def dlum_to_z(dl):
''' Get the redshift for a luminosity distance
Parameters
----------
dl: array
The array of luminosity distances
Returns
-------
array
The redshift values corresponding to the luminosity distances
'''
return _dlum_interp(dl)
def astro_redshifts(min_z, max_z, nsamples):
'''Sample the redshifts for sources, with redshift
independent rate, using standard cosmology
Parameters
----------
min_z: float
Minimum redshift
max_z: float
Maximum redshift
nsamples: int
Number of samples
Returns
-------
z_astro: array
nsamples of redshift, between min_z, max_z, by standard cosmology
'''
dz, fac = 0.001, 3.0
# use interpolation instead of directly estimating all the pdfz for rndz
V = quad(contracted_dVdc, 0., max_z)[0]
zbins = np.arange(min_z, max_z + dz/2., dz)
zcenter = (zbins[:-1] + zbins[1:]) / 2
pdfz = cosmo.differential_comoving_volume(zcenter).value/(1+zcenter)/V
int_pdf = interp1d(zcenter, pdfz, bounds_error=False, fill_value=0)
rndz = np.random.uniform(min_z, max_z, int(fac*nsamples))
pdf_zs = int_pdf(rndz)
maxpdf = max(pdf_zs)
rndn = np.random.uniform(0, 1, int(fac*nsamples)) * maxpdf
diff = pdf_zs - rndn
idx = np.where(diff > 0)
z_astro = rndz[idx]
np.random.shuffle(z_astro)
z_astro.resize(nsamples)
return z_astro
def pdf_z_astro(z, V_min, V_max):
''' Get the probability density for the rate of events
at a redshift assuming standard cosmology
'''
return contracted_dVdc(z)/(V_max - V_min)
def contracted_dVdc(z):
#Return the time-dilated differential comoving volume
return cosmo.differential_comoving_volume(z).value/(1+z)
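# Example (sketch): the astrophysical redshift PDF used above is the
# time-dilated comoving volume element normalized over [0, max_z], e.g.
#   >>> V = quad(contracted_dVdc, 0., 1.)[0]
#   >>> pdf_at_half = contracted_dVdc(0.5) / V   # same as pdf_z_astro(0.5, 0., V)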
##### Defining current standard strategies used for making injections #####
def inj_mass_pdf(key, mass1, mass2, lomass, himass, lomass_2 = 0, himass_2 = 0):
'''Estimate the probability density based on the injection strategy
Parameters
----------
key: string
Injection strategy
mass1: array
First mass of the injections
mass2: array
Second mass of the injections
lomass: float
Lower bound of the mass distribution (first component)
himass: float
Upper bound of the mass distribution (first component)
lomass_2: float, optional
Lower bound of the second-component mass distribution
himass_2: float, optional
Upper bound of the second-component mass distribution
Returns
-------
pdf: array
Probability density of the injections
'''
mass1, mass2 = np.array(mass1), np.array(mass2)
if key == 'totalMass':
# Returns the PDF of mass when total mass is uniformly distributed.
# Both the component masses have the same distribution for this case.
# Parameters
# ----------
# lomass: lower component mass
# himass: higher component mass
bound = np.sign((lomass + himass) - (mass1 + mass2))
bound += np.sign((himass - mass1)*(mass1 - lomass))
bound += np.sign((himass - mass2)*(mass2 - lomass))
idx = np.where(bound != 3)
pdf = 1./(himass - lomass)/(mass1 + mass2 - 2 * lomass)
pdf[idx] = 0
return pdf
if key == 'componentMass':
# Returns the PDF of mass when component mass is uniformly
# distributed. Component masses are independent for this case.
# Parameters
# ----------
# lomass: lower component mass
# himass: higher component mass
bound = np.sign((himass - mass1)*(mass1 - lomass))
bound += np.sign((himass_2 - mass2)*(mass2 - lomass_2))
idx = np.where(bound != 2)
pdf = np.ones_like(mass1) / (himass - lomass) / (himass_2 - lomass_2)
pdf[idx] = 0
return pdf
if key == 'log':
# Returns the PDF of mass when component mass is uniform in log.
# Component masses are independent for this case.
# Parameters
# ----------
# lomass: lower component mass
# himass: higher component mass
bound = np.sign((himass - mass1)*(mass1 - lomass))
bound += np.sign((himass_2 - mass2)*(mass2 - lomass_2))
idx = np.where(bound != 2)
pdf = 1 / (log(himass) - log(lomass)) / (log(himass_2) - log(lomass_2))
pdf /= (mass1 * mass2)
pdf[idx] = 0
return pdf
def inj_spin_pdf(key, high_spin, spinz):
''' Estimate the probability density of the
injections for the spin distribution.
Parameters
----------
key: string
Injections strategy
high_spin: float
Maximum spin used in the strategy
spinz: array
Spin of the injections (for one component)
'''
# If the data comes from disable_spin simulation
if spinz[0] == 0:
return np.ones_like(spinz)
spinz = np.array(spinz)
bound = np.sign(np.absolute(high_spin) - np.absolute(spinz))
bound += np.sign(1 - np.absolute(spinz))
if key == 'precessing':
# Returns the PDF of spins when total spin is
# isotropically distributed. Both the component
# masses have the same distribution for this case.
# p(s_z) = ln(high_spin / |s_z|) / (2 * high_spin) for 0 < |s_z| < high_spin
pdf = (np.log(high_spin) - np.log(np.abs(spinz))) / high_spin / 2
idx = np.where(bound != 2)
pdf[idx] = 0
return pdf
if key == 'aligned':
# Returns the PDF of mass when spins are aligned and uniformly
# distributed. Component spins are independent for this case.
pdf = (np.ones_like(spinz) / 2 / high_spin)
idx = np.where(bound != 2)
pdf[idx] = 0
return pdf
if key == 'disable_spin':
# Returns unit array
pdf = np.ones_like(spinz)
return pdf
def inj_distance_pdf(key, distance, low_dist, high_dist, mchirp = 1):
''' Estimate the probability density of the
injections for the distance distribution.
Parameters
----------
key: string
Injections strategy
distance: array
Array of distances
low_dist: float
Lower value of distance used in the injection strategy
high_dist: float
Higher value of distance used in the injection strategy
'''
distance = np.array(distance)
if key == 'uniform':
# Returns the PDF at a distance when
# distance is uniformly distributed.
pdf = np.ones_like(distance)/(high_dist - low_dist)
bound = np.sign((high_dist - distance)*(distance - low_dist))
idx = np.where(bound != 1)
pdf[idx] = 0
return pdf
if key == 'dchirp':
# Returns the PDF at a distance when distance is uniformly
# distributed but scaled by the chirp mass
weight = (mchirp/_mch_BNS)**(5./6)
pdf = np.ones_like(distance) / weight / (high_dist - low_dist)
bound = np.sign((weight*high_dist - distance)*(distance - weight*low_dist))
idx = np.where(bound != 1)
pdf[idx] = 0
return pdf
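# Example (sketch): for 'uniform' injections the PDF is flat between the
# distance limits; for 'dchirp' the same range is rescaled per injection by
# (mchirp / _mch_BNS)**(5./6), lowering the PDF accordingly.
#   >>> inj_distance_pdf('uniform', np.array([50.]), 10., 110.)   # [0.01]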
| 16,956
| 32.379921
| 92
|
py
|
pycbc
|
pycbc-master/pycbc/population/fgmc_plots.py
|
# Copyright (C) 2021 Jolien Creighton & Thomas Dent
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
import json
import numpy
from matplotlib import figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
def plot_setup(*args):
# reduce scale of codeclimate complaints
fig = figure.Figure()
FigureCanvas(fig)
ax = fig.gca()
ax.grid(True)
return fig, ax
def plotodds(rankstats, p_b):
# odds vs ranking stat
fig, ax = plot_setup()
ax.loglog()
ax.plot(rankstats, (1.0 - p_b) / p_b, 'k.')
ax.plot([rankstats.min(), rankstats.max()], [1.0, 1.0], 'c--')
ax.set_title(r'Foreground/Background Odds')
ax.set_xlabel(r'ranking statistic')
ax.set_ylabel(r'$P_1/P_0$')
ax.set_xlim(0.99 * rankstats.min(), 1.2 * rankstats.max())
return fig
def plotpbg(rankstats, p_b):
# p_terr vs ranking stat
fig, ax = plot_setup()
ax.loglog()
ax.plot(rankstats, p_b, 'k.')
ax.set_title(r'Probability of background origin')
ax.set_xlabel(r'ranking statistic')
ax.set_ylabel(r'$P_0$')
ax.set_xlim(0.99 * rankstats.min(), 1.2 * rankstats.max())
return fig
def plotoddsifar(ifar, p_b):
# odds vs IFAR
fig, ax = plot_setup()
ax.loglog()
ax.plot(ifar, (1.0 - p_b) / p_b, 'k.')
ax.plot([ifar.min(), ifar.max()], [1.0, 1.0], 'c--')
ax.set_title(r'Foreground/Background Odds')
ax.set_xlabel(r'IFAR')
ax.set_ylabel(r'$P_1/P_0$')
ax.set_xlim(0.9 * ifar.min(), 1.1 * ifar.max())
return fig
def plotfdr(p_b, ntop):
# False dismissal rate vs p_terr
fig, ax = plot_setup()
# get smallest N p_terr values
p_b = numpy.sort(p_b)[:ntop]
# cumulative probable noise/signal counts
cum_false = p_b.cumsum()
cum_true = (1. - p_b).cumsum()
ax.semilogy()
ax.plot(p_b, cum_false / cum_true, 'b+')
ax.plot(p_b, 1. / (numpy.arange(len(p_b)) + 1), 'c--', label=r'1 noise event')
ax.legend()
ax.set_xlabel(r'$p_{\rm terr}$')
ax.set_ylabel(r'Cumulative $p_{\rm terr}$ / Cumulative $p_{\rm astro}$')
ax.set_xlim(0., 1.05 * p_b.max())
return fig
def finalize_plot(fig, args, extensions, name, pltype, tag):
# Helper function
for extn in extensions:
        filename = args.plot_dir + '_'.join(name.split()) + '_' + pltype + tag + extn
if args.verbose:
print('writing %s ...' % filename)
fig.savefig(filename)
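# Minimal usage sketch (demonstration helper only): making and saving an
# odds-vs-statistic plot. The toy statistic/p_terr values and the 'args'
# attributes (plot_dir, verbose) are assumptions chosen for this example.
def _example_odds_plot():
    from types import SimpleNamespace
    stats = numpy.linspace(8., 12., 50)
    p_b = 1. / (1. + numpy.exp(stats - 10.))  # toy p_terr falling with statistic
    fig = plotodds(stats, p_b)
    args = SimpleNamespace(plot_dir='./', verbose=False)
    finalize_plot(fig, args, ['.png'], 'example events', 'odds', '')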
def odds_summary(args, rankstats, ifars, p_b, ntop, times=None, mchirps=None,
name='events', plot_extensions=None):
print('\nSummary of Top %i %s' % (ntop, name.title()))
# do sort in reverse order
statsort = numpy.argsort(1. / numpy.array(rankstats))
topn = statsort[:ntop] # indices giving top n
topgps = []
topstat = []
topifar = []
toppastro = []
for n, i in enumerate(topn):
gps = times[i] if times is not None else ''
ifar = ifars[i]
stat = rankstats[i]
mchirpstring = 'mchirp %.3F' % mchirps[i] if mchirps is not None else ''
topgps.append(gps)
topstat.append(stat)
topifar.append(ifar)
print('#%d event:' % (n + 1), str(gps), mchirpstring)
print(' rankstat = %-8.3f' % stat)
print(' IFAR = %.2f' % ifar)
print(' odds = %g' % ((1. - p_b[i]) / p_b[i]))
toppastro.append(1. - p_b[i])
if args.p_astro_txt is not None:
numpy.savetxt(args.p_astro_txt,
numpy.column_stack((topgps, topstat, topifar, toppastro)),
fmt=['%.3F', '%.2F', '%.2F', '%.5F'],
delimiter=',',
header='GPS seconds, stat, IFAR/yr, p_astro')
if hasattr(args, 'json_tag') and args.json_tag is not None:
# save to catalog-style files
def dump_json(gps, p_a, p_b):
jfile = args.plot_dir + 'H1L1V1-PYCBC_%s-%s-1.json' % \
(args.json_tag, str(int(gps))) # truncate to integer GPS
with open(jfile, 'w') as jf:
json.dump({'Astro': p_a, 'Terrestrial': p_b}, jf)
if hasattr(args, 'json_min_ifar') and args.json_min_ifar is not None:
for g, ifar, pt in zip(times, ifars, p_b):
if ifar < args.json_min_ifar:
continue
dump_json(g, 1. - pt, pt)
else:
for g, pa in zip(topgps, toppastro):
dump_json(g, pa, 1. - pa)
if plot_extensions is not None:
plottag = args.plot_tag or ''
if plottag != '':
plottag = '_' + plottag
fig = plotodds(rankstats, p_b)
finalize_plot(fig, args, plot_extensions, name, 'odds', plottag)
fig = plotpbg(rankstats, p_b)
finalize_plot(fig, args, plot_extensions, name, 'pbg', plottag)
fig = plotoddsifar(ifars, p_b)
finalize_plot(fig, args, plot_extensions, name, 'ifarodds', plottag)
fig = plotfdr(p_b, ntop)
finalize_plot(fig, args, plot_extensions, name, 'fdr', plottag)
def plotdist(rv, plot_lim=None, middle=None, credible_intervals=None, style='linear'):
fig = figure.Figure()
FigureCanvas(fig)
ax = fig.gca()
name = rv.name if hasattr(rv, 'name') else None
symb = rv.texsymb if hasattr(rv, 'texsymb') else r'x'
unit = rv.texunit if hasattr(rv, 'texunit') else None
xlabel = r'$' + symb + '$'
if unit is not None:
xlabel += r' ($' + unit + r'$)'
a, b = rv.interval(0.9999)
if style == 'loglog':
ax.loglog()
ylabel = r'$p(' + symb + r')$'
space = lambda a, b: numpy.logspace(numpy.log10(a), numpy.log10(b), 100)
func = numpy.vectorize(rv.pdf)
xmin = a
ymin = rv.pdf(b)
elif style == 'semilogx':
ax.semilogx()
ylabel = r'$' + symb + r'\,p(' + symb + r')$'
space = lambda a, b: numpy.logspace(numpy.log10(a), numpy.log10(b), 100)
func = numpy.vectorize(lambda x: x * rv.pdf(x))
xmin = a
ymin = 0.0
else: # linear
ax.yaxis.set_ticklabels([])
ylabel = r'$p(' + symb + r')$'
space = lambda a, b: numpy.linspace(a, b, 100)
func = numpy.vectorize(rv.pdf)
xmin = 0.0
ymin = 0.0
x = space(a, b)
y = func(x)
ax.plot(x, y, color='k', linestyle='-')
if plot_lim is not None:
xmin, xmax = plot_lim
ax.set_xlim(xmin=xmin, xmax=xmax)
else:
ax.set_xlim(xmin=xmin)
ax.set_ylim(ymin=ymin)
if y.max() < 2 and y.max() > 1:
ax.set_ylim(ymax=2.)
if name is not None:
ax.set_title(name.title())
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if middle is not None:
ax.plot([middle, middle], [ymin, func(middle)], 'k--')
if credible_intervals is not None:
# alpha : density of fill shading
alpha = 1.0 / (1.0 + len(credible_intervals))
for lo, hi in credible_intervals.values():
lo = max(a, lo)
hi = min(b, hi)
x = space(lo, hi)
y = func(x)
ax.fill_between(x, y, ymin, color='k', alpha=alpha)
return fig
def dist_summary(args, rv, plot_styles=('linear', 'loglog', 'semilogx'),
plot_extensions=None, middle=None, credible_intervals=None):
name = rv.name if hasattr(rv, 'name') else 'posterior'
unit = rv.unit if hasattr(rv, 'unit') else ''
median = rv.median()
mode = rv.mode() if hasattr(rv, 'mode') else None
print('Summary of ' + name.title())
print('mean =', rv.mean(), unit)
print('median =', median, unit)
if mode is not None:
print('mode =', mode, unit)
print('stddev =', rv.std(), unit)
if credible_intervals is not None and len(credible_intervals) > 0:
print('equal-tailed credible intervals:')
equal_tailed_credible_intervals = {}
for cred in credible_intervals:
lo, hi = rv.interval(cred)
equal_tailed_credible_intervals[cred] = (lo, hi)
print('%g%%' % (cred * 100), 'credible interval =', '[%g, %g]' %
(lo, hi), unit)
if hasattr(rv, 'hpd_interval'):
print('highest probability density credible intervals:')
hpd_credible_intervals = {}
for cred in credible_intervals:
hpdlo, hpdhi = rv.hpd_interval(cred)
hpd_credible_intervals[cred] = (hpdlo, hpdhi)
print('%g%%' % (cred * 100), 'credible interval =', '[%g, %g]' %
(hpdlo, hpdhi), unit)
else:
hpd_credible_intervals = None
if len(credible_intervals) == 0:
credible_intervals = None
intervals = None
if middle == 'mode' and mode is not None:
middle = mode
if credible_intervals is not None:
# use hpd intervals with mode
intervals = hpd_credible_intervals
else:
middle = median
if credible_intervals is not None:
# use equal tailed intervals with median
intervals = equal_tailed_credible_intervals
# plot distributions
if plot_extensions is not None:
plottag = args.plot_tag or ''
if plottag != '':
plottag = '_' + plottag
for style in plot_styles:
fig = plotdist(rv, plot_lim=args.plot_limits, middle=middle,
credible_intervals=intervals, style=style)
finalize_plot(fig, args, plot_extensions, name, style, plottag)
if credible_intervals is not None and len(credible_intervals) == 1:
return median, lo - median, hi - median
# keep codeclimate happy with explicit return statement
return None
__all__ = ['plotdist', 'odds_summary', 'dist_summary']
| 10,012
| 33.174061
| 86
|
py
|
pycbc
|
pycbc-master/pycbc/population/__init__.py
|
from pycbc.population.rates_functions import *
from pycbc.population.scale_injections import *
from pycbc.population.population_models import *
from pycbc.population.fgmc_functions import *
from pycbc.population.fgmc_laguerre import *
from pycbc.population.live_pastro import *
from pycbc.population.live_pastro_utils import *
| 327
| 40
| 48
|
py
|
pycbc
|
pycbc-master/pycbc/population/population_models.py
|
# Copyright (C) 2021 Shichao Wu, Alex Nitz, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides functions for star formation rate models, time delay
models, merger rate density, and population models of BBH/BNS/NSBH.
"""
from functools import partial
import numpy as np
import scipy.integrate as scipy_integrate
import scipy.interpolate as scipy_interpolate
from astropy import units
from pycbc.cosmology import get_cosmology
from pycbc.cosmology import cosmological_quantity_from_redshift
def sfr_grb_2008(z):
r""" The star formation rate (SFR) calibrated by high-z GRBs data.
Parameters
----------
z : float
The redshift.
Returns
-------
rho_z : float
The SFR at redshift z, in unit of "Msolar/Mpc^3/yr".
Note
----
Please see Eq.(5) in <arXiv:0804.4008> for more details.
"""
rho_local = 0.02 # Msolar/yr/Mpc^3
eta = -10
rho_z = rho_local*((1+z)**(3.4*eta) + ((1+z)/5000)**(-0.3*eta) +
((1+z)/9)**(-3.5*eta))**(1./eta)
return rho_z
def sfr_madau_dickinson_2014(z):
r""" The madau-dickinson 2014 star formation rate (SFR).
Parameters
----------
z : float
The redshift.
Returns
-------
rho_z : float
The SFR at redshift z, in unit of "Msolar/Mpc^3/yr".
Notes
-----
    Please see Eq.(15) in <arXiv:1403.0007> for more details.
"""
rho_z = 0.015 * (1+z)**2.7 / (1 + ((1+z)/2.9)**5.6)
return rho_z
def sfr_madau_fragos_2017(z, k_imf=0.66, mode='high'):
r""" The madau-fragos 2017 star formation rate (SFR),
    which updates the Madau-Dickinson 2014 SFR to better reproduce
a number of recent 4 < z < 10 results.
Parameters
----------
z : float
The redshift.
k_imf : float
        The correction factor k_imf adjusts the SFR for the assumed IMF:
        for the Salpeter IMF, k_imf=1.0; for the three-component broken
        power-law Kroupa IMF, k_imf=0.66. The Kroupa IMF is the default here.
    mode : string
The model of SFR, choose from 'high' and 'low'. Default to 'high'.
Returns
-------
rho_z : float
The SFR at redshift z, in unit of "Msolar/Mpc^3/yr".
Notes
-----
    Please see <arXiv:1606.07887> and <arXiv:1706.07053> for more details.
"""
if mode == 'low':
factor_a = 2.6
factor_b = 3.2
factor_c = 6.2
elif mode == 'high':
factor_a = 2.7
factor_b = 3.0
factor_c = 5.35
else:
raise ValueError("'mode' must choose from 'high' or 'low'.")
rho_z = k_imf * 0.015 * (1+z)**factor_a / (1 + ((1+z)/factor_b)**factor_c)
return rho_z
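# Minimal usage sketch (demonstration helper only): comparing the three star
# formation rate models above at one illustrative redshift. Units are
# Msolar/Mpc^3/yr.
def _example_compare_sfr(z=2.0):
    return (sfr_grb_2008(z),
            sfr_madau_dickinson_2014(z),
            sfr_madau_fragos_2017(z, mode='high'))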
def diff_lookback_time(z, **kwargs):
r""" The derivative of lookback time t(z)
    with respect to redshift z.
Parameters
----------
z : float
The redshift.
    \**kwargs :
        All other keyword args are passed to :py:func:`get_cosmology` to
        select a cosmology. If none provided, will use
        :py:attr:`DEFAULT_COSMOLOGY`.
    Returns
    -------
    dt_dz : float
        The value of dt/dz at the redshift z.
Notes
-----
    Please see Eq.(A3) in <arXiv:2011.02717v3> for more details.
"""
from sympy import sqrt
cosmology = get_cosmology(**kwargs)
H0 = cosmology.H0.value * \
(3.0856776E+19)**(-1)/(1/24/3600/365*1e-9) # Gyr^-1
dt_dz = 1/H0/(1+z)/sqrt((cosmology.Ode0+cosmology.Om0*(1+z)**3))
return dt_dz
def p_tau(tau, td_model="inverse"):
r""" The probability distribution of the time delay.
Parameters
----------
tau : float
        The time delay between the formation of the binary system
        and its merger through gravitational-wave-driven orbital decay.
td_model : str
The time delay model.
Returns
-------
p_t : float
The probability at time delay tau.
Notes
-----
    Please see the Appendix in <arXiv:2011.02717v3> for more details.
"""
from sympy import sqrt, exp, log
if td_model == "log_normal":
t_ln = 2.9 # Gyr
sigma_ln = 0.2
p_t = exp(-(log(tau)-log(t_ln))**2/(2*sigma_ln**2)) / \
(sqrt(2*np.pi)*sigma_ln)
elif td_model == "gaussian":
t_g = 2 # Gyr
sigma_g = 0.3
p_t = exp(-(tau-t_g)**2/(2*sigma_g**2)) / (sqrt(2*np.pi)*sigma_g)
elif td_model == "power_law":
alpha_t = 0.81
p_t = tau**(-alpha_t)
elif td_model == "inverse":
p_t = tau**(-0.999) # Try to avoid dividing zero.
else:
        raise ValueError("'td_model' must choose from \
['log_normal', 'gaussian', 'power_law', 'inverse'].")
return p_t
def convolution_trans(sfr, diff_lookback_t, model_td, **kwargs):
r""" This function is used in a symbolic integral, which to calculate
the merger rate density of CBC sources. This function converts the
convolution of the star formation rate SFR(tau) and the time delay
probability P(tau) on the time delay 'tau' into the convolution on
the redshift 'z'.
Parameters
----------
sfr : function
The star formation rate function used in the convolution.
diff_lookback_t : function
The derivative of lookback time t(z)
        with respect to redshift z.
model_td : str
The name of time delay model.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
func : sympy.core.symbol.Symbol
The product of SFR(z), P(tau(z)) and dt(z)/dz.
Notes
-----
    Please see Eq.(A2) in <arXiv:2011.02717v3> for more details.
"""
from sympy import integrate, symbols
if model_td not in ['log_normal', 'gaussian', 'power_law', 'inverse']:
raise ValueError("'model_td' must choose from \
['log_normal', 'gaussian', 'power_law', 'inverse'].")
# Fix the cosmology, set 'z/z_0' to be the only
# parameter in the symbolic integration.
diff_lookback_time_z = partial(diff_lookback_t, **kwargs)
z = symbols('z')
z_0 = symbols('z_0')
tau = integrate(diff_lookback_time_z(z), (z, z_0, z))
func = sfr(z) * p_tau(tau, model_td) * diff_lookback_time_z(z)
return func
def merger_rate_density(sfr_func, td_model, rho_local, maxz=10.0,
npoints=10000, z_array=None, **kwargs):
r""" This function uses the symbolic integral to calculate
the merger rate density of CBC sources. This function converts the
convolution of the star formation rate SFR(tau) and the time delay
probability P(tau) on the time delay 'tau' into the convolution on
the redshift 'z'. This function relies on `convolution_trans`.
Parameters
----------
sfr_func : function
The star formation rate function used in the convolution.
td_model : str
The name of time delay model.
rho_local : float
The local merger rate of a certain type of CBC source.
In the unit of "Mpc^-3yr^-1".
maxz : float
The max redshift. The default value is 10.
npoints : int
The number of points used in the interpolation. The default
value is 10000.
z_array : numpy.array
The array of redshift. The default value is None.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
rho_z : scipy.interpolate.interp1d
The merger rate density.
Notes
-----
    Please see Eq.(A1), Eq.(A2) in <arXiv:2011.02717v3> for more details.
"""
from sympy import symbols, lambdify
if z_array is None:
z_array = np.linspace(0, maxz, npoints)
if td_model not in ['log_normal', 'gaussian', 'power_law', 'inverse']:
raise ValueError("'td_model' must choose from \
['log_normal', 'gaussian', 'power_law', 'inverse'].")
z = symbols('z')
z_0 = symbols('z_0')
f_z = np.zeros(len(z_array))
func_1 = convolution_trans(
sfr=sfr_func, diff_lookback_t=diff_lookback_time,
model_td=td_model, **kwargs)
for i in range(len(z_array)):
func_2 = lambdify(z, func_1.subs(z_0, z_array[i]), 'scipy')
f_z[i] = scipy_integrate.quad(
func_2, z_array[i], np.inf, epsabs=1.49e-3)[0]
f_z = f_z/f_z[0]*rho_local # Normalize & Rescale
rho_z = scipy_interpolate.interp1d(z_array, f_z)
return rho_z
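# Minimal usage sketch (demonstration helper only): building a merger rate
# density interpolant for an 'inverse' time-delay model. The local rate and the
# small redshift grid are illustrative assumptions; the symbolic convolution
# makes this step slow, so keep `npoints` small when experimenting.
def _example_merger_rate_density():
    return merger_rate_density(sfr_madau_dickinson_2014, 'inverse',
                               rho_local=1e-6, maxz=5., npoints=20)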
def coalescence_rate(rate_den, maxz=10.0, npoints=10000,
z_array=None, **kwargs):
r""" This function calculates the coalescence(merger) rate at the redshift z.
Parameters
----------
rate_den : function or scipy.interpolate.interp1d
The merger rate density as a function of redshift. In the unit of
"Mpc^-3yr^-1". Use `merger_rate_density` function to calculate.
maxz : float
The max redshift. The default value is 10.
npoints : int
The number of points used in the interpolation. The default
value is 10000.
z_array : numpy.array or list
The redshift range. The default value is None.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
coalescence_rate_interp : scipy.interpolate.interp1d
The coalescence rate.
Notes
-----
    Please see Eq.(1) in <arXiv:2011.02717v3> for more details.
"""
if z_array is None:
z_array = np.linspace(0, maxz, npoints)
dr_dz = []
cosmology = get_cosmology(**kwargs)
for z in z_array:
dr = cosmology.differential_comoving_volume(z) / (1+z)
dr_dz.append((dr*4*np.pi*units.sr*rate_den(z)*(units.Mpc)**(-3)).value)
coalescence_rate_interp = scipy_interpolate.interp1d(
z_array, dr_dz, fill_value='extrapolate')
return coalescence_rate_interp
def total_rate_upto_redshift(z, merger_rate):
r"""Total rate of occurrences out to some redshift.
Parameters
----------
z : int, float, tuple, numpy.ndarray or list
The redshift.
merger_rate : scipy.interpolate.interp1d or function
The merger or coalescence rate. Function should take the
redshift as a single argument. Provided by users or
calculated by the `coalescence_rate` function.
Returns
-------
rate: float or list
The total rate of occurrences out to some redshift. In the
unit of "yr^-1".
"""
if isinstance(z, (float, int)):
total_rate = scipy_integrate.quad(
merger_rate, 0, z,
epsabs=2.00e-4, epsrel=2.00e-4, limit=1000)[0]
elif isinstance(z, (tuple, np.ndarray, list)):
total_rate = []
for redshift in z:
total_rate.append(
scipy_integrate.quad(
merger_rate, 0, redshift,
epsabs=2.00e-4, epsrel=2.00e-4, limit=1000)[0]
)
else:
raise ValueError("'z' must be 'int', 'float', 'tuple', \
'numpy.ndarray' or 'list'.")
return total_rate
def average_time_between_signals(z_array, merger_rate):
r""" This function calculates the average time interval
of a certain type of CBC source.
Parameters
----------
z_array : numpy.array
The array of redshift.
merger_rate : scipy.interpolate.interp1d or function
The coalescence rate. Provided by users or calculated by
the `coalescence_rate` function.
Returns
-------
average_time : float
The average time interval (s).
"""
total_rate = total_rate_upto_redshift(
z_array[-1], merger_rate) # yr^-1
average_time = 1./total_rate * 365*24*3600 # s
return average_time
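# Minimal usage sketch (demonstration helper only): chaining a merger rate
# density interpolant (e.g. from the sketch after `merger_rate_density`) into a
# coalescence rate and the average waiting time between signals. The redshift
# grid must not extend beyond the range covered by `rho_z`.
def _example_average_time(rho_z, maxz=5., npoints=50):
    z_array = np.linspace(0, maxz, npoints)
    rate = coalescence_rate(rho_z, z_array=z_array)
    return average_time_between_signals(z_array, rate)  # seconds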
def norm_redshift_distribution(z_array, merger_rate):
r""" This function calculates the normalized redshift distribution
of a certain type of CBC source.
Parameters
----------
z_array : numpy.array
The array of redshift.
merger_rate : scipy.interpolate.interp1d or function
The coalescence rate. Provided by users or calculated by
the `coalescence_rate` function.
Returns
-------
norm_coalescence_rate : numpy.array
The normalized redshift distribution.
Notes
-----
    This can be used as a population-informed prior for redshift
and luminosity distance of CBC sources.
"""
lambd = average_time_between_signals(z_array, merger_rate)
norm_coalescence_rate = lambd/(365*24*3600) * merger_rate(z_array)
return norm_coalescence_rate
def distance_from_rate(
total_rate, merger_rate, maxz=10, npoints=10000, **kwargs):
r"""Returns the luminosity distance from the given total rate value.
Parameters
----------
total_rate : float
The total rate.
merger_rate : scipy.interpolate.interp1d or function
The coalescence rate. Provided by users or calculated by
the `coalescence_rate` function.
maxz : float
The max redshift in the interpolation, the default value is 10.
Please use the same `maxz` as in `merger_rate`.
npoints : int
The number of points used in the interpolation, the default value
is 10000.
\**kwargs :
All other keyword args are passed to :py:func:`get_cosmology` to
select a cosmology. If none provided, will use
:py:attr:`DEFAULT_COSMOLOGY`.
Returns
-------
dl : float
The luminosity distance at the given total rate value.
In the unit of "Mpc".
Notes
-----
    This can be used as a population-informed prior for redshift
    and luminosity distance of CBC sources. When this is used in the
    high-redshift range, please first use the `total_rate_upto_redshift`
function to plot the curve and find the point where the curve
starts to stay almost horizontal, then set `maxz` to the
corresponding value and change `npoints` to a reasonable value.
"""
cosmology = get_cosmology(**kwargs)
if not hasattr(merger_rate, 'dist_interp'):
merger_rate.dist_interp = {}
if ((cosmology.name not in merger_rate.dist_interp) or
(len(merger_rate.dist_interp[cosmology.name].x) != npoints)):
def rate_func(redshift):
return total_rate_upto_redshift(redshift, merger_rate)
z_array = np.linspace(0, maxz, npoints)
dists = cosmological_quantity_from_redshift(
z_array, 'luminosity_distance', **kwargs)
total_rates = rate_func(z_array)
interp = scipy_interpolate.interp1d(total_rates, dists)
merger_rate.dist_interp[cosmology.name] = interp
dl = merger_rate.dist_interp[cosmology.name](total_rate)
if np.isscalar(dl):
dl = float(dl)
return dl
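# Minimal usage sketch (demonstration helper only): converting a total rate into
# the corresponding luminosity distance for a given coalescence rate interpolant
# `rate` (e.g. from `coalescence_rate`). The requested rate of one signal per
# year is an illustrative assumption and must lie below the total rate out to
# `maxz` for the interpolation to succeed.
def _example_distance_from_rate(rate):
    return distance_from_rate(1.0, rate, maxz=5., npoints=50)  # Mpc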
__all__ = ['sfr_grb_2008', 'sfr_madau_dickinson_2014',
'sfr_madau_fragos_2017', 'diff_lookback_time',
'p_tau', 'merger_rate_density', 'coalescence_rate',
'norm_redshift_distribution', 'total_rate_upto_redshift',
'distance_from_rate', 'average_time_between_signals']
| 16,559
| 31.217899
| 81
|
py
|
pycbc
|
pycbc-master/pycbc/population/fgmc_laguerre.py
|
# Copyright (C) 2016 Jolien Creighton
# (C) 2021 Jolien Creighton & Thomas Dent
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
"""
Based ultimately on code used for O1 rate calculations, see
https://git.ligo.org/RatesAndPopulations/lvc-rates-and-pop/-/blob/master/bin/O1_scripts/lvc_rates_calc_posterior
and technical documentation at https://dcc.ligo.org/LIGO-T1700029/public
"""
import numpy
import scipy.stats as sst
import scipy.special as ssp
import scipy.integrate as sig
import scipy.optimize as sop
class augmented_rv_continuous(sst.rv_continuous):
def __init__(self, unit='dimensionless', texunit=r'\mbox{dimensionless}',
texsymb=r'x', **kwargs):
'''
Parameters
----------
unit : string, optional
units of independent variable
texunit : string, optional
units of independent variable, in tex format
texsymb : string, optional
symbol of independent variable, in tex format
'''
super(augmented_rv_continuous, self).__init__(**kwargs)
self._hpd_interval_vec = numpy.vectorize(self._hpd_interval_scalar)
self.unit = unit
self.texunit = texunit
self.texsymb = texsymb
def _hpd_interval_scalar(self, alpha):
def width(a):
return self.ppf(alpha + self.cdf(a)) - a
a = self.ppf(1e-6) # a is displaced slightly from 0
b = self.ppf(alpha)
if self.pdf(a) >= self.pdf(b): # upper limit
return self.a, b
a = sop.fminbound(width, a, self.ppf(1.0 - alpha))
b = self.ppf(alpha + self.cdf(a))
return a, b
def hpd_interval(self, alpha):
'''
Confidence interval of highest probability density.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's
possible values.
'''
if isinstance(alpha, (float, numpy.number)):
a, b = self._hpd_interval_scalar(alpha)
else:
a, b = self._hpd_interval_vec(alpha)
return a, b
class count_posterior(augmented_rv_continuous):
'''
Count posterior distribution.
'''
def __init__(self, logbf, laguerre_n, Lambda0, prior=-0.5,
name='count posterior', unit='signals/experiment',
texunit=r'\mathrm{signals}/\mathrm{experiment}',
texsymb=r'\Lambda_1'):
'''
Parameters
----------
logbf : array_like
logs of normalized foreground over background pdf ratios of events
laguerre_n: int
degree of generalized Laguerre polynomial for quadrature formula
Lambda0 : float
            background rate, i.e. the expected number of background events
            (e.g. len(logbf))
prior : float or count_posterior, optional
prior distribution power law of improper prior if float
or count posterior distribution if count_posterior
(default=-0.5: Jeffreys prior)
'''
super(count_posterior, self).__init__(a=0.0, b=numpy.inf, name=name,
unit=unit, texunit=texunit,
texsymb=texsymb)
self.Lambda0 = Lambda0
# weighted Bayes factor
self.k = numpy.exp(numpy.array(logbf)) / self.Lambda0
# power-law priors
self.alpha = prior
if prior == 0:
self.prior = lambda x: 1.0
elif prior > 0:
self.prior = lambda x: x ** prior
else:
# regularize at x = 0
self.prior = lambda x: (x + self.xtol) ** prior
# pre-compute Gaussian-Generalized-Laguerre quadrature
# abscissas and weights, along with pdf at these abscissas
self.x, w = ssp.la_roots(laguerre_n, self.alpha)
self.p = numpy.array([ww * numpy.prod(1.0 + self.k * xx)
for xx, ww in zip(self.x, w)])
self.norm = 1.0 / sum(self.p)
self.p *= self.norm
def _pdf(self, x):
# discourage underflows by evaluating ln L and using ln(1+x) function
logL = -x + numpy.sum(numpy.log1p(self.k * x))
P = numpy.exp(logL) * self.prior(x)
return self.norm * P
def _cdf(self, x):
        return sig.quad(self._pdf, 0.0, x)[0]
def expect(self, func):
'''
Calculate expected value of a function with respect to the
distribution.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
E[x] = Integral(f(x) * dist.pdf(x))
Parameters
----------
func : callable
Function for which integral is calculated. Takes only one argument.
Returns
-------
expect : float
The calculated expected value.
'''
# FIXME: not as feature rich as the expect method this overrides
return sum(pp * func(xx) for xx, pp in zip(self.x, self.p))
def _munp(self, n):
return self.expect(lambda x: x**n)
def p_bg(self, logbf):
'''
Calculate the false alarm probabilities of the events.
Parameters
----------
logbf : array_like
Logs of foreground over background probability ratios of events.
'''
# get weighted bayes factor
k = numpy.exp(numpy.asarray(logbf)) / self.Lambda0
P0 = numpy.dot(1./(1. + numpy.outer(k, self.x)), self.p)
if isinstance(k, (float, int, numpy.number)):
return P0.item()
if isinstance(k, numpy.ndarray) and k.ndim == 0:
return P0.item()
# except in special cases above, return array of values
return P0
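# Minimal usage sketch (demonstration helper only): building a count posterior
# from a handful of illustrative log Bayes factors and evaluating the
# terrestrial (background) probability of each event. All numbers here are
# assumptions chosen purely for demonstration.
def _example_count_posterior():
    logbf = numpy.array([0.5, 2.0, 8.0])
    post = count_posterior(logbf, laguerre_n=50, Lambda0=100.)
    return post.mean(), post.p_bg(logbf)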
__all__ = ['augmented_rv_continuous', 'count_posterior']
| 6,246
| 33.136612
| 112
|
py
|
pycbc
|
pycbc-master/pycbc/population/live_pastro_utils.py
|
import logging
import json
from . import live_pastro as livepa
def insert_live_pastro_option_group(parser):
""" Add low-latency p astro options to the argparser object.
Parameters
----------
parser : object
ArgumentParser instance.
Returns
-------
live_pastro_group :
Argument group object
"""
live_pastro_group = parser.add_argument_group('Options for live p_astro')
live_pastro_group.add_argument('--p-astro-spec',
help='File containing information to set '
'up p_astro calculation')
return live_pastro_group
# Choices of p astro calc method
_check_spec = {
'template_param_bins': livepa.check_template_param_bin_data,
'template_param_bins_types': livepa.check_template_param_bin_data,
'template_param_bins_types_farlim':
livepa.check_template_param_bin_farlim_data
}
_read_bank = {
'template_param_bins': livepa.read_template_bank_param,
'template_param_bins_types': livepa.read_template_bank_param,
'template_param_bins_types_farlim': livepa.read_template_bank_param
}
_do_calc = {
'template_param_bins': livepa.template_param_bin_pa,
'template_param_bins_types': livepa.template_param_bin_types_pa,
'template_param_bins_types_farlim':
livepa.template_param_bin_types_farlim_pa
}
class PAstroData():
""" Class for managing live p_astro calculation persistent info """
def __init__(self, specfile, bank):
"""
Read in spec file and extract relevant info from bank
Parameters
----------
specfile: str
Path to file giving method and static data used in calculation
bank: str
Path to hdf template bank file
"""
if specfile is None:
self.do = False
else:
self.do = True
with open(specfile) as specf:
self.spec_json = json.load(specf)
try:
self.method = self.spec_json['method']
except KeyError as ke:
raise ValueError("Can't find 'method' in p_astro spec file!") \
from ke
logging.info('Setting up p_astro data with method %s', self.method)
self.spec = _check_spec[self.method](self.spec_json)
self.bank = _read_bank[self.method](self.spec, bank)
def apply_significance_limits(self, trigger_data):
"""
If the network SNR and FAR indicate saturation of the FAR estimate,
set them to the fixed values given in the specification.
"""
# This only happens for double or triple events
if len(trigger_data['triggered']) == 1:
return trigger_data
if len(trigger_data['triggered']) > 1:
farlim = self.spec['limit_far']
snrlim = self.spec['limit_snr']
# Only do anything if FAR and SNR are beyond given limits
if trigger_data['far'] > farlim or \
trigger_data['network_snr'] < snrlim:
return trigger_data
logging.debug('Truncating FAR and SNR from %f, %f to %f, %f',
trigger_data['far'], trigger_data['network_snr'],
farlim, snrlim)
trigger_data['network_snr'] = snrlim
trigger_data['far'] = farlim
return trigger_data
raise RuntimeError('Number of triggered ifos must be >0 !')
def do_pastro_calc(self, trigger_data, horizons):
""" No-op, or call the despatch dictionary to evaluate p_astro """
if not self.do:
return None, None
logging.info('Computing p_astro')
p_astro, p_terr = _do_calc[self.method](self, trigger_data, horizons)
return p_astro, p_terr
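# Minimal usage sketch (demonstration helper only): setting up the persistent
# p_astro data. The file paths are hypothetical placeholders, and the
# `trigger_data` / `horizons` inputs would normally be supplied by the live
# search; passing specfile=None simply disables the calculation.
def _example_pastro_setup(specfile='p_astro_spec.json', bank='H1L1V1-BANK.hdf'):
    pastro = PAstroData(specfile, bank)
    # pastro.do_pastro_calc(trigger_data, horizons) then returns
    # (p_astro, p_terr), or (None, None) when no spec file was given.
    return pastro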
__all__ = [
"insert_live_pastro_option_group",
"PAstroData"
]
| 3,907
| 31.840336
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/population/rates_functions.py
|
"""
A set of helper functions for evaluating rates.
"""
import h5py
import numpy as np
from numpy import log
from scipy import integrate, optimize
import scipy.stats as ss
from pycbc.conversions import mchirp_from_mass1_mass2
def process_full_data(fname, rhomin, mass1, mass2, lo_mchirp, hi_mchirp):
"""Read the zero-lag and time-lag triggers identified by templates in
a specified range of chirp mass.
Parameters
----------
    fname: string
        Path to the file that stores all the triggers
rhomin: float
        Minimum value of the SNR threshold (will need to include IFAR)
mass1: array
First mass of the waveform in the template bank
mass2: array
Second mass of the waveform in the template bank
lo_mchirp: float
Minimum chirp mass for the template
hi_mchirp: float
Maximum chirp mass for the template
Returns
-------
dictionary
containing foreground triggers and background information
"""
with h5py.File(fname, 'r') as bulk:
id_bkg = bulk['background_exc/template_id'][:]
id_fg = bulk['foreground/template_id'][:]
mchirp_bkg = mchirp_from_mass1_mass2(mass1[id_bkg], mass2[id_bkg])
bound = np.sign((mchirp_bkg - lo_mchirp) * (hi_mchirp - mchirp_bkg))
idx_bkg = np.where(bound == 1)
mchirp_fg = mchirp_from_mass1_mass2(mass1[id_fg], mass2[id_fg])
bound = np.sign((mchirp_fg - lo_mchirp) * (hi_mchirp - mchirp_fg))
idx_fg = np.where(bound == 1)
zerolagstat = bulk['foreground/stat'][:][idx_fg]
cstat_back_exc = bulk['background_exc/stat'][:][idx_bkg]
dec_factors = bulk['background_exc/decimation_factor'][:][idx_bkg]
return {'zerolagstat': zerolagstat[zerolagstat > rhomin],
'dec_factors': dec_factors[cstat_back_exc > rhomin],
'cstat_back_exc': cstat_back_exc[cstat_back_exc > rhomin]}
def save_bkg_falloff(fname_statmap, fname_bank, path, rhomin, lo_mchirp, hi_mchirp):
''' Read the STATMAP files to derive snr falloff for the background events.
Save the output to a txt file
Bank file is also provided to restrict triggers to BBH templates.
Parameters
----------
fname_statmap: string
STATMAP file containing trigger information
fname_bank: string
File name of the template bank
path: string
Destination where txt file is saved
rhomin: float
        Minimum value of the SNR threshold (will need to include IFAR)
lo_mchirp: float
Minimum chirp mass for the template
hi_mchirp: float
Maximum chirp mass for template
'''
with h5py.File(fname_bank, 'r') as bulk:
mass1_bank = bulk['mass1'][:]
mass2_bank = bulk['mass2'][:]
full_data = process_full_data(fname_statmap, rhomin,
mass1_bank, mass2_bank, lo_mchirp, hi_mchirp)
max_bg_stat = np.max(full_data['cstat_back_exc'])
bg_bins = np.linspace(rhomin, max_bg_stat, 76)
bg_counts = np.histogram(full_data['cstat_back_exc'],
weights=full_data['dec_factors'], bins=bg_bins)[0]
zerolagstat = full_data['zerolagstat']
coincs = zerolagstat[zerolagstat >= rhomin]
bkg = (bg_bins[:-1], bg_bins[1:], bg_counts)
return bkg, coincs
def log_rho_fgmc(t, injstats, bins):
counts, bins = np.histogram(injstats, bins)
N = sum(counts)
dens = counts / np.diff(bins) / float(N)
assert np.min(t) >= np.min(bins)
assert np.max(t) < np.max(bins)
tinds = np.searchsorted(bins, t) - 1
return log(dens[tinds])
def fgmc(log_fg_ratios, mu_log_vt, sigma_log_vt, Rf, maxfg):
'''
Function to fit the likelihood Fixme
'''
Lb = np.random.uniform(0., maxfg, len(Rf))
pquit = 0
while pquit < 0.1:
# quit when the posterior on Lf is very close to its prior
nsamp = len(Lb)
Rf_sel = np.random.choice(Rf, nsamp)
vt = np.random.lognormal(mu_log_vt, sigma_log_vt, len(Rf_sel))
Lf = Rf_sel * vt
log_Lf, log_Lb = log(Lf), log(Lb)
plR = 0
for lfr in log_fg_ratios:
plR += np.logaddexp(lfr + log_Lf, log_Lb)
plR -= (Lf + Lb)
plRn = plR - max(plR)
idx = np.exp(plRn) > np.random.random(len(plRn))
        pquit = ss.ks_2samp(Lb, Lb[idx])[1]
Lb = Lb[idx]
return Rf_sel[idx], Lf[idx], Lb
def _optm(x, alpha, mu, sigma):
'''Return probability density of skew-lognormal
See scipy.optimize.curve_fit
'''
return ss.skewnorm.pdf(x, alpha, mu, sigma)
def fit(R):
    ''' Fit a skew-lognormal to the rate samples obtained from a prior analysis
Parameters
----------
R: array
Rate samples
Returns
-------
ff[0]: float
The skewness
ff[1]: float
The mean
ff[2]: float
The standard deviation
'''
lR = np.log(R)
mu_norm, sigma_norm = np.mean(lR), np.std(lR)
xs = np.linspace(min(lR), max(lR), 200)
kde = ss.gaussian_kde(lR)
pxs = kde(xs)
# Initial guess has been taken as the mean and std-dev of the data
# And a guess assuming small skewness
ff = optimize.curve_fit(_optm, xs, pxs, p0 = [0.1, mu_norm, sigma_norm])[0]
return ff[0], ff[1], ff[2]
def skew_lognormal_samples(alpha, mu, sigma, minrp, maxrp):
''' Returns a large number of Skew lognormal samples
Parameters
----------
alpha: float
Skewness of the distribution
mu: float
Mean of the distribution
sigma: float
Scale of the distribution
minrp: float
Minimum value for the samples
maxrp: float
Maximum value for the samples
Returns
-------
Rfs: array
Large number of samples (may need fixing)
'''
nsamp = 100000000
lRu = np.random.uniform(minrp, maxrp, nsamp)
plRu = ss.skewnorm.pdf(lRu, alpha, mu, sigma)
rndn = np.random.random(nsamp)
maxp = max(plRu)
idx = np.where(plRu/maxp > rndn)
log_Rf = lRu[idx]
Rfs = np.exp(log_Rf)
return Rfs
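# Minimal usage sketch (demonstration helper only): fitting a skew-normal to the
# log of some illustrative rate samples. The lognormal "posterior" below is an
# assumption for the example; the fitted parameters could then be passed to
# `skew_lognormal_samples`, which is not called here because it draws a very
# large number of samples.
def _example_fit_rate_samples():
    rates = np.random.lognormal(mean=3., sigma=0.5, size=5000)
    alpha, mu, sigma = fit(rates)
    return alpha, mu, sigma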
# The flat in log and power-law mass distribution models #
# PDF for the two canonical models plus flat in mass model
def prob_lnm(m1, m2, s1z, s2z, **kwargs):
''' Return probability density for uniform in log
Parameters
----------
m1: array
Component masses 1
m2: array
Component masses 2
s1z: array
        Aligned spin 1 (not in use currently)
    s2z: array
        Aligned spin 2 (not in use currently)
**kwargs: string
Keyword arguments as model parameters
Returns
-------
p_m1_m2: array
The probability density for m1, m2 pair
'''
min_mass = kwargs.get('min_mass', 5.)
max_mass = kwargs.get('max_mass', 95.)
max_mtotal = min_mass + max_mass
m1, m2 = np.array(m1), np.array(m2)
C_lnm = integrate.quad(lambda x: (log(max_mtotal - x) - log(min_mass))/x, min_mass, max_mass)[0]
xx = np.minimum(m1, m2)
m1 = np.maximum(m1, m2)
m2 = xx
bound = np.sign(max_mtotal - m1 - m2)
bound += np.sign(max_mass - m1) * np.sign(m2 - min_mass)
idx = np.where(bound != 2)
p_m1_m2 = (1/C_lnm)*(1./m1)*(1./m2)
p_m1_m2[idx] = 0
return p_m1_m2
def prob_imf(m1, m2, s1z, s2z, **kwargs):
''' Return probability density for power-law
Parameters
----------
m1: array
Component masses 1
m2: array
Component masses 2
s1z: array
Aligned spin 1(Not in use currently)
s2z:
Aligned spin 2(Not in use currently)
**kwargs: string
Keyword arguments as model parameters
Returns
-------
p_m1_m2: array
the probability density for m1, m2 pair
'''
min_mass = kwargs.get('min_mass', 5.)
max_mass = kwargs.get('max_mass', 95.)
alpha = kwargs.get('alpha', -2.35)
max_mtotal = min_mass + max_mass
m1, m2 = np.array(m1), np.array(m2)
C_imf = max_mass**(alpha + 1)/(alpha + 1)
C_imf -= min_mass**(alpha + 1)/(alpha + 1)
xx = np.minimum(m1, m2)
m1 = np.maximum(m1, m2)
m2 = xx
    bound = np.sign(max_mtotal - m1 - m2)
    bound += np.sign(max_mass - m1) * np.sign(m2 - min_mass)
    # indices of (m1, m2) pairs falling outside the allowed mass bounds
    idx_out = np.where(bound != 2)
    p_m1_m2 = np.zeros_like(m1)
    idx = np.where(m1 <= max_mtotal/2.)
    p_m1_m2[idx] = (1./C_imf) * m1[idx]**alpha / (m1[idx] - min_mass)
    idx = np.where(m1 > max_mtotal/2.)
    p_m1_m2[idx] = (1./C_imf) * m1[idx]**alpha / (max_mass - m1[idx])
    # zero the density only for out-of-bounds pairs, not the branch just set
    p_m1_m2[idx_out] = 0
    return p_m1_m2/2.
def prob_flat(m1, m2, s1z, s2z, **kwargs):
''' Return probability density for uniform in component mass
Parameters
----------
m1: array
Component masses 1
m2: array
Component masses 2
s1z: array
Aligned spin 1 (not in use currently)
s2z:
Aligned spin 2 (not in use currently)
**kwargs: string
Keyword arguments as model parameters
Returns
-------
p_m1_m2: array
the probability density for m1, m2 pair
'''
min_mass = kwargs.get('min_mass', 1.)
max_mass = kwargs.get('max_mass', 2.)
bound = np.sign(m1 - m2)
bound += np.sign(max_mass - m1) * np.sign(m2 - min_mass)
idx = np.where(bound != 2)
    p_m1_m2 = np.ones_like(m1) * 2. / (max_mass - min_mass)**2
p_m1_m2[idx] = 0
return p_m1_m2
# Generate samples for the two canonical models plus flat in mass model
def draw_imf_samples(**kwargs):
''' Draw samples for power-law model
Parameters
----------
**kwargs: string
Keyword arguments as model parameters and number of samples
Returns
-------
array
The first mass
array
The second mass
'''
alpha_salpeter = kwargs.get('alpha', -2.35)
nsamples = kwargs.get('nsamples', 1)
min_mass = kwargs.get('min_mass', 5.)
max_mass = kwargs.get('max_mass', 95.)
max_mtotal = min_mass + max_mass
a = (max_mass/min_mass)**(alpha_salpeter + 1.0) - 1.0
beta = 1.0 / (alpha_salpeter + 1.0)
k = nsamples * int(1.5 + log(1 + 100./nsamples))
aa = min_mass * (1.0 + a * np.random.random(k))**beta
bb = np.random.uniform(min_mass, aa, k)
idx = np.where(aa + bb < max_mtotal)
m1, m2 = (np.maximum(aa, bb))[idx], (np.minimum(aa, bb))[idx]
return np.resize(m1, nsamples), np.resize(m2, nsamples)
def draw_lnm_samples(**kwargs):
''' Draw samples for uniform-in-log model
Parameters
----------
**kwargs: string
Keyword arguments as model parameters and number of samples
Returns
-------
array
The first mass
array
The second mass
'''
    # PDF doesn't match the sampler
nsamples = kwargs.get('nsamples', 1)
min_mass = kwargs.get('min_mass', 5.)
max_mass = kwargs.get('max_mass', 95.)
max_mtotal = min_mass + max_mass
lnmmin = log(min_mass)
lnmmax = log(max_mass)
k = nsamples * int(1.5 + log(1 + 100./nsamples))
aa = np.exp(np.random.uniform(lnmmin, lnmmax, k))
bb = np.exp(np.random.uniform(lnmmin, lnmmax, k))
idx = np.where(aa + bb < max_mtotal)
m1, m2 = (np.maximum(aa, bb))[idx], (np.minimum(aa, bb))[idx]
return np.resize(m1, nsamples), np.resize(m2, nsamples)
def draw_flat_samples(**kwargs):
''' Draw samples for uniform in mass
Parameters
----------
**kwargs: string
Keyword arguments as model parameters and number of samples
Returns
-------
array
The first mass
array
The second mass
'''
    # PDF doesn't match the sampler
nsamples = kwargs.get('nsamples', 1)
min_mass = kwargs.get('min_mass', 1.)
max_mass = kwargs.get('max_mass', 2.)
m1 = np.random.uniform(min_mass, max_mass, nsamples)
m2 = np.random.uniform(min_mass, max_mass, nsamples)
return np.maximum(m1, m2), np.minimum(m1, m2)
# Functions to generate chirp mass samples for the two canonical models
def mchirp_sampler_lnm(**kwargs):
''' Draw chirp mass samples for uniform-in-log model
Parameters
----------
**kwargs: string
Keyword arguments as model parameters and number of samples
Returns
-------
mchirp-astro: array
The chirp mass samples for the population
'''
m1, m2 = draw_lnm_samples(**kwargs)
mchirp_astro = mchirp_from_mass1_mass2(m1, m2)
return mchirp_astro
def mchirp_sampler_imf(**kwargs):
''' Draw chirp mass samples for power-law model
Parameters
----------
**kwargs: string
Keyword arguments as model parameters and number of samples
Returns
-------
mchirp-astro: array
The chirp mass samples for the population
'''
m1, m2 = draw_imf_samples(**kwargs)
mchirp_astro = mchirp_from_mass1_mass2(m1, m2)
return mchirp_astro
def mchirp_sampler_flat(**kwargs):
''' Draw chirp mass samples for flat in mass model
Parameters
----------
**kwargs: string
Keyword arguments as model parameters and number of samples
Returns
-------
mchirp-astro: array
The chirp mass samples for the population
'''
m1, m2 = draw_flat_samples(**kwargs)
mchirp_astro = mchirp_from_mass1_mass2(m1, m2)
return mchirp_astro
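# Minimal usage sketch (demonstration helper only): drawing component masses and
# chirp masses from the power-law (IMF-like) population model and evaluating the
# corresponding mass density. The mass bounds and sample size are illustrative
# assumptions; the spin arguments of prob_imf are unused and passed as None.
def _example_imf_population():
    m1, m2 = draw_imf_samples(nsamples=1000, min_mass=5., max_mass=95.)
    dens = prob_imf(m1, m2, None, None, min_mass=5., max_mass=95.)
    mchirps = mchirp_sampler_imf(nsamples=1000, min_mass=5., max_mass=95.)
    return dens, mchirps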
| 13,826
| 26.933333
| 100
|
py
|
pycbc
|
pycbc-master/pycbc/types/array_cuda.py
|
# Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Pycuda based
"""
import pycuda.driver
from pycuda.elementwise import ElementwiseKernel
from pycuda.reduction import ReductionKernel
from pycuda.tools import get_or_register_dtype
from pycuda.tools import context_dependent_memoize
from pycuda.tools import dtype_to_ctype
from pytools import match_precision
from pycuda.gpuarray import _get_common_dtype, empty, GPUArray
import pycuda.gpuarray
from pycuda.scan import InclusiveScanKernel
import numpy as np
include_complex = """
#include <pycuda-complex.hpp>
"""
@context_dependent_memoize
def get_cumsum_kernel(dtype):
return InclusiveScanKernel(dtype, "a+b", preamble=include_complex)
def icumsum(vec):
krnl = get_cumsum_kernel(vec.dtype)
return krnl(vec)
@context_dependent_memoize
def call_prepare(self, sz, allocator):
MAX_BLOCK_COUNT = 1024
SMALL_SEQ_COUNT = 4
if sz <= self.block_size*SMALL_SEQ_COUNT*MAX_BLOCK_COUNT:
total_block_size = SMALL_SEQ_COUNT*self.block_size
block_count = (sz + total_block_size - 1) // total_block_size
seq_count = SMALL_SEQ_COUNT
else:
block_count = MAX_BLOCK_COUNT
macroblock_size = block_count*self.block_size
seq_count = (sz + macroblock_size - 1) // macroblock_size
if block_count == 1:
result = empty((), self.dtype_out, allocator)
else:
result = empty((block_count,), self.dtype_out, allocator)
grid_size = (block_count, 1)
block_size = (self.block_size, 1, 1)
return result, block_count, seq_count, grid_size, block_size
class LowerLatencyReductionKernel(ReductionKernel):
def __init__(self, dtype_out,
neutral, reduce_expr, map_expr=None, arguments=None,
name="reduce_kernel", keep=False, options=None, preamble=""):
ReductionKernel.__init__(self, dtype_out,
neutral, reduce_expr, map_expr, arguments,
name, keep, options, preamble)
self.shared_size=self.block_size*self.dtype_out.itemsize
def __call__(self, *args, **kwargs):
f = self.stage1_func
s1_invocation_args = []
for arg in args:
s1_invocation_args.append(arg.gpudata)
sz = args[0].size
result, block_count, seq_count, grid_size, block_size = call_prepare(self, sz, args[0].allocator)
f(grid_size, block_size, None,
*([result.gpudata]+s1_invocation_args+[seq_count, sz]),
shared_size=self.shared_size)
while True:
f = self.stage2_func
sz = result.size
result2 = result
result, block_count, seq_count, grid_size, block_size = call_prepare(self, sz, args[0].allocator)
f(grid_size, block_size, None,
*([result.gpudata, result2.gpudata]+s1_invocation_args+[seq_count, sz]),
shared_size=self.shared_size)
if block_count == 1:
return result
@context_dependent_memoize
def get_norm_kernel(dtype_x, dtype_out):
return ElementwiseKernel(
"%(tp_x)s *x, %(tp_z)s *z" % {
"tp_x": dtype_to_ctype(dtype_x),
"tp_z": dtype_to_ctype(dtype_out),
},
"z[i] = norm(x[i])",
"normalize")
def squared_norm(self):
a = self.data
dtype_out = match_precision(np.dtype('float64'), a.dtype)
out = a._new_like_me(dtype=dtype_out)
krnl = get_norm_kernel(a.dtype, dtype_out)
krnl(a, out)
return out
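# Minimal usage sketch (demonstration helper only): applying the elementwise
# norm kernel directly to a GPU array. This assumes a CUDA device is available
# and a context has been created (pycuda.autoinit is used here purely for the
# example); norm(x) returns the squared magnitude, so the result is [5., 25.].
def _example_squared_norm():
    import pycuda.autoinit  # noqa: F401 - creates a context on the default GPU
    x = pycuda.gpuarray.to_gpu(np.array([1 + 2j, 3 + 4j], dtype=np.complex64))
    out = pycuda.gpuarray.empty(x.shape, dtype=np.float32)
    get_norm_kernel(x.dtype, out.dtype)(x, out)
    return out.get()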
# FIXME: Write me!
#def multiply_and_add(self, other, mult_fac):
# """
# Return other multiplied by mult_fac and with self added.
# Self will be modified in place. This requires all inputs to be of the same
# precision.
# """
@context_dependent_memoize
def get_weighted_inner_kernel(dtype_x, dtype_y, dtype_w, dtype_out):
if (dtype_x == np.complex64) or (dtype_x == np.complex128):
inner_map="conj(x[i])*y[i]/w[i]"
else:
inner_map="x[i]*y[i]/w[i]"
return LowerLatencyReductionKernel(dtype_out,
neutral="0",
arguments="%(tp_x)s *x, %(tp_y)s *y, %(tp_w)s *w" % {
"tp_x": dtype_to_ctype(dtype_x),
"tp_y": dtype_to_ctype(dtype_y),
"tp_w": dtype_to_ctype(dtype_w),
},
reduce_expr="a+b",
map_expr=inner_map,
name="weighted_inner")
@context_dependent_memoize
def get_inner_kernel(dtype_x, dtype_y, dtype_out):
if (dtype_x == np.complex64) or (dtype_x == np.complex128):
inner_map="conj(x[i])*y[i]"
else:
inner_map="x[i]*y[i]"
return LowerLatencyReductionKernel(dtype_out,
neutral="0",
arguments="%(tp_x)s *x, %(tp_y)s *y" % {
"tp_x": dtype_to_ctype(dtype_x),
"tp_y": dtype_to_ctype(dtype_y),
},
reduce_expr="a+b",
map_expr=inner_map,
name="inner")
def inner(self, b):
a = self.data
dtype_out = _get_common_dtype(a,b)
krnl = get_inner_kernel(a.dtype, b.dtype, dtype_out)
return krnl(a, b).get().max()
vdot = inner
def weighted_inner(self, b, w):
if w is None:
return self.inner(b)
a = self.data
dtype_out = _get_common_dtype(a, b)
krnl = get_weighted_inner_kernel(a.dtype, b.dtype, w.dtype, dtype_out)
return krnl(a, b, w).get().max()
# Define PYCUDA MAXLOC for both single and double precision ##################
maxloc_preamble = """
struct MAXLOCN{
TTYPE max;
LTYPE loc;
__device__
MAXLOCN(){}
__device__
MAXLOCN(MAXLOCN const &src): max(src.max), loc(src.loc){}
__device__
MAXLOCN(MAXLOCN const volatile &src): max(src.max), loc(src.loc){}
__device__
MAXLOCN volatile &operator=( MAXLOCN const &src) volatile{
max = src.max;
loc = src.loc;
return *this;
}
};
__device__
MAXLOCN maxloc_red(MAXLOCN a, MAXLOCN b){
if (a.max > b.max)
return a;
else
return b;
}
__device__
MAXLOCN maxloc_start(){
MAXLOCN t;
t.max=0;
t.loc=0;
return t;
}
__device__
MAXLOCN maxloc_map(TTYPE val, LTYPE loc){
MAXLOCN t;
t.max = val;
t.loc = loc;
return t;
}
"""
maxloc_preamble_single = """
#define MAXLOCN maxlocs
#define TTYPE float
#define LTYPE int
""" + maxloc_preamble
maxloc_preamble_double = """
#define MAXLOCN maxlocd
#define TTYPE double
#define LTYPE long
""" + maxloc_preamble
maxloc_dtype_double = np.dtype([("max", np.float64), ("loc", np.int64)])
maxloc_dtype_single = np.dtype([("max", np.float32), ("loc", np.int32)])
maxloc_dtype_single = get_or_register_dtype("maxlocs", dtype=maxloc_dtype_single)
maxloc_dtype_double = get_or_register_dtype("maxlocd", dtype=maxloc_dtype_double)
mls = LowerLatencyReductionKernel(maxloc_dtype_single, neutral = "maxloc_start()",
reduce_expr="maxloc_red(a, b)", map_expr="maxloc_map(x[i], i)",
arguments="float *x", preamble=maxloc_preamble_single)
mld = LowerLatencyReductionKernel(maxloc_dtype_double, neutral = "maxloc_start()",
reduce_expr="maxloc_red(a, b)", map_expr="maxloc_map(x[i], i)",
arguments="double *x", preamble=maxloc_preamble_double)
max_loc_map = {'single':mls,'double':mld}
amls = LowerLatencyReductionKernel(maxloc_dtype_single, neutral = "maxloc_start()",
reduce_expr="maxloc_red(a, b)", map_expr="maxloc_map(abs(x[i]), i)",
arguments="float *x", preamble=maxloc_preamble_single)
amld = LowerLatencyReductionKernel(maxloc_dtype_double, neutral = "maxloc_start()",
reduce_expr="maxloc_red(a, b)", map_expr="maxloc_map(abs(x[i]), i)",
arguments="double *x", preamble=maxloc_preamble_double)
amlsc = LowerLatencyReductionKernel(maxloc_dtype_single, neutral = "maxloc_start()",
reduce_expr="maxloc_red(a, b)", map_expr="maxloc_map(abs(x[i]), i)",
arguments="pycuda::complex<float> *x", preamble=maxloc_preamble_single)
amldc = LowerLatencyReductionKernel(maxloc_dtype_double, neutral = "maxloc_start()",
reduce_expr="maxloc_red(a, b)", map_expr="maxloc_map(abs(x[i]), i)",
arguments="pycuda::complex<double> *x", preamble=maxloc_preamble_double)
abs_max_loc_map = {'single':{ 'real':amls, 'complex':amlsc }, 'double':{ 'real':amld, 'complex':amldc }}
def zeros(length, dtype=np.float64):
result = GPUArray(length, dtype=dtype)
    nwords = result.nbytes // 4
pycuda.driver.memset_d32(result.gpudata, 0, nwords)
return result
def ptr(self):
return self._data.ptr
def dot(self, other):
return pycuda.gpuarray.dot(self._data,other).get().max()
def min(self):
return pycuda.gpuarray.min(self._data).get().max()
def abs_max_loc(self):
maxloc = abs_max_loc_map[self.precision][self.kind](self._data)
maxloc = maxloc.get()
return float(maxloc['max']),int(maxloc['loc'])
def cumsum(self):
tmp = self.data*1
return icumsum(tmp)
def max(self):
return pycuda.gpuarray.max(self._data).get().max()
def max_loc(self):
maxloc = max_loc_map[self.precision](self._data)
maxloc = maxloc.get()
return float(maxloc['max']),int(maxloc['loc'])
def take(self, indices):
if not isinstance(indices, pycuda.gpuarray.GPUArray):
indices = pycuda.gpuarray.to_gpu(indices)
return pycuda.gpuarray.take(self.data, indices)
def numpy(self):
return self._data.get()
def _copy(self, self_ref, other_ref):
if (len(other_ref) <= len(self_ref)) :
from pycuda.elementwise import get_copy_kernel
func = get_copy_kernel(self.dtype, other_ref.dtype)
func.prepared_async_call(self_ref._grid, self_ref._block, None,
self_ref.gpudata, other_ref.gpudata,
self_ref.mem_size)
else:
        raise RuntimeError("The arrays must be the same length")
def _getvalue(self, index):
return self._data.get()[index]
def sum(self):
return pycuda.gpuarray.sum(self._data).get().max()
def clear(self):
    n32 = self.data.nbytes // 4
pycuda.driver.memset_d32(self.data.gpudata, 0, n32)
def _scheme_matches_base_array(array):
if isinstance(array, pycuda.gpuarray.GPUArray):
return True
else:
return False
def _copy_base_array(array):
data = pycuda.gpuarray.GPUArray((array.size), array.dtype)
if len(array) > 0:
pycuda.driver.memcpy_dtod(data.gpudata, array.gpudata, array.nbytes)
return data
def _to_device(array):
return pycuda.gpuarray.to_gpu(array)
| 11,741
| 31.436464
| 109
|
py
|
pycbc
|
pycbc-master/pycbc/types/optparse.py
|
# Copyright (C) 2015 Ian Harry, Tito Dal Canton
# 2022 Shichao Wu
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This modules contains extensions for use with argparse
"""
import copy
import argparse
from collections import defaultdict
class DictWithDefaultReturn(defaultdict):
default_set = False
ifo_set = False
def __bool__(self):
if self.items() and not all(entry is None for entry in self.values()):
            # True if any values are explicitly set.
return True
elif self['RANDOM_STRING_314324'] is not None:
# Or true if the default value was set
# NOTE: This stores the string RANDOM_STRING_314324 in the dict
# so subsequent calls will be caught in the first test here.
return True
else:
# Else false
return False
# Python 2 and 3 have different conventions for boolean method
__nonzero__ = __bool__
class MultiDetOptionAction(argparse.Action):
# Initialise the same as the standard 'append' action
def __init__(self,
option_strings,
dest,
nargs='+',
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if type is not None:
self.internal_type = type
else:
self.internal_type = str
new_default = DictWithDefaultReturn(lambda: default)
#new_default.default_value=default
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != argparse.OPTIONAL:
raise ValueError('nargs must be %r to supply const'
% argparse.OPTIONAL)
super(MultiDetOptionAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=new_default,
type=str,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
# Again this is modified from the standard argparse 'append' action
err_msg = "Issue with option: %s \n" %(self.dest,)
err_msg += "Received value: %s \n" %(' '.join(values),)
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, DictWithDefaultReturn())
items = getattr(namespace, self.dest)
items = copy.copy(items)
for value in values:
value = value.split(':')
if len(value) == 2:
# "Normal" case, all ifos supplied independently as "H1:VALUE"
if items.default_set:
err_msg += "If you are supplying a value for all ifos, you "
err_msg += "cannot also supply values for specific ifos."
raise ValueError(err_msg)
items[value[0]] = self.internal_type(value[1])
items.ifo_set = True
elif len(value) == 1:
# OR supply only one value and use this for all ifos
if items.default_set:
err_msg += "If you are supplying a value for all ifos, you "
err_msg += "must only supply one value."
raise ValueError(err_msg)
# Can't use a global and ifo specific options
if items.ifo_set:
err_msg += "If you are supplying a value for all ifos, you "
err_msg += "cannot also supply values for specific ifos."
raise ValueError(err_msg)
#items.default_value = self.internal_type(value[0])
new_default = self.internal_type(value[0])
items.default_factory = lambda: new_default
items.default_set = True
else:
                err_msg += "The character ':' is used to separate the "
err_msg += "ifo and the value. Please do not use it more than "
err_msg += "once."
raise ValueError(err_msg)
setattr(namespace, self.dest, items)
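# Minimal usage sketch (demonstration helper only): parsing a per-detector
# option with MultiDetOptionAction. The option name and the example values are
# assumptions chosen for demonstration; the parsed result behaves like
# {'H1': 5.5, 'L1': 5.0}.
def _example_multi_det_option():
    parser = argparse.ArgumentParser()
    parser.add_argument('--snr-threshold', type=float,
                        action=MultiDetOptionAction, metavar='IFO:VALUE')
    opts = parser.parse_args(['--snr-threshold', 'H1:5.5', 'L1:5.0'])
    return opts.snr_threshold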
class MultiDetOptionActionSpecial(MultiDetOptionAction):
"""
This class in an extension of the MultiDetOptionAction class to handle
cases where the : is already a special character. For example the channel
name is something like H1:CHANNEL_NAME. Here the channel name *must*
be provided uniquely for each ifo. The dictionary key is set to H1 and the
value to H1:CHANNEL_NAME for this example.
"""
def __call__(self, parser, namespace, values, option_string=None):
# Again this is modified from the standard argparse 'append' action
err_msg = "Issue with option: %s \n" %(self.dest,)
err_msg += "Received value: %s \n" %(' '.join(values),)
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, {})
items = getattr(namespace, self.dest)
items = copy.copy(items)
for value in values:
value_split = value.split(':')
if len(value_split) == 2:
# "Normal" case, all ifos supplied independently as "H1:VALUE"
if value_split[0] in items:
err_msg += "Multiple values supplied for ifo %s.\n" \
%(value_split[0],)
err_msg += "Already have %s." %(items[value_split[0]])
raise ValueError(err_msg)
else:
items[value_split[0]] = value
elif len(value_split) == 3:
# This is an unadvertised feature. It is used for cases where I
# want to pretend H1 data is actually L1 (or similar). So if I
# supply --channel-name H1:L1:LDAS-STRAIN I can use L1 data and
# pretend it is H1 internally.
if value_split[0] in items:
err_msg += "Multiple values supplied for ifo %s.\n" \
%(value_split[0],)
err_msg += "Already have %s." %(items[value_split[0]])
raise ValueError(err_msg)
else:
items[value_split[0]] = ':'.join(value_split[1:3])
else:
err_msg += "The character ':' is used to deliminate the "
err_msg += "ifo and the value. It must appear exactly "
err_msg += "once."
raise ValueError(err_msg)
setattr(namespace, self.dest, items)
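# Illustrative sketch (an addition, not part of the upstream module): with
# MultiDetOptionActionSpecial the full 'IFO:CHANNEL' string is kept as the
# stored value. The option name and channel are hypothetical.
def _example_multi_det_option_action_special():  # pragma: no cover
    parser = argparse.ArgumentParser()
    parser.add_argument('--channel-name', nargs='+',
                        action=MultiDetOptionActionSpecial)
    args = parser.parse_args(
        ['--channel-name', 'H1:GDS-CALIB_STRAIN', 'L1:GDS-CALIB_STRAIN'])
    assert args.channel_name['H1'] == 'H1:GDS-CALIB_STRAIN'
    return args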
class MultiDetMultiColonOptionAction(MultiDetOptionAction):
"""A special case of `MultiDetOptionAction` which allows one to use
arguments containing colons, such as `V1:FOOBAR:1`. The first colon is
assumed to be the separator between the detector and the argument.
All subsequent colons are kept as part of the argument. Unlike
`MultiDetOptionAction`, all arguments must be prefixed by the
corresponding detector.
"""
def __call__(self, parser, namespace, values, option_string=None):
err_msg = ('Issue with option: {}\n'
'Received value: {}\n').format(self.dest, ' '.join(values))
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, {})
items = copy.copy(getattr(namespace, self.dest))
for value in values:
if ':' not in value:
err_msg += ("Each argument must contain at least one ':' "
"character")
raise ValueError(err_msg)
detector, argument = value.split(':', 1)
if detector in items:
err_msg += ('Multiple values supplied for detector {},\n'
'already have {}.')
err_msg = err_msg.format(detector, items[detector])
raise ValueError(err_msg)
items[detector] = self.internal_type(argument)
setattr(namespace, self.dest, items)
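# Illustrative sketch (an addition, not part of the upstream module): only
# the first colon separates the detector from the argument, so later colons
# survive in the stored value. The option name is hypothetical.
def _example_multi_det_multi_colon_option_action():  # pragma: no cover
    parser = argparse.ArgumentParser()
    parser.add_argument('--frame-type', nargs='+',
                        action=MultiDetMultiColonOptionAction)
    args = parser.parse_args(['--frame-type', 'V1:V1:FOOBAR:1'])
    assert args.frame_type['V1'] == 'V1:FOOBAR:1'
    return args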
class MultiDetOptionAppendAction(MultiDetOptionAction):
def __call__(self, parser, namespace, values, option_string=None):
# Again this is modified from the standard argparse 'append' action
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, {})
items = getattr(namespace, self.dest)
items = copy.copy(items)
for value in values:
value = value.split(':')
if len(value) == 2:
# "Normal" case, all ifos supplied independetly as "H1:VALUE"
if value[0] in items:
items[value[0]].append(self.internal_type(value[1]))
else:
items[value[0]] = [self.internal_type(value[1])]
else:
err_msg = "Issue with option: %s \n" %(self.dest,)
err_msg += "Received value: %s \n" %(' '.join(values),)
err_msg += "The character ':' is used to distinguish the "
err_msg += "ifo and the value. It must be given exactly once "
err_msg += "for all entries"
raise ValueError(err_msg)
setattr(namespace, self.dest, items)
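# Illustrative sketch (an addition, not part of the upstream module):
# repeated IFO:VALUE entries accumulate in per-detector lists. The option
# name and file names are hypothetical.
def _example_multi_det_option_append_action():  # pragma: no cover
    parser = argparse.ArgumentParser()
    parser.add_argument('--veto-files', nargs='+',
                        action=MultiDetOptionAppendAction)
    args = parser.parse_args(['--veto-files', 'H1:vetoes_a.xml',
                              'H1:vetoes_b.xml', 'L1:vetoes.xml'])
    assert args.veto_files['H1'] == ['vetoes_a.xml', 'vetoes_b.xml']
    return args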
class DictOptionAction(argparse.Action):
# Initialise the same as the standard 'append' action
def __init__(self,
option_strings,
dest,
nargs='+',
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if type is not None:
self.internal_type = type
else:
self.internal_type = str
new_default = DictWithDefaultReturn(lambda: default)
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != argparse.OPTIONAL:
raise ValueError('nargs must be %r to supply const'
% argparse.OPTIONAL)
super(DictOptionAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=new_default,
type=str,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
# Again this is modified from the standard argparse 'append' action
err_msg = "Issue with option: %s \n" %(self.dest,)
err_msg += "Received value: %s \n" %(' '.join(values),)
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, {})
items = getattr(namespace, self.dest)
items = copy.copy(items)
for value in values:
if values == ['{}']:
break
value = value.split(':')
if len(value) == 2:
# "Normal" case, all extra arguments supplied independently
# as "param:VALUE"
items[value[0]] = self.internal_type(value[1])
else:
err_msg += "The character ':' is used to distinguish the "
err_msg += "parameter name and the value. Please do not "
err_msg += "use it more than or less than once."
raise ValueError(err_msg)
setattr(namespace, self.dest, items)
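# Illustrative sketch (an addition, not part of the upstream module):
# PARAM:VALUE pairs are collected into a single dictionary. The option and
# parameter names are hypothetical.
def _example_dict_option_action():  # pragma: no cover
    parser = argparse.ArgumentParser()
    parser.add_argument('--extra-args', nargs='+', type=float,
                        action=DictOptionAction)
    args = parser.parse_args(['--extra-args', 'f_lower:20', 'f_final:1024'])
    assert args.extra_args['f_lower'] == 20.0
    return args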
class MultiDetDictOptionAction(DictOptionAction):
"""A special case of `DictOptionAction` which allows one to use
argument containing the detector (channel) name, such as
`DETECTOR:PARAM:VALUE`. The first colon is the name of detector,
the second colon is the name of parameter, the third colon is the value.
Or similar to `DictOptionAction`, all arguments don't contain the name of
detector, such as `PARAM:VALUE`, this will assume each detector has same
values of those parameters.
"""
def __call__(self, parser, namespace, values, option_string=None):
# Again this is modified from the standard argparse 'append' action
err_msg = ('Issue with option: {}\n'
'Received value: {}\n').format(self.dest, ' '.join(values))
if getattr(namespace, self.dest, None) is None:
setattr(namespace, self.dest, {})
items = copy.copy(getattr(namespace, self.dest))
detector_args = {}
for value in values:
if values == ['{}']:
break
if value.count(':') == 2:
detector, param_value = value.split(':', 1)
param, val = param_value.split(':')
if detector not in detector_args:
detector_args[detector] = {param: self.internal_type(val)}
                elif param in detector_args[detector]:
                    err_msg += ("Multiple values supplied for the same "
                                "parameter {} under detector {},\n"
                                "already have {}.")
                    err_msg = err_msg.format(param, detector,
                                             detector_args[detector][param])
                    raise ValueError(err_msg)
else:
detector_args[detector][param] = self.internal_type(val)
elif value.count(':') == 1:
param, val = value.split(':')
for detector in getattr(namespace, 'instruments'):
if detector not in detector_args:
detector_args[detector] = \
{param: self.internal_type(val)}
                    elif param in detector_args[detector]:
                        err_msg += ("Multiple values supplied for the same "
                                    "parameter {} under detector {},\n"
                                    "already have {}.")
                        err_msg = err_msg.format(
                            param, detector,
                            detector_args[detector][param])
                        raise ValueError(err_msg)
else:
detector_args[detector][param] = \
self.internal_type(val)
else:
err_msg += ("Use format `DETECTOR:PARAM:VALUE` for each "
"detector, or use `PARAM:VALUE` for all.")
raise ValueError(err_msg)
items = detector_args
setattr(namespace, self.dest, items)
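# Illustrative sketch (an addition, not part of the upstream module): values
# can be given per detector as DETECTOR:PARAM:VALUE and are stored per
# detector, then per parameter name. The option and parameter names are
# hypothetical.
def _example_multi_det_dict_option_action():  # pragma: no cover
    parser = argparse.ArgumentParser()
    parser.add_argument('--fft-kwargs', nargs='+', type=float,
                        action=MultiDetDictOptionAction)
    args = parser.parse_args(['--fft-kwargs', 'H1:df:0.25', 'L1:df:0.5'])
    assert args.fft_kwargs['H1']['df'] == 0.25
    return args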
def required_opts(opt, parser, opt_list, required_by=None):
"""Check that all the opts are defined
Parameters
----------
opt : object
Result of option parsing
parser : object
OptionParser instance.
opt_list : list of strings
required_by : string, optional
the option that requires these options (if applicable)
"""
for name in opt_list:
attr = name[2:].replace('-', '_')
if not hasattr(opt, attr) or (getattr(opt, attr) is None):
err_str = "%s is missing " % name
if required_by is not None:
err_str += ", required by %s" % required_by
parser.error(err_str)
def required_opts_multi_ifo(opt, parser, ifo, opt_list, required_by=None):
"""Check that all the opts are defined
Parameters
----------
opt : object
Result of option parsing
parser : object
OptionParser instance.
ifo : string
opt_list : list of strings
required_by : string, optional
the option that requires these options (if applicable)
"""
for name in opt_list:
attr = name[2:].replace('-', '_')
try:
if getattr(opt, attr)[ifo] is None:
raise KeyError
except KeyError:
err_str = "%s is missing " % name
if required_by is not None:
err_str += ", required by %s" % required_by
parser.error(err_str)
def ensure_one_opt(opt, parser, opt_list):
""" Check that one and only one in the opt_list is defined in opt
Parameters
----------
opt : object
Result of option parsing
parser : object
OptionParser instance.
opt_list : list of strings
"""
the_one = None
for name in opt_list:
attr = name[2:].replace('-', '_')
if hasattr(opt, attr) and (getattr(opt, attr) is not None):
if the_one is None:
the_one = name
else:
parser.error("%s and %s are mutually exculsive" \
% (the_one, name))
if the_one is None:
parser.error("you must supply one of the following %s" \
% (', '.join(opt_list)))
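# Illustrative sketch (an addition, not part of the upstream module):
# require that exactly one of two hypothetical options was supplied on the
# command line.
def _example_ensure_one_opt():  # pragma: no cover
    parser = argparse.ArgumentParser()
    parser.add_argument('--psd-file')
    parser.add_argument('--psd-model')
    opt = parser.parse_args(['--psd-model', 'aLIGOZeroDetHighPower'])
    # Calls parser.error() unless exactly one of the options was given
    ensure_one_opt(opt, parser, ['--psd-file', '--psd-model'])
    return opt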
def ensure_one_opt_multi_ifo(opt, parser, ifo, opt_list):
""" Check that one and only one in the opt_list is defined in opt
Parameters
----------
opt : object
Result of option parsing
parser : object
OptionParser instance.
opt_list : list of strings
"""
the_one = None
for name in opt_list:
attr = name[2:].replace('-', '_')
try:
if getattr(opt, attr)[ifo] is None:
raise KeyError
except KeyError:
pass
else:
if the_one is None:
the_one = name
else:
parser.error("%s and %s are mutually exculsive" \
% (the_one, name))
if the_one is None:
parser.error("you must supply one of the following %s" \
% (', '.join(opt_list)))
def copy_opts_for_single_ifo(opt, ifo):
"""
Takes the namespace object (opt) from the multi-detector interface and
returns a namespace object for a single ifo that can be used with
functions expecting output from the single-detector interface.
"""
opt = copy.deepcopy(opt)
for arg, val in vars(opt).items():
if isinstance(val, DictWithDefaultReturn) or \
(isinstance(val, dict) and ifo in val):
setattr(opt, arg, getattr(opt, arg)[ifo])
return opt
def convert_to_process_params_dict(opt):
"""
Takes the namespace object (opt) from the multi-detector interface and
returns a dictionary of command line options that will be handled correctly
by the register_to_process_params ligolw function.
"""
opt = copy.deepcopy(opt)
for arg, val in vars(opt).items():
if isinstance(val, DictWithDefaultReturn):
new_val = []
for key in val.keys():
if isinstance(val[key], list):
for item in val[key]:
if item is not None:
new_val.append(':'.join([key, str(item)]))
else:
if val[key] is not None:
new_val.append(':'.join([key, str(val[key])]))
setattr(opt, arg, new_val)
return vars(opt)
def _positive_type(s, dtype=None):
"""
Ensure argument is positive and convert type to dtype
This is for the functions below to wrap to avoid code duplication.
"""
assert dtype is not None
err_msg = f"Input must be a positive {dtype}, not {s}"
try:
value = dtype(s)
except ValueError:
raise argparse.ArgumentTypeError(err_msg)
if value <= 0:
raise argparse.ArgumentTypeError(err_msg)
return value
def _nonnegative_type(s, dtype=None):
"""
Ensure argument is positive or zero and convert type to dtype
This is for the functions below to wrap to avoid code duplication.
"""
assert dtype is not None
err_msg = f"Input must be either a positive or zero {dtype}, not {s}"
try:
value = dtype(s)
except ValueError:
raise argparse.ArgumentTypeError(err_msg)
if value < 0:
raise argparse.ArgumentTypeError(err_msg)
return value
def positive_float(s):
"""
Ensure argument is a positive real number and return it as float.
To be used as type in argparse arguments.
"""
return _positive_type(s, dtype=float)
def nonnegative_float(s):
"""
Ensure argument is a positive real number or zero and return it as float.
To be used as type in argparse arguments.
"""
return _nonnegative_type(s, dtype=float)
def positive_int(s):
"""
Ensure argument is a positive integer and return it as int.
To be used as type in argparse arguments.
"""
return _positive_type(s, dtype=int)
def nonnegative_int(s):
"""
Ensure argument is a positive integer or zero and return it as int.
To be used as type in argparse arguments.
"""
return _nonnegative_type(s, dtype=int)
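# Illustrative sketch (an addition, not part of the upstream module): use
# positive_float as an argparse type so invalid values are rejected at parse
# time. The option name is hypothetical.
def _example_positive_float_type():  # pragma: no cover
    parser = argparse.ArgumentParser()
    parser.add_argument('--segment-length', type=positive_float)
    args = parser.parse_args(['--segment-length', '256'])
    # A zero or negative value would raise an ArgumentTypeError instead
    assert args.segment_length == 256.0
    return args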
| 21,934
| 40.076779
| 80
|
py
|
pycbc
|
pycbc-master/pycbc/types/array.py
|
# Copyright (C) 2012 Alex Nitz, Josh Willis, Andrew Miller, Tito Dal Canton
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides a device-independent Array class based on PyCUDA and Numpy.
"""
BACKEND_PREFIX="pycbc.types.array_"
import h5py
import os as _os
from functools import wraps
import lal as _lal
import numpy as _numpy
from numpy import float32, float64, complex64, complex128, ones
from numpy.linalg import norm
import pycbc.scheme as _scheme
from pycbc.scheme import schemed, cpuonly
from pycbc.opt import LimitedSizeDict
#! FIXME: the uint32 datatype has not been fully tested,
# we should restrict any functions that do not allow an
# array of uint32 integers
_ALLOWED_DTYPES = [_numpy.float32, _numpy.float64, _numpy.complex64,
_numpy.complex128, _numpy.uint32, _numpy.int32, int]
try:
_ALLOWED_SCALARS = [int, long, float, complex] + _ALLOWED_DTYPES
except NameError:
_ALLOWED_SCALARS = [int, float, complex] + _ALLOWED_DTYPES
def _convert_to_scheme(ary):
if not isinstance(ary._scheme, _scheme.mgr.state.__class__):
converted_array = Array(ary, dtype=ary._data.dtype)
ary._data = converted_array._data
ary._scheme = _scheme.mgr.state
def _convert(func):
@wraps(func)
def convert(self, *args, **kwargs):
_convert_to_scheme(self)
return func(self, *args, **kwargs)
return convert
def _nocomplex(func):
@wraps(func)
def nocomplex(self, *args, **kwargs):
if self.kind == 'real':
return func(self, *args, **kwargs)
else:
raise TypeError( func.__name__ + " does not support complex types")
return nocomplex
def _noreal(func):
@wraps(func)
def noreal(self, *args, **kwargs):
if self.kind == 'complex':
return func(self, *args, **kwargs)
else:
raise TypeError( func.__name__ + " does not support real types")
return noreal
def force_precision_to_match(scalar, precision):
if _numpy.iscomplexobj(scalar):
if precision == 'single':
return _numpy.complex64(scalar)
else:
return _numpy.complex128(scalar)
else:
if precision == 'single':
return _numpy.float32(scalar)
else:
return _numpy.float64(scalar)
def common_kind(*dtypes):
for dtype in dtypes:
if dtype.kind == 'c':
return dtype
return dtypes[0]
@schemed(BACKEND_PREFIX)
def _to_device(array):
""" Move input to device """
err_msg = "This function is a stub that should be overridden using the "
err_msg += "scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@schemed(BACKEND_PREFIX)
def _copy_base_array(array):
""" Copy a backend array"""
err_msg = "This function is a stub that should be overridden using the "
err_msg += "scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@schemed(BACKEND_PREFIX)
def _scheme_matches_base_array(array):
""" Check that input matches array type for scheme """
err_msg = "This function is a stub that should be overridden using the "
err_msg += "scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
def check_same_len_precision(a, b):
"""Check that the two arguments have the same length and precision.
Raises ValueError if they do not.
"""
if len(a) != len(b):
msg = 'lengths do not match ({} vs {})'.format(
len(a), len(b))
raise ValueError(msg)
if a.precision != b.precision:
msg = 'precisions do not match ({} vs {})'.format(
a.precision, b.precision)
raise TypeError(msg)
class Array(object):
"""Array used to do numeric calculations on a various compute
devices. It is a convience wrapper around numpy, and
pycuda.
"""
def __init__(self, initial_array, dtype=None, copy=True):
""" initial_array: An array-like object as specified by NumPy, this
also includes instances of an underlying data type as described in
section 3 or an instance of the PYCBC Array class itself. This
object is used to populate the data of the array.
dtype: A NumPy style dtype that describes the type of
        encapsulated data (float32, complex64, etc)
copy: This defines whether the initial_array is copied to instantiate
the array or is simply referenced. If copy is false, new data is not
created, and so all arguments that would force a copy are ignored.
The default is to copy the given object.
"""
self._scheme=_scheme.mgr.state
self._saved = LimitedSizeDict(size_limit=2**5)
#Unwrap initial_array
if isinstance(initial_array, Array):
initial_array = initial_array._data
if not copy:
if not _scheme_matches_base_array(initial_array):
raise TypeError("Cannot avoid a copy of this array")
else:
self._data = initial_array
# Check that the dtype is supported.
if self._data.dtype not in _ALLOWED_DTYPES:
raise TypeError(str(self._data.dtype) + ' is not supported')
if dtype and dtype != self._data.dtype:
raise TypeError("Can only set dtype when allowed to copy data")
if copy:
# First we will check the dtype that we are given
if not hasattr(initial_array, 'dtype'):
initial_array = _numpy.array(initial_array)
# Determine the dtype to use
if dtype is not None:
dtype = _numpy.dtype(dtype)
if dtype not in _ALLOWED_DTYPES:
raise TypeError(str(dtype) + ' is not supported')
if dtype.kind != 'c' and initial_array.dtype.kind == 'c':
raise TypeError(str(initial_array.dtype) + ' cannot be cast as ' + str(dtype))
elif initial_array.dtype in _ALLOWED_DTYPES:
dtype = initial_array.dtype
else:
if initial_array.dtype.kind == 'c':
dtype = complex128
else:
dtype = float64
# Cast to the final dtype if needed
if initial_array.dtype != dtype:
initial_array = initial_array.astype(dtype)
#Create new instance with initial_array as initialization.
if issubclass(type(self._scheme), _scheme.CPUScheme):
if hasattr(initial_array, 'get'):
self._data = _numpy.array(initial_array.get())
else:
self._data = _numpy.array(initial_array, dtype=dtype, ndmin=1)
elif _scheme_matches_base_array(initial_array):
self._data = _copy_base_array(initial_array) # pylint:disable=assignment-from-no-return
else:
initial_array = _numpy.array(initial_array, dtype=dtype, ndmin=1)
self._data = _to_device(initial_array) # pylint:disable=assignment-from-no-return
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
inputs = [i.numpy() if isinstance(i, Array) else i for i in inputs]
ret = getattr(ufunc, method)(*inputs, **kwargs)
if hasattr(ret, 'shape') and ret.shape == self.shape:
ret = self._return(ret)
return ret
def __array__(self, dtype=None):
arr = self.numpy()
if dtype is not None:
arr = arr.astype(dtype)
return arr
@property
def shape(self):
return self._data.shape
def _memoize_single(func):
@wraps(func)
def memoize_single(self, arg):
badh = str(arg)
if badh in self._saved:
return self._saved[badh]
res = func(self, arg) # pylint:disable=not-callable
self._saved[badh] = res
return res
return memoize_single
def _returnarray(func):
@wraps(func)
def returnarray(self, *args, **kwargs):
return Array(func(self, *args, **kwargs), copy=False) # pylint:disable=not-callable
return returnarray
def _returntype(func):
@wraps(func)
def returntype(self, *args, **kwargs):
ary = func(self, *args, **kwargs) # pylint:disable=not-callable
if ary is NotImplemented:
return NotImplemented
return self._return(ary)
return returntype
def _return(self, ary):
"""Wrap the ary to return an Array type """
if isinstance(ary, Array):
return ary
return Array(ary, copy=False)
def _checkother(func):
@wraps(func)
def checkother(self, *args):
nargs = ()
for other in args:
self._typecheck(other)
if type(other) in _ALLOWED_SCALARS:
other = force_precision_to_match(other, self.precision)
nargs +=(other,)
elif isinstance(other, type(self)) or type(other) is Array:
check_same_len_precision(self, other)
_convert_to_scheme(other)
nargs += (other._data,)
else:
return NotImplemented
return func(self, *nargs) # pylint:disable=not-callable
return checkother
def _vcheckother(func):
@wraps(func)
def vcheckother(self, *args):
nargs = ()
for other in args:
self._typecheck(other)
if isinstance(other, type(self)) or type(other) is Array:
check_same_len_precision(self, other)
_convert_to_scheme(other)
nargs += (other._data,)
else:
raise TypeError('array argument required')
return func(self, *nargs) # pylint:disable=not-callable
return vcheckother
def _vrcheckother(func):
@wraps(func)
def vrcheckother(self, *args):
nargs = ()
for other in args:
if isinstance(other, type(self)) or type(other) is Array:
check_same_len_precision(self, other)
_convert_to_scheme(other)
nargs += (other._data,)
else:
raise TypeError('array argument required')
return func(self, *nargs) # pylint:disable=not-callable
return vrcheckother
def _icheckother(func):
@wraps(func)
def icheckother(self, other):
""" Checks the input to in-place operations """
self._typecheck(other)
if type(other) in _ALLOWED_SCALARS:
if self.kind == 'real' and type(other) == complex:
raise TypeError('dtypes are incompatible')
other = force_precision_to_match(other, self.precision)
elif isinstance(other, type(self)) or type(other) is Array:
check_same_len_precision(self, other)
if self.kind == 'real' and other.kind == 'complex':
raise TypeError('dtypes are incompatible')
_convert_to_scheme(other)
other = other._data
else:
return NotImplemented
return func(self, other) # pylint:disable=not-callable
return icheckother
def _typecheck(self, other):
""" Additional typechecking for other. Placeholder for use by derived
types.
"""
pass
@_returntype
@_convert
@_checkother
def __mul__(self,other):
""" Multiply by an Array or a scalar and return an Array. """
return self._data * other
__rmul__ = __mul__
@_convert
@_icheckother
def __imul__(self,other):
""" Multiply by an Array or a scalar and return an Array. """
self._data *= other
return self
@_returntype
@_convert
@_checkother
def __add__(self,other):
""" Add Array to Array or scalar and return an Array. """
return self._data + other
__radd__ = __add__
def fill(self, value):
self._data.fill(value)
@_convert
@_icheckother
def __iadd__(self,other):
""" Add Array to Array or scalar and return an Array. """
self._data += other
return self
@_convert
@_checkother
@_returntype
def __truediv__(self,other):
""" Divide Array by Array or scalar and return an Array. """
return self._data / other
@_returntype
@_convert
@_checkother
def __rtruediv__(self,other):
""" Divide Array by Array or scalar and return an Array. """
return self._data.__rtruediv__(other)
@_convert
@_icheckother
def __itruediv__(self,other):
""" Divide Array by Array or scalar and return an Array. """
self._data /= other
return self
__div__ = __truediv__
__idiv__ = __itruediv__
__rdiv__ = __rtruediv__
@_returntype
@_convert
def __neg__(self):
""" Return negation of self """
return - self._data
@_returntype
@_convert
@_checkother
def __sub__(self,other):
""" Subtract Array or scalar from Array and return an Array. """
return self._data - other
@_returntype
@_convert
@_checkother
def __rsub__(self,other):
""" Subtract Array or scalar from Array and return an Array. """
return self._data.__rsub__(other)
@_convert
@_icheckother
def __isub__(self,other):
""" Subtract Array or scalar from Array and return an Array. """
self._data -= other
return self
@_returntype
@_convert
@_checkother
def __pow__(self,other):
""" Exponentiate Array by scalar """
return self._data ** other
@_returntype
@_convert
def __abs__(self):
""" Return absolute value of Array """
return abs(self._data)
def __len__(self):
""" Return length of Array """
return len(self._data)
def __str__(self):
return str(self._data)
@property
def ndim(self):
return self._data.ndim
def __eq__(self,other):
"""
This is the Python special method invoked whenever the '=='
comparison is used. It will return true if the data of two
PyCBC arrays are identical, and all of the numeric meta-data
are identical, irrespective of whether or not the two
instances live in the same memory (for that comparison, the
Python statement 'a is b' should be used instead).
Thus, this method returns 'True' if the types of both 'self'
and 'other' are identical, as well as their lengths, dtypes
and the data in the arrays, element by element. It will always
do the comparison on the CPU, but will *not* move either object
to the CPU if it is not already there, nor change the scheme of
either object. It is possible to compare a CPU object to a GPU
object, and the comparison should be true if the data and
meta-data of the two objects are the same.
Note in particular that this function returns a single boolean,
and not an array of booleans as Numpy does. If the numpy
behavior is instead desired it can be obtained using the numpy()
method of the PyCBC type to get a numpy instance from each
object, and invoking '==' on those two instances.
Parameters
----------
other: another Python object, that should be tested for equality
with 'self'.
Returns
-------
boolean: 'True' if the types, dtypes, lengths, and data of the
two objects are each identical.
"""
# Writing the first test as below allows this method to be safely
# called from subclasses.
if type(self) != type(other):
return False
if self.dtype != other.dtype:
return False
if len(self) != len(other):
return False
# Now we've checked meta-data, so look at the actual data itself:
# The numpy() method call will put a copy of GPU data onto a CPU
# array, and could therefore be slow. As noted in the help for
# this function we don't worry about that.
sary = self.numpy()
oary = other.numpy()
# Now we know that both sary and oary are numpy arrays. The
# '==' statement returns an array of booleans, and the all()
# method of that array returns 'True' only if every element
# of that array of booleans is True.
return (sary == oary).all()
def almost_equal_elem(self,other,tol,relative=True):
"""
Compare whether two array types are almost equal, element
by element.
If the 'relative' parameter is 'True' (the default) then the
'tol' parameter (which must be positive) is interpreted as a
relative tolerance, and the comparison returns 'True' only if
abs(self[i]-other[i]) <= tol*abs(self[i])
for all elements of the array.
If 'relative' is 'False', then 'tol' is an absolute tolerance,
and the comparison is true only if
abs(self[i]-other[i]) <= tol
for all elements of the array.
Other meta-data (type, dtype, and length) must be exactly equal.
If either object's memory lives on the GPU it will be copied to
the CPU for the comparison, which may be slow. But the original
object itself will not have its memory relocated nor scheme
changed.
Parameters
----------
other
Another Python object, that should be tested for
almost-equality with 'self', element-by-element.
tol
A non-negative number, the tolerance, which is interpreted
as either a relative tolerance (the default) or an absolute
tolerance.
relative
A boolean, indicating whether 'tol' should be interpreted
as a relative tolerance (if True, the default if this argument
            is omitted) or as an absolute tolerance (if relative is False).
Returns
-------
boolean
'True' if the data agree within the tolerance, as
interpreted by the 'relative' keyword, and if the types,
lengths, and dtypes are exactly the same.
"""
# Check that the tolerance is non-negative and raise an
# exception otherwise.
if (tol<0):
raise ValueError("Tolerance cannot be negative")
# Check that the meta-data agree; the type check is written in
# this way so that this method may be safely called from
# subclasses as well.
if type(other) != type(self):
return False
if self.dtype != other.dtype:
return False
if len(self) != len(other):
return False
# The numpy() method will move any GPU memory onto the CPU.
# Slow, but the user was warned.
diff = abs(self.numpy()-other.numpy())
if relative:
cmpary = tol*abs(self.numpy())
else:
cmpary = tol*ones(len(self),dtype=self.dtype)
return (diff<=cmpary).all()
def almost_equal_norm(self,other,tol,relative=True):
"""
Compare whether two array types are almost equal, normwise.
If the 'relative' parameter is 'True' (the default) then the
'tol' parameter (which must be positive) is interpreted as a
relative tolerance, and the comparison returns 'True' only if
abs(norm(self-other)) <= tol*abs(norm(self)).
If 'relative' is 'False', then 'tol' is an absolute tolerance,
and the comparison is true only if
abs(norm(self-other)) <= tol
Other meta-data (type, dtype, and length) must be exactly equal.
If either object's memory lives on the GPU it will be copied to
the CPU for the comparison, which may be slow. But the original
object itself will not have its memory relocated nor scheme
changed.
Parameters
----------
other
another Python object, that should be tested for
almost-equality with 'self', based on their norms.
tol
a non-negative number, the tolerance, which is interpreted
as either a relative tolerance (the default) or an absolute
tolerance.
relative
A boolean, indicating whether 'tol' should be interpreted
as a relative tolerance (if True, the default if this argument
            is omitted) or as an absolute tolerance (if relative is False).
Returns
-------
boolean
'True' if the data agree within the tolerance, as
interpreted by the 'relative' keyword, and if the types,
lengths, and dtypes are exactly the same.
"""
# Check that the tolerance is non-negative and raise an
# exception otherwise.
if (tol<0):
raise ValueError("Tolerance cannot be negative")
# Check that the meta-data agree; the type check is written in
# this way so that this method may be safely called from
# subclasses as well.
if type(other) != type(self):
return False
if self.dtype != other.dtype:
return False
if len(self) != len(other):
return False
# The numpy() method will move any GPU memory onto the CPU.
# Slow, but the user was warned.
diff = self.numpy()-other.numpy()
dnorm = norm(diff)
if relative:
return (dnorm <= tol*norm(self))
else:
return (dnorm <= tol)
@_returntype
@_convert
def real(self):
""" Return real part of Array """
return Array(self._data.real, copy=True)
@_returntype
@_convert
def imag(self):
""" Return imaginary part of Array """
return Array(self._data.imag, copy=True)
@_returntype
@_convert
def conj(self):
""" Return complex conjugate of Array. """
return self._data.conj()
@_returntype
@_convert
@schemed(BACKEND_PREFIX)
def squared_norm(self):
""" Return the elementwise squared norm of the array """
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_returntype
@_checkother
@_convert
@schemed(BACKEND_PREFIX)
def multiply_and_add(self, other, mult_fac):
""" Return other multiplied by mult_fac and with self added.
Self is modified in place and returned as output.
Precisions of inputs must match.
"""
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_vrcheckother
@_convert
@schemed(BACKEND_PREFIX)
def inner(self, other):
""" Return the inner product of the array with complex conjugation.
"""
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_vrcheckother
@_convert
@schemed(BACKEND_PREFIX)
def vdot(self, other):
""" Return the inner product of the array with complex conjugation.
"""
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_convert
@schemed(BACKEND_PREFIX)
def clear(self):
""" Clear out the values of the array. """
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_vrcheckother
@_convert
@schemed(BACKEND_PREFIX)
def weighted_inner(self, other, weight):
""" Return the inner product of the array with complex conjugation.
"""
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_convert
@schemed(BACKEND_PREFIX)
def sum(self):
""" Return the sum of the the array. """
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_returntype
@_convert
@schemed(BACKEND_PREFIX)
def cumsum(self):
""" Return the cumulative sum of the the array. """
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_convert
@_nocomplex
@schemed(BACKEND_PREFIX)
def max(self):
""" Return the maximum value in the array. """
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_convert
@_nocomplex
@schemed(BACKEND_PREFIX)
def max_loc(self):
"""Return the maximum value in the array along with the index location """
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_convert
@schemed(BACKEND_PREFIX)
def abs_arg_max(self):
""" Return location of the maximum argument max """
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_convert
@schemed(BACKEND_PREFIX)
def abs_max_loc(self):
"""Return the maximum elementwise norm in the array along with the index location"""
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_convert
@_nocomplex
@schemed(BACKEND_PREFIX)
def min(self):
""" Return the maximum value in the array. """
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_returnarray
@_convert
@schemed(BACKEND_PREFIX)
def take(self, indices):
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_convert
@_vcheckother
@schemed(BACKEND_PREFIX)
def dot(self, other):
""" Return the dot product"""
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@schemed(BACKEND_PREFIX)
def _getvalue(self, index):
"""Helper function to return a single value from an array. May be very
slow if the memory is on a gpu.
"""
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_memoize_single
@_returntype
def _getslice(self, index):
return self._return(self._data[index])
@_convert
def __getitem__(self, index):
""" Return items from the Array. This not guaranteed to be fast for
returning single values.
"""
if isinstance(index, slice):
return self._getslice(index)
else:
return self._getvalue(index)
@_convert
def resize(self, new_size):
"""Resize self to new_size
"""
if new_size == len(self):
return
else:
self._saved = LimitedSizeDict(size_limit=2**5)
new_arr = zeros(new_size, dtype=self.dtype)
if len(self) <= new_size:
new_arr[0:len(self)] = self
else:
new_arr[:] = self[0:new_size]
self._data = new_arr._data
@_convert
def roll(self, shift):
"""shift vector
"""
new_arr = zeros(len(self), dtype=self.dtype)
if shift < 0:
shift = shift - len(self) * (shift // len(self))
if shift == 0:
return
new_arr[0:shift] = self[len(self)-shift: len(self)]
new_arr[shift:len(self)] = self[0:len(self)-shift]
self._saved = LimitedSizeDict(size_limit=2**5)
self._data = new_arr._data
@_returntype
@_convert
def astype(self, dtype):
if _numpy.dtype(self.dtype) == _numpy.dtype(dtype):
return self
else:
return self._data.astype(dtype)
@schemed(BACKEND_PREFIX)
def _copy(self, self_ref, other_ref):
"""Helper function to copy between two arrays. The arrays references
should be bare array types and not `Array` class instances.
"""
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_convert
def __setitem__(self, index, other):
if isinstance(other,Array):
_convert_to_scheme(other)
if self.kind == 'real' and other.kind == 'complex':
raise ValueError('Cannot set real value with complex')
if isinstance(index,slice):
self_ref = self._data[index]
other_ref = other._data
else:
self_ref = self._data[index:index+1]
other_ref = other._data
self._copy(self_ref, other_ref)
elif type(other) in _ALLOWED_SCALARS:
if isinstance(index, slice):
self[index].fill(other)
else:
self[index:index+1].fill(other)
else:
raise TypeError('Can only copy data from another Array')
@property
def precision(self):
if self.dtype == float32 or self.dtype == complex64:
return 'single'
else:
return 'double'
@property
def kind(self):
if self.dtype == float32 or self.dtype == float64:
return 'real'
elif self.dtype == complex64 or self.dtype == complex128:
return 'complex'
else:
return 'unknown'
@property
@_convert
def data(self):
"""Returns the internal python array """
return self._data
@data.setter
def data(self,other):
dtype = None
if hasattr(other,'dtype'):
dtype = other.dtype
temp = Array(other, dtype=dtype)
self._data = temp._data
@property
@_convert
@schemed(BACKEND_PREFIX)
def ptr(self):
""" Returns a pointer to the memory of this array """
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@property
def itemsize(self):
return self.dtype.itemsize
@property
def nbytes(self):
return len(self.data) * self.itemsize
@property
@cpuonly
@_convert
def _swighelper(self):
""" Used internally by SWIG typemaps to ensure @_convert
is called and scheme is correct
"""
        return self
@_convert
@schemed(BACKEND_PREFIX)
def numpy(self):
""" Returns a Numpy Array that contains this data """
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_convert
def lal(self):
""" Returns a LAL Object that contains this data """
lal_data = None
if self._data.dtype == float32:
lal_data = _lal.CreateREAL4Vector(len(self))
elif self._data.dtype == float64:
lal_data = _lal.CreateREAL8Vector(len(self))
elif self._data.dtype == complex64:
lal_data = _lal.CreateCOMPLEX8Vector(len(self))
elif self._data.dtype == complex128:
lal_data = _lal.CreateCOMPLEX16Vector(len(self))
lal_data.data[:] = self.numpy()
return lal_data
@property
def dtype(self):
return self._data.dtype
def save(self, path, group=None):
"""
Save array to a Numpy .npy, hdf, or text file. When saving a complex array as
text, the real and imaginary parts are saved as the first and second
column respectively. When using hdf format, the data is stored
as a single vector, along with relevant attributes.
Parameters
----------
path: string
Destination file path. Must end with either .hdf, .npy or .txt.
group: string
Additional name for internal storage use. Ex. hdf storage uses
this as the key value.
Raises
------
ValueError
If path does not end in .npy or .txt.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
_numpy.save(path, self.numpy())
elif ext == '.txt':
if self.kind == 'real':
_numpy.savetxt(path, self.numpy())
elif self.kind == 'complex':
output = _numpy.vstack((self.numpy().real,
self.numpy().imag)).T
_numpy.savetxt(path, output)
elif ext == '.hdf':
key = 'data' if group is None else group
with h5py.File(path, 'a') as f:
f.create_dataset(key, data=self.numpy(), compression='gzip',
compression_opts=9, shuffle=True)
else:
raise ValueError('Path must end with .npy, .txt, or .hdf')
@_convert
def trim_zeros(self):
"""Remove the leading and trailing zeros.
"""
tmp = self.numpy()
f = len(self)-len(_numpy.trim_zeros(tmp, trim='f'))
b = len(self)-len(_numpy.trim_zeros(tmp, trim='b'))
return self[f:len(self)-b]
@_returntype
@_convert
def view(self, dtype):
"""
Return a 'view' of the array with its bytes now interpreted according
to 'dtype'. The location in memory is unchanged and changing elements
in a view of an array will also change the original array.
Parameters
----------
dtype : numpy dtype (one of float32, float64, complex64 or complex128)
The new dtype that should be used to interpret the bytes of self
"""
return self._data.view(dtype)
def copy(self):
""" Return copy of this array """
return self._return(self.data.copy())
def __lt__(self, other):
return self.numpy().__lt__(other)
def __le__(self, other):
return self.numpy().__le__(other)
def __ne__(self, other):
return self.numpy().__ne__(other)
def __gt__(self, other):
return self.numpy().__gt__(other)
def __ge__(self, other):
return self.numpy().__ge__(other)
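# Illustrative sketch (an addition, not part of the upstream module):
# element-wise arithmetic with the Array class, assuming the default CPU
# scheme is active.
def _example_array_arithmetic():  # pragma: no cover
    a = Array([1.0, 2.0, 3.0], dtype=float64)
    b = a * 2 + 1
    assert (b.numpy() == _numpy.array([3., 5., 7.])).all()
    assert a.sum() == 6.0
    return b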
# Convenience functions for determining dtypes
def real_same_precision_as(data):
if data.precision == 'single':
return float32
elif data.precision == 'double':
return float64
def complex_same_precision_as(data):
if data.precision == 'single':
return complex64
elif data.precision == 'double':
return complex128
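# Illustrative sketch (an addition, not part of the upstream module): pick
# output dtypes matching the precision of an existing array.
def _example_same_precision_helpers():  # pragma: no cover
    a = Array([1.0, 2.0], dtype=float32)
    assert real_same_precision_as(a) == float32
    assert complex_same_precision_as(a) == complex64
    return a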
def _return_array(func):
@wraps(func)
def return_array(*args, **kwds):
return Array(func(*args, **kwds), copy=False)
return return_array
@_return_array
@schemed(BACKEND_PREFIX)
def zeros(length, dtype=float64):
""" Return an Array filled with zeros.
"""
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
@_return_array
@schemed(BACKEND_PREFIX)
def empty(length, dtype=float64):
""" Return an empty Array (no initialization)
"""
err_msg = "This function is a stub that should be overridden using "
err_msg += "the scheme. You shouldn't be seeing this error!"
raise ValueError(err_msg)
def load_array(path, group=None):
"""Load an Array from an HDF5, ASCII or Numpy file. The file type is
inferred from the file extension, which must be `.hdf`, `.txt` or `.npy`.
For ASCII and Numpy files with a single column, a real array is returned.
For files with two columns, the columns are assumed to contain the real
and imaginary parts of a complex array respectively.
The default data types will be double precision floating point.
Parameters
----------
path : string
Input file path. Must end with either `.npy`, `.txt` or `.hdf`.
group: string
Additional name for internal storage use. When reading HDF files, this
is the path to the HDF dataset to read.
Raises
------
ValueError
If path does not end with a supported extension. For Numpy and ASCII
input files, this is also raised if the array does not have 1 or 2
dimensions.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
data = _numpy.load(path)
elif ext == '.txt':
data = _numpy.loadtxt(path)
elif ext == '.hdf':
key = 'data' if group is None else group
with h5py.File(path, 'r') as f:
array = Array(f[key])
return array
else:
raise ValueError('Path must end with .npy, .hdf, or .txt')
if data.ndim == 1:
return Array(data)
elif data.ndim == 2:
return Array(data[:,0] + 1j*data[:,1])
    raise ValueError('File has %s dimensions, cannot convert to Array, '
                     'must be 1 (real) or 2 (complex)' % data.ndim)
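# Illustrative sketch (an addition, not part of the upstream module):
# round-trip an Array through an HDF file with save() and load_array().
# The file name 'example_array.hdf' is hypothetical.
def _example_save_and_load():  # pragma: no cover
    a = Array([1.0, 2.0, 3.0], dtype=float64)
    a.save('example_array.hdf')
    b = load_array('example_array.hdf')
    assert a == b
    return b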
| 39,845
| 33.588542
| 108
|
py
|
pycbc
|
pycbc-master/pycbc/types/config.py
|
# Copyright (C) 2013,2017,2021 Ian Harry, Duncan Brown, Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides a wrapper to the ConfigParser utilities for pycbc.
"""
import re
import itertools
import logging
from io import StringIO
import configparser as ConfigParser
class DeepCopyableConfigParser(ConfigParser.ConfigParser):
"""
The standard SafeConfigParser no longer supports deepcopy() as of python
2.7 (see http://bugs.python.org/issue16058). This subclass restores that
functionality.
"""
def __deepcopy__(self, memo):
# http://stackoverflow.com/questions/23416370
# /manually-building-a-deep-copy-of-a-configparser-in-python-2-7
config_string = StringIO()
self.write(config_string)
config_string.seek(0)
new_config = self.__class__()
        new_config.read_file(config_string)
return new_config
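# Illustrative sketch (an addition, not part of the upstream module):
# deep-copying a parser yields an independent configuration object. The
# section and option names used here are hypothetical.
def _example_deep_copyable_config_parser():  # pragma: no cover
    import copy
    cp = DeepCopyableConfigParser()
    cp.add_section('workflow')
    cp.set('workflow', 'start-time', '1000000000')
    cp2 = copy.deepcopy(cp)
    cp2.set('workflow', 'start-time', '1000000100')
    # The original parser is unaffected by changes to the copy
    assert cp.get('workflow', 'start-time') == '1000000000'
    return cp2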
class InterpolatingConfigParser(DeepCopyableConfigParser):
"""
This is a sub-class of DeepCopyableConfigParser, which lets
us add a few additional helper features that are useful in workflows.
"""
def __init__(
self,
configFiles=None,
overrideTuples=None,
parsedFilePath=None,
deleteTuples=None,
skip_extended=False,
sanitize_newline=True,
):
"""
Initialize an InterpolatingConfigParser. This reads the input configuration
files, overrides values if necessary and performs the interpolation.
Parameters
-----------
configFiles : Path to .ini file, or list of paths
The file(s) to be read in and parsed.
overrideTuples : List of (section, option, value) tuples
Add the (section, option, value) triplets provided
in this list to the provided .ini file(s). If the section, option
pair is already present, it will be overwritten.
parsedFilePath : Path, optional (default=None)
If given, write the parsed .ini file back to disk at this location.
deleteTuples : List of (section, option) tuples
Delete the (section, option) pairs provided
in this list from provided .ini file(s). If the section only
is provided, the entire section will be deleted.
Returns
--------
InterpolatingConfigParser
Initialized InterpolatingConfigParser instance.
"""
if configFiles is None:
configFiles = []
if overrideTuples is None:
overrideTuples = []
if deleteTuples is None:
deleteTuples = []
DeepCopyableConfigParser.__init__(self)
# Enable case sensitive options
self.optionxform = str
self.read_ini_file(configFiles)
# Split sections like [inspiral&tmplt] into [inspiral] and [tmplt]
self.split_multi_sections()
# Populate shared options from the [sharedoptions] section
self.populate_shared_sections()
# Do deletes from command line
for delete in deleteTuples:
if len(delete) == 1:
if self.remove_section(delete[0]) is False:
raise ValueError(
"Cannot delete section %s, "
"no such section in configuration." % delete
)
logging.info(
"Deleting section %s from configuration", delete[0]
)
elif len(delete) == 2:
if self.remove_option(delete[0], delete[1]) is False:
raise ValueError(
"Cannot delete option %s from section %s,"
" no such option in configuration." % delete
)
logging.info(
"Deleting option %s from section %s in " "configuration",
delete[1],
delete[0],
)
else:
raise ValueError(
"Deletes must be tuples of length 1 or 2. "
"Got %s." % str(delete)
)
# Do overrides from command line
for override in overrideTuples:
if len(override) not in [2, 3]:
errmsg = "Overrides must be tuples of length 2 or 3."
errmsg = "Got %s." % (str(override))
raise ValueError(errmsg)
section = override[0]
option = override[1]
value = ""
if len(override) == 3:
value = override[2]
# Check for section existence, create if needed
if not self.has_section(section):
self.add_section(section)
self.set(section, option, value)
logging.info(
"Overriding section %s option %s with value %s "
"in configuration.",
section,
option,
value,
)
# Check for any substitutions that can be made
if not skip_extended:
self.perform_extended_interpolation()
# replace newlines in input with spaces
# this enables command line conversion compatibility
if sanitize_newline:
self.sanitize_newline()
# Check for duplicate options in sub-sections
self.sanity_check_subsections()
# Dump parsed .ini file if needed
if parsedFilePath:
fp = open(parsedFilePath, "w")
self.write(fp)
fp.close()
@classmethod
def from_cli(cls, opts):
"""Initialize the config parser using options parsed from the command
line.
The parsed options ``opts`` must include options provided by
:py:func:`add_workflow_command_line_group`.
Parameters
-----------
opts : argparse.ArgumentParser
The command line arguments parsed by argparse
"""
# read configuration file
logging.info("Reading configuration file")
if opts.config_overrides is not None:
overrides = [
tuple(override.split(":", 2))
for override in opts.config_overrides
]
else:
overrides = None
if opts.config_delete is not None:
deletes = [
tuple(delete.split(":")) for delete in opts.config_delete
]
else:
deletes = None
return cls(opts.config_files, overrides, deleteTuples=deletes)
def read_ini_file(self, fpath):
"""
        Read a .ini file into this ConfigParser instance.
        This function does none of the parsing/combining of sections. It
        simply reads the file(s) in unedited.
        Stub awaiting more functionality - see configparser_test.py
        Parameters
        ----------
        fpath : Path to .ini file, or list of paths
            The path(s) to a .ini file to be read in
"""
# Read the file
self.read(fpath)
def get_subsections(self, section_name):
"""Return a list of subsections for the given section name"""
# Keep only subsection names
subsections = [
sec[len(section_name) + 1:]
for sec in self.sections()
if sec.startswith(section_name + "-")
and not sec.endswith('defaultvalues')
]
for sec in subsections:
sp = sec.split("-")
# The format [section-subsection-tag] is okay. Just
# check that [section-subsection] section exists. If not it is possible
            # the user is trying to use a subsection name with '-' in it
if (len(sp) > 1) and not self.has_section(
"%s-%s" % (section_name, sp[0])
):
raise ValueError(
"Workflow uses the '-' as a delimiter so "
"this is interpreted as section-subsection-tag. "
"While checking section %s, no section with "
"name %s-%s was found. "
"If you did not intend to use tags in an "
"'advanced user' manner, or do not understand what "
"this means, don't use dashes in section "
"names. So [injection-nsbhinj] is good. "
"[injection-nsbh-inj] is not." % (sec, sp[0], sp[1])
)
if len(subsections) > 0:
return [sec.split("-")[0] for sec in subsections]
elif self.has_section(section_name):
return [""]
else:
return []
def perform_extended_interpolation(self):
"""
Filter through an ini file and replace all examples of
ExtendedInterpolation formatting with the exact value. For values like
${example} this is replaced with the value that corresponds to the
option called example ***in the same section***
For values like ${common|example} this is replaced with the value that
corresponds to the option example in the section [common]. Note that
in the python3 config parser this is ${common:example} but python2.7
interprets the : the same as a = and this breaks things
Nested interpolation is not supported here.
"""
# Do not allow any interpolation of the section names
for section in self.sections():
for option, value in self.items(section):
# Check the option name
new_str = self.interpolate_string(option, section)
if new_str != option:
self.set(section, new_str, value)
self.remove_option(section, option)
# Check the value
new_str = self.interpolate_string(value, section)
if new_str != value:
self.set(section, option, new_str)
def sanitize_newline(self):
"""
Filter through an ini file and replace all examples of
newlines with spaces. This is useful for command line conversion
        and allows multiline configparser inputs without added backslashes
"""
# Do not allow any interpolation of the section names
for section in self.sections():
for option, value in self.items(section):
new_value = value.replace('\n', ' ').replace('\r', ' ')
self.set(section, option, new_value)
def interpolate_string(self, test_string, section):
"""
Take a string and replace all example of ExtendedInterpolation
formatting within the string with the exact value.
For values like ${example} this is replaced with the value that
corresponds to the option called example ***in the same section***
For values like ${common|example} this is replaced with the value that
corresponds to the option example in the section [common]. Note that
in the python3 config parser this is ${common:example} but python2.7
interprets the : the same as a = and this breaks things
Nested interpolation is not supported here.
Parameters
----------
test_string : String
The string to parse and interpolate
section : String
The current section of the ConfigParser object
Returns
----------
test_string : String
Interpolated string
"""
# First check if any interpolation is needed and abort if not
re_obj = re.search(r"\$\{.*?\}", test_string)
while re_obj:
            # Obtain the contents of the first ${....} instance in the
            # string (group(0) is the full match, including the braces)
rep_string = (re_obj).group(0)[2:-1]
# Need to test which of the two formats we have
split_string = rep_string.split("|")
if len(split_string) == 1:
try:
test_string = test_string.replace(
"${" + rep_string + "}",
self.get(section, split_string[0]),
)
except ConfigParser.NoOptionError:
print("Substitution failed")
raise
if len(split_string) == 2:
try:
test_string = test_string.replace(
"${" + rep_string + "}",
self.get(split_string[0], split_string[1]),
)
except ConfigParser.NoOptionError:
print("Substitution failed")
raise
re_obj = re.search(r"\$\{.*?\}", test_string)
return test_string
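    # Illustrative sketch (an addition, not part of the upstream class):
    # how ${section|option} values are substituted by the two methods above.
    # The section and option names used here are hypothetical.
    def _example_interpolation(self):  # pragma: no cover
        self.add_section('workflow')
        self.set('workflow', 'start', '100')
        self.add_section('data')
        self.set('data', 'gps-start-time', '${workflow|start}')
        self.perform_extended_interpolation()
        assert self.get('data', 'gps-start-time') == '100'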
def split_multi_sections(self):
"""
        Parse through the WorkflowConfigParser instance and split any sections
        labelled with an "&" sign (e.g. [inspiral&tmpltbank]) into
[inspiral] and [tmpltbank] sections. If these individual sections
already exist they will be appended to. If an option exists in both the
[inspiral] and [inspiral&tmpltbank] sections an error will be thrown
"""
# Begin by looping over all sections
for section in self.sections():
# Only continue if section needs splitting
if "&" not in section:
continue
# Get list of section names to add these options to
split_sections = section.split("&")
for new_sec in split_sections:
# Add sections if they don't already exist
if not self.has_section(new_sec):
self.add_section(new_sec)
self.add_options_to_section(new_sec, self.items(section))
self.remove_section(section)
def populate_shared_sections(self):
"""Parse the [sharedoptions] section of the ini file.
That section should contain entries according to:
* massparams = inspiral, tmpltbank
* dataparams = tmpltbank
This will result in all options in [sharedoptions-massparams] being
copied into the [inspiral] and [tmpltbank] sections and the options
        in [sharedoptions-dataparams] being copied into [tmpltbank].
In the case of duplicates an error will be raised.
"""
if not self.has_section("sharedoptions"):
# No sharedoptions, exit
return
for key, value in self.items("sharedoptions"):
assert self.has_section("sharedoptions-%s" % (key))
# Comma separated
values = value.split(",")
common_options = self.items("sharedoptions-%s" % (key))
for section in values:
if not self.has_section(section):
self.add_section(section)
for arg, val in common_options:
if arg in self.options(section):
raise ValueError(
"Option exists in both original "
+ "ConfigParser section [%s] and " % (section,)
+ "sharedoptions section: %s %s"
% (arg, "sharedoptions-%s" % (key))
)
self.set(section, arg, val)
self.remove_section("sharedoptions-%s" % (key))
self.remove_section("sharedoptions")
def add_options_to_section(self, section, items, overwrite_options=False):
"""
Add a set of options and values to a section of a ConfigParser object.
        Will throw an error if any of the options being added already exist;
        this behaviour can be overridden if desired.
Parameters
----------
section : string
The name of the section to add options+values to
items : list of tuples
Each tuple contains (at [0]) the option and (at [1]) the value to
add to the section of the ini file
overwrite_options : Boolean, optional
By default this function will throw a ValueError if an option exists
in both the original section in the ConfigParser *and* in the
provided items.
This will override so that the options+values given in items
will replace the original values if the value is set to True.
Default = False
"""
# Sanity checking
if not self.has_section(section):
raise ValueError(
"Section %s not present in ConfigParser." % (section,)
)
# Check for duplicate options first
for option, value in items:
if not overwrite_options:
if option in self.options(section):
raise ValueError(
"Option exists in both original "
+ "ConfigParser section [%s] and " % (section,)
+ "input list: %s" % (option,)
)
self.set(section, option, value)
def sanity_check_subsections(self):
"""
This function goes through the ConfigParser and checks that any options
given in the [SECTION_NAME] section are not also given in any
[SECTION_NAME-SUBSECTION] sections.
"""
# Loop over the sections in the ini file
for section in self.sections():
            # [pegasus_profile] is specially allowed to be overridden by
# sub-sections
if section == "pegasus_profile":
continue
if section.endswith('-defaultvalues') and \
not len(section.split('-')) == 2:
# Only allow defaultvalues for top-level sections
raise NotImplementedError(
"-defaultvalues subsections are only allowed for "
"top-level sections; given %s" % section
)
# Loop over the sections again
for section2 in self.sections():
# Check if any are subsections of section
if section2.startswith(section + "-"):
if section2.endswith("defaultvalues"):
# defaultvalues is storage for defaults, and will
# be over-written by anything in the sections-proper
continue
# Check for duplicate options whenever this exists
self.check_duplicate_options(
section, section2, raise_error=True
)
def check_duplicate_options(self, section1, section2, raise_error=False):
"""
Check for duplicate options in two sections, section1 and section2.
Will return a list of the duplicate options.
Parameters
----------
section1 : string
The name of the first section to compare
section2 : string
The name of the second section to compare
raise_error : Boolean, optional (default=False)
If True, raise an error if duplicates are present.
Returns
----------
duplicates : List
List of duplicate options
"""
# Sanity checking
if not self.has_section(section1):
raise ValueError(
"Section %s not present in ConfigParser." % (section1,)
)
if not self.has_section(section2):
raise ValueError(
"Section %s not present in ConfigParser." % (section2,)
)
# Are section1 and section2 a section-and-defaultvalues pair?
section_and_default = (section1 == f"{section2}-defaultvalues" or
section2 == f"{section1}-defaultvalues")
# Is one the sections defaultvalues, but the other is not the
# top-level section? This is to catch the case where we are
# comparing section-defaultvalues with section-subsection
if section1.endswith("-defaultvalues") or \
section2.endswith("-defaultvalues"):
if not section_and_default:
# Override the raise_error variable not to error when
# defaultvalues are given and the sections are not
# otherwise the same
raise_error = False
items1 = self.options(section1)
items2 = self.options(section2)
# The list comprehension here creates a list of all duplicate items
duplicates = [x for x in items1 if x in items2]
if duplicates and raise_error:
err_msg = ("The following options appear in both section "
f"{section1} and {section2}: " + ", ".join(duplicates))
if section_and_default:
err_msg += ". Default values are unused in this case."
raise ValueError(err_msg)
return duplicates
def get_opt_tag(self, section, option, tag):
"""
Convenience function accessing get_opt_tags() for a single tag: see
documentation for that function.
NB calling get_opt_tags() directly is preferred for simplicity.
Parameters
-----------
self : ConfigParser object
The ConfigParser object (automatically passed when this is appended
to the ConfigParser class)
section : string
The section of the ConfigParser object to read
option : string
The ConfigParser option to look for
tag : string
The name of the subsection to look in, if not found in [section]
Returns
--------
string
The value of the options being searched for
"""
return self.get_opt_tags(section, option, [tag])
def get_opt_tags(self, section, option, tags):
"""
Supplement to ConfigParser.ConfigParser.get(). This will search for an
option in [section] and if it doesn't find it will also try in
[section-defaultvalues], and [section-tag] for every value of tag
in tags. [section-tag] will be preferred to [section-defaultvalues]
values. Will raise a ConfigParser.Error if it cannot find a value.
Parameters
-----------
self : ConfigParser object
The ConfigParser object (automatically passed when this is appended
to the ConfigParser class)
section : string
The section of the ConfigParser object to read
option : string
The ConfigParser option to look for
tags : list of strings
The name of subsections to look in, if not found in [section]
Returns
--------
string
The value of the options being searched for
"""
# Need lower case tag name; also exclude cases with tag=None
if tags:
tags = [tag.lower() for tag in tags if tag is not None]
try:
return self.get(section, option)
except ConfigParser.Error:
err_string = "No option '%s' in section [%s] " % (option, section)
if not tags:
raise ConfigParser.Error(err_string + ".")
return_vals = []
# First, check if there are any default values set:
has_defaultvalue = False
if self.has_section(f"{section}-defaultvalues"):
return_vals.append(
self.get(f"{section}-defaultvalues", option)
)
has_defaultvalue = True
sub_section_list = []
for sec_len in range(1, len(tags) + 1):
for tag_permutation in itertools.permutations(tags, sec_len):
joined_name = "-".join(tag_permutation)
sub_section_list.append(joined_name)
section_list = ["%s-%s" % (section, sb) for sb in sub_section_list]
err_section_list = []
for sub in sub_section_list:
if self.has_section("%s-%s" % (section, sub)):
if self.has_option("%s-%s" % (section, sub), option):
err_section_list.append("%s-%s" % (section, sub))
return_vals.append(
self.get("%s-%s" % (section, sub), option)
)
if has_defaultvalue and len(return_vals) > 1:
# option supplied which should overwrite the default;
# default will be first in the list, so remove it
return_vals = return_vals[1:]
# We also want to recursively go into sections
if not return_vals:
err_string += "or in sections [%s]." % (
"] [".join(section_list)
)
raise ConfigParser.Error(err_string)
if len(return_vals) > 1:
err_string += (
"and multiple entries found in sections [%s]."
% ("] [".join(err_section_list))
)
raise ConfigParser.Error(err_string)
return return_vals[0]
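    # Hedged lookup-order sketch (added for clarity, not part of the
    # original module), with hypothetical section and option names:
    #
    #   cp.get_opt_tags('inspiral', 'sample-rate', ['h1'])
    #
    # 1. returns [inspiral] sample-rate if it exists;
    # 2. otherwise collects [inspiral-defaultvalues] sample-rate (if any)
    #    and [inspiral-h1] sample-rate (if any); a tagged value overrides
    #    the default, and finding the option in more than one tagged
    #    subsection raises a ConfigParser.Error.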
def has_option_tag(self, section, option, tag):
"""
Convenience function accessing has_option_tags() for a single tag: see
documentation for that function.
NB calling has_option_tags() directly is preferred for simplicity.
Parameters
-----------
self : ConfigParser object
The ConfigParser object (automatically passed when this is appended
to the ConfigParser class)
section : string
The section of the ConfigParser object to read
option : string
The ConfigParser option to look for
tag : string
The name of the subsection to look in, if not found in [section]
Returns
--------
Boolean
Is the option in the section or [section-tag]
"""
return self.has_option_tags(section, option, [tag])
def has_option_tags(self, section, option, tags):
"""
Supplement to ConfigParser.ConfigParser.has_option().
This will search for an option in [section] and if it doesn't find it
will also try in [section-tag] for each value in tags.
Returns True if the option is found and false if not.
Parameters
-----------
self : ConfigParser object
The ConfigParser object (automatically passed when this is appended
to the ConfigParser class)
section : string
The section of the ConfigParser object to read
option : string
The ConfigParser option to look for
tags : list of strings
The names of the subsection to look in, if not found in [section]
Returns
--------
Boolean
Is the option in the section or [section-tag] (for tag in tags)
"""
try:
self.get_opt_tags(section, option, tags)
return True
except ConfigParser.Error:
return False
| 28,298
| 38.523743
| 84
|
py
|
pycbc
|
pycbc-master/pycbc/types/timeseries.py
|
# Copyright (C) 2014 Tito Dal Canton, Josh Willis, Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Provides a class representing a time series.
"""
import os as _os, h5py
from pycbc.types.array import Array, _convert, complex_same_precision_as, zeros
from pycbc.types.array import _nocomplex
from pycbc.types.frequencyseries import FrequencySeries
import lal as _lal
import numpy as _numpy
from scipy.io.wavfile import write as write_wav
class TimeSeries(Array):
"""Models a time series consisting of uniformly sampled scalar values.
Parameters
----------
initial_array : array-like
Array containing sampled data.
delta_t : float
Time between consecutive samples in seconds.
epoch : {None, lal.LIGOTimeGPS}, optional
Time of the first sample in seconds.
dtype : {None, data-type}, optional
Sample data type.
copy : boolean, optional
If True, samples are copied to a new array.
"""
def __init__(self, initial_array, delta_t=None,
epoch=None, dtype=None, copy=True):
if len(initial_array) < 1:
raise ValueError('initial_array must contain at least one sample.')
if delta_t is None:
try:
delta_t = initial_array.delta_t
except AttributeError:
raise TypeError('must provide either an initial_array with a delta_t attribute, or a value for delta_t')
if not delta_t > 0:
raise ValueError('delta_t must be a positive number')
# Get epoch from initial_array if epoch not given (or is None)
        # If initial_array has no epoch, set epoch to 0.
# If epoch is provided, use that.
if not isinstance(epoch, _lal.LIGOTimeGPS):
if epoch is None:
if isinstance(initial_array, TimeSeries):
epoch = initial_array._epoch
else:
epoch = _lal.LIGOTimeGPS(0)
elif epoch is not None:
try:
epoch = _lal.LIGOTimeGPS(epoch)
except:
raise TypeError('epoch must be either None or a lal.LIGOTimeGPS')
Array.__init__(self, initial_array, dtype=dtype, copy=copy)
self._delta_t = delta_t
self._epoch = epoch
def to_astropy(self, name='pycbc'):
""" Return an astropy.timeseries.TimeSeries instance
"""
from astropy.timeseries import TimeSeries as ATimeSeries
from astropy.time import Time
from astropy.units import s
start = Time(float(self.start_time), format='gps', scale='utc')
delta = self.delta_t * s
return ATimeSeries({name: self.numpy()},
time_start=start,
time_delta=delta,
n_samples=len(self))
def epoch_close(self, other):
""" Check if the epoch is close enough to allow operations """
dt = abs(float(self.start_time - other.start_time))
return dt <= 1e-7
def sample_rate_close(self, other):
""" Check if the sample rate is close enough to allow operations """
        # compare our delta_t either to another time series' or
# to a given sample rate (float)
if isinstance(other, TimeSeries):
odelta_t = other.delta_t
else:
odelta_t = 1.0/other
if (odelta_t - self.delta_t) / self.delta_t > 1e-4:
return False
if abs(1 - odelta_t / self.delta_t) * len(self) > 0.5:
return False
return True
def _return(self, ary):
return TimeSeries(ary, self._delta_t, epoch=self._epoch, copy=False)
def _typecheck(self, other):
if isinstance(other, TimeSeries):
if not self.sample_rate_close(other):
raise ValueError('different delta_t, {} vs {}'.format(
self.delta_t, other.delta_t))
if not self.epoch_close(other):
raise ValueError('different epoch, {} vs {}'.format(
self.start_time, other.start_time))
def _getslice(self, index):
# Set the new epoch---note that index.start may also be None
if index.start is None:
new_epoch = self._epoch
else:
if index.start < 0:
raise ValueError(('Negative start index ({})'
' not supported').format(index.start))
new_epoch = self._epoch + index.start * self._delta_t
if index.step is not None:
new_delta_t = self._delta_t * index.step
else:
new_delta_t = self._delta_t
return TimeSeries(Array._getslice(self, index), new_delta_t,
new_epoch, copy=False)
def prepend_zeros(self, num):
"""Prepend num zeros onto the beginning of this TimeSeries. Update also
epoch to include this prepending.
"""
self.resize(len(self) + num)
self.roll(num)
self._epoch = self._epoch - num * self._delta_t
def append_zeros(self, num):
"""Append num zeros onto the end of this TimeSeries.
"""
self.resize(len(self) + num)
def get_delta_t(self):
"""Return time between consecutive samples in seconds.
"""
return self._delta_t
delta_t = property(get_delta_t,
doc="Time between consecutive samples in seconds.")
def get_duration(self):
"""Return duration of time series in seconds.
"""
return len(self) * self._delta_t
duration = property(get_duration,
doc="Duration of time series in seconds.")
def get_sample_rate(self):
"""Return the sample rate of the time series.
"""
return 1.0/self.delta_t
sample_rate = property(get_sample_rate,
doc="The sample rate of the time series.")
def time_slice(self, start, end, mode='floor'):
"""Return the slice of the time series that contains the time range
in GPS seconds.
"""
if start < self.start_time:
raise ValueError('Time series does not contain a time as early as %s' % start)
if end > self.end_time:
raise ValueError('Time series does not contain a time as late as %s' % end)
start_idx = float(start - self.start_time) * self.sample_rate
end_idx = float(end - self.start_time) * self.sample_rate
if _numpy.isclose(start_idx, round(start_idx)):
start_idx = round(start_idx)
if _numpy.isclose(end_idx, round(end_idx)):
end_idx = round(end_idx)
if mode == 'floor':
start_idx = int(start_idx)
end_idx = int(end_idx)
elif mode == 'nearest':
start_idx = int(round(start_idx))
end_idx = int(round(end_idx))
else:
raise ValueError("Invalid mode: {}".format(mode))
return self[start_idx:end_idx]
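    # Hedged usage sketch (added for clarity, not part of the original
    # module); the GPS times below are hypothetical:
    #
    #   strain = TimeSeries(_numpy.zeros(16 * 4096), delta_t=1.0/4096,
    #                       epoch=1126259462)
    #   chunk = strain.time_slice(1126259464, 1126259466)
    #   # chunk holds 2 s of data, i.e. 2 * 4096 samples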
@property
def delta_f(self):
"""Return the delta_f this ts would have in the frequency domain
"""
return 1.0 / self.duration
@property
def start_time(self):
"""Return time series start time as a LIGOTimeGPS.
"""
return self._epoch
@start_time.setter
def start_time(self, time):
""" Set the start time
"""
self._epoch = _lal.LIGOTimeGPS(time)
def get_end_time(self):
"""Return time series end time as a LIGOTimeGPS.
"""
return self._epoch + self.get_duration()
end_time = property(get_end_time,
doc="Time series end time as a LIGOTimeGPS.")
def get_sample_times(self):
"""Return an Array containing the sample times.
"""
if self._epoch is None:
return Array(range(len(self))) * self._delta_t
else:
return Array(range(len(self))) * self._delta_t + float(self._epoch)
sample_times = property(get_sample_times,
doc="Array containing the sample times.")
def at_time(self, time, nearest_sample=False,
interpolate=None, extrapolate=None):
""" Return the value at the specified gps time
Parameters
----------
nearest_sample: bool
Return the sample at the time nearest to the chosen time rather
than rounded down.
interpolate: str, None
Return the interpolated value of the time series. Choices
are simple linear or quadratic interpolation.
extrapolate: str or float, None
            Value to return if time is outside the range of the vector or
method of extrapolating the value.
"""
if nearest_sample:
time = time + self.delta_t / 2.0
vtime = _numpy.array(time, ndmin=1)
fill_value = None
keep_idx = None
size = len(vtime)
if extrapolate is not None:
if _numpy.isscalar(extrapolate) and _numpy.isreal(extrapolate):
fill_value = extrapolate
facl = facr = 0
if interpolate == 'quadratic':
facl = facr = 1.1
elif interpolate == 'linear':
facl, facr = 0.1, 1.1
left = (vtime >= self.start_time + self.delta_t * facl)
right = (vtime < self.end_time - self.delta_t * facr)
keep_idx = _numpy.where(left & right)[0]
vtime = vtime[keep_idx]
else:
raise ValueError("Unsuported extrapolate: %s" % extrapolate)
fi = (vtime - float(self.start_time))*self.sample_rate
i = _numpy.asarray(_numpy.floor(fi)).astype(int)
di = fi - i
if interpolate == 'linear':
a = self[i]
b = self[i+1]
ans = a + (b - a) * di
elif interpolate == 'quadratic':
c = self.data[i]
xr = self.data[i + 1] - c
xl = self.data[i - 1] - c
a = 0.5 * (xr + xl)
b = 0.5 * (xr - xl)
ans = a * di**2.0 + b * di + c
else:
ans = self[i]
ans = _numpy.array(ans, ndmin=1)
if fill_value is not None:
old = ans
ans = _numpy.zeros(size) + fill_value
ans[keep_idx] = old
ans = _numpy.array(ans, ndmin=1)
if _numpy.isscalar(time):
return ans[0]
else:
return ans
at_times = at_time
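    # Hedged usage sketch (added for clarity, not part of the original
    # module), with hypothetical values:
    #
    #   ts = TimeSeries([0., 1., 2., 3.], delta_t=1.0, epoch=100)
    #   ts.at_time(101.0)                        # -> 1.0  (sample below)
    #   ts.at_time(101.5, interpolate='linear')  # -> 1.5  (interpolated)
    #   ts.at_time(200.0, extrapolate=0.0)       # -> 0.0  (out of range)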
def __eq__(self,other):
"""
This is the Python special method invoked whenever the '=='
comparison is used. It will return true if the data of two
time series are identical, and all of the numeric meta-data
are identical, irrespective of whether or not the two
instances live in the same memory (for that comparison, the
Python statement 'a is b' should be used instead).
Thus, this method returns 'True' if the types of both 'self'
and 'other' are identical, as well as their lengths, dtypes,
epochs, delta_ts and the data in the arrays, element by element.
It will always do the comparison on the CPU, but will *not* move
either object to the CPU if it is not already there, nor change
the scheme of either object. It is possible to compare a CPU
object to a GPU object, and the comparison should be true if the
data and meta-data of the two objects are the same.
Note in particular that this function returns a single boolean,
and not an array of booleans as Numpy does. If the numpy
behavior is instead desired it can be obtained using the numpy()
method of the PyCBC type to get a numpy instance from each
object, and invoking '==' on those two instances.
Parameters
----------
other: another Python object, that should be tested for equality
with 'self'.
Returns
-------
boolean: 'True' if the types, dtypes, lengths, epochs, delta_ts
and data of the two objects are each identical.
"""
if super(TimeSeries,self).__eq__(other):
return (self._epoch == other._epoch and self._delta_t == other._delta_t)
else:
return False
def almost_equal_elem(self,other,tol,relative=True,dtol=0.0):
"""
Compare whether two time series are almost equal, element
by element.
If the 'relative' parameter is 'True' (the default) then the
'tol' parameter (which must be positive) is interpreted as a
relative tolerance, and the comparison returns 'True' only if
abs(self[i]-other[i]) <= tol*abs(self[i])
for all elements of the series.
If 'relative' is 'False', then 'tol' is an absolute tolerance,
and the comparison is true only if
abs(self[i]-other[i]) <= tol
for all elements of the series.
The method also checks that self.delta_t is within 'dtol' of
other.delta_t; if 'dtol' has its default value of 0 then exact
equality between the two is required.
Other meta-data (type, dtype, length, and epoch) must be exactly
equal. If either object's memory lives on the GPU it will be
copied to the CPU for the comparison, which may be slow. But the
original object itself will not have its memory relocated nor
scheme changed.
Parameters
----------
other: another Python object, that should be tested for
almost-equality with 'self', element-by-element.
tol: a non-negative number, the tolerance, which is interpreted
as either a relative tolerance (the default) or an absolute
tolerance.
relative: A boolean, indicating whether 'tol' should be interpreted
as a relative tolerance (if True, the default if this argument
            is omitted) or as an absolute tolerance (if relative is False).
dtol: a non-negative number, the tolerance for delta_t. Like 'tol',
it is interpreted as relative or absolute based on the value of
'relative'. This parameter defaults to zero, enforcing exact
equality between the delta_t values of the two TimeSeries.
Returns
-------
boolean: 'True' if the data and delta_ts agree within the tolerance,
as interpreted by the 'relative' keyword, and if the types,
lengths, dtypes, and epochs are exactly the same.
"""
# Check that the delta_t tolerance is non-negative; raise an exception
# if needed.
if (dtol < 0.0):
raise ValueError("Tolerance in delta_t cannot be negative")
if super(TimeSeries,self).almost_equal_elem(other,tol=tol,relative=relative):
if relative:
return (self._epoch == other._epoch and
abs(self._delta_t-other._delta_t) <= dtol*self._delta_t)
else:
return (self._epoch == other._epoch and
abs(self._delta_t-other._delta_t) <= dtol)
else:
return False
def almost_equal_norm(self,other,tol,relative=True,dtol=0.0):
"""
Compare whether two time series are almost equal, normwise.
If the 'relative' parameter is 'True' (the default) then the
'tol' parameter (which must be positive) is interpreted as a
relative tolerance, and the comparison returns 'True' only if
abs(norm(self-other)) <= tol*abs(norm(self)).
If 'relative' is 'False', then 'tol' is an absolute tolerance,
and the comparison is true only if
abs(norm(self-other)) <= tol
The method also checks that self.delta_t is within 'dtol' of
other.delta_t; if 'dtol' has its default value of 0 then exact
equality between the two is required.
Other meta-data (type, dtype, length, and epoch) must be exactly
equal. If either object's memory lives on the GPU it will be
copied to the CPU for the comparison, which may be slow. But the
original object itself will not have its memory relocated nor
scheme changed.
Parameters
----------
other: another Python object, that should be tested for
almost-equality with 'self', based on their norms.
tol: a non-negative number, the tolerance, which is interpreted
as either a relative tolerance (the default) or an absolute
tolerance.
relative: A boolean, indicating whether 'tol' should be interpreted
as a relative tolerance (if True, the default if this argument
            is omitted) or as an absolute tolerance (if relative is False).
dtol: a non-negative number, the tolerance for delta_t. Like 'tol',
it is interpreted as relative or absolute based on the value of
'relative'. This parameter defaults to zero, enforcing exact
equality between the delta_t values of the two TimeSeries.
Returns
-------
boolean: 'True' if the data and delta_ts agree within the tolerance,
as interpreted by the 'relative' keyword, and if the types,
lengths, dtypes, and epochs are exactly the same.
"""
# Check that the delta_t tolerance is non-negative; raise an exception
# if needed.
if (dtol < 0.0):
raise ValueError("Tolerance in delta_t cannot be negative")
if super(TimeSeries,self).almost_equal_norm(other,tol=tol,relative=relative):
if relative:
return (self._epoch == other._epoch and
abs(self._delta_t-other._delta_t) <= dtol*self._delta_t)
else:
return (self._epoch == other._epoch and
abs(self._delta_t-other._delta_t) <= dtol)
else:
return False
@_convert
def lal(self):
"""Produces a LAL time series object equivalent to self.
Returns
-------
lal_data : {lal.*TimeSeries}
LAL time series object containing the same data as self.
The actual type depends on the sample's dtype. If the epoch of
self is 'None', the epoch of the returned LAL object will be
LIGOTimeGPS(0,0); otherwise, the same as that of self.
Raises
------
TypeError
If time series is stored in GPU memory.
"""
lal_data = None
ep = self._epoch
if self._data.dtype == _numpy.float32:
lal_data = _lal.CreateREAL4TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.float64:
lal_data = _lal.CreateREAL8TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.complex64:
lal_data = _lal.CreateCOMPLEX8TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.complex128:
lal_data = _lal.CreateCOMPLEX16TimeSeries("",ep,0,self.delta_t,_lal.SecondUnit,len(self))
lal_data.data.data[:] = self.numpy()
return lal_data
def crop(self, left, right):
""" Remove given seconds from either end of time series
Parameters
----------
left : float
Number of seconds of data to remove from the left of the time series.
right : float
Number of seconds of data to remove from the right of the time series.
Returns
-------
cropped : pycbc.types.TimeSeries
The reduced time series
"""
if left + right > self.duration:
raise ValueError('Cannot crop more data than we have')
s = int(left * self.sample_rate)
e = len(self) - int(right * self.sample_rate)
return self[s:e]
def save_to_wav(self, file_name):
""" Save this time series to a wav format audio file.
Parameters
----------
file_name : string
The output file name
"""
scaled = _numpy.int16(self.numpy()/max(abs(self)) * 32767)
write_wav(file_name, int(self.sample_rate), scaled)
def psd(self, segment_duration, **kwds):
""" Calculate the power spectral density of this time series.
Use the `pycbc.psd.welch` method to estimate the psd of this time segment.
For more complete options, please see that function.
Parameters
----------
segment_duration: float
Duration in seconds to use for each sample of the spectrum.
kwds : keywords
Additional keyword arguments are passed on to the `pycbc.psd.welch` method.
Returns
-------
psd : FrequencySeries
Frequency series containing the estimated PSD.
"""
from pycbc.psd import welch
seg_len = int(round(segment_duration * self.sample_rate))
seg_stride = int(seg_len / 2)
return welch(self, seg_len=seg_len,
seg_stride=seg_stride,
**kwds)
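    # Hedged usage sketch (added for clarity, not part of the original
    # module); the 4 s segment duration is an arbitrary choice:
    #
    #   p = strain.psd(4)
    #   # Welch estimate with 4 * sample_rate samples per segment,
    #   # 50% overlap, and p.delta_f == 1.0 / 4 Hz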
def gate(self, time, window=0.25, method='taper', copy=True,
taper_width=0.25, invpsd=None):
""" Gate out portion of time series
Parameters
----------
time: float
Central time of the gate in seconds
window: float
Half-length in seconds to remove data around gate time.
method: str
Method to apply gate, options are 'hard', 'taper', and 'paint'.
copy: bool
If False, do operations inplace to this time series, else return
new time series.
taper_width: float
            Length of tapering region on either side of excised data. Only
applies to the taper gating method.
invpsd: pycbc.types.FrequencySeries
The inverse PSD to use for painting method. If not given,
a PSD is generated using default settings.
Returns
-------
        data: pycbc.types.TimeSeries
Gated time series
"""
data = self.copy() if copy else self
if method == 'taper':
from pycbc.strain import gate_data
return gate_data(data, [(time, window, taper_width)])
elif method == 'paint':
# Uses the hole-filling method of
# https://arxiv.org/pdf/1908.05644.pdf
from pycbc.strain.gate import gate_and_paint
if invpsd is None:
# These are some bare minimum settings, normally you
# should probably provide a psd
invpsd = 1. / self.filter_psd(self.duration/32, self.delta_f, 0)
lindex = int((time - window - self.start_time) / self.delta_t)
rindex = lindex + int(2 * window / self.delta_t)
lindex = lindex if lindex >= 0 else 0
rindex = rindex if rindex <= len(self) else len(self)
return gate_and_paint(data, lindex, rindex, invpsd, copy=False)
elif method == 'hard':
tslice = data.time_slice(time - window, time + window)
tslice[:] = 0
return data
else:
raise ValueError('Invalid method name: {}'.format(method))
def filter_psd(self, segment_duration, delta_f, flow):
""" Calculate the power spectral density of this time series.
Use the `pycbc.psd.welch` method to estimate the psd of this time segment.
The psd is then truncated in the time domain to the segment duration
and interpolated to the requested sample frequency.
Parameters
----------
segment_duration: float
Duration in seconds to use for each sample of the spectrum.
delta_f : float
Frequency spacing to return psd at.
flow : float
The low frequency cutoff to apply when truncating the inverse
spectrum.
Returns
-------
psd : FrequencySeries
Frequency series containing the estimated PSD.
"""
from pycbc.psd import interpolate, inverse_spectrum_truncation
p = self.psd(segment_duration)
samples = int(round(p.sample_rate * segment_duration))
p = interpolate(p, delta_f)
return inverse_spectrum_truncation(p, samples,
low_frequency_cutoff=flow,
trunc_method='hann')
def whiten(self, segment_duration, max_filter_duration, trunc_method='hann',
remove_corrupted=True, low_frequency_cutoff=None,
return_psd=False, **kwds):
""" Return a whitened time series
Parameters
----------
segment_duration: float
Duration in seconds to use for each sample of the spectrum.
max_filter_duration : int
Maximum length of the time-domain filter in seconds.
trunc_method : {None, 'hann'}
Function used for truncating the time-domain filter.
None produces a hard truncation at `max_filter_len`.
remove_corrupted : {True, boolean}
If True, the region of the time series corrupted by the whitening
is excised before returning. If false, the corrupted regions
are not excised and the full time series is returned.
low_frequency_cutoff : {None, float}
Low frequency cutoff to pass to the inverse spectrum truncation.
This should be matched to a known low frequency cutoff of the
data if there is one.
return_psd : {False, Boolean}
Return the estimated and conditioned PSD that was used to whiten
the data.
kwds : keywords
Additional keyword arguments are passed on to the `pycbc.psd.welch` method.
Returns
-------
whitened_data : TimeSeries
The whitened time series
"""
from pycbc.psd import inverse_spectrum_truncation, interpolate
# Estimate the noise spectrum
psd = self.psd(segment_duration, **kwds)
psd = interpolate(psd, self.delta_f)
max_filter_len = int(round(max_filter_duration * self.sample_rate))
# Interpolate and smooth to the desired corruption length
psd = inverse_spectrum_truncation(psd,
max_filter_len=max_filter_len,
low_frequency_cutoff=low_frequency_cutoff,
trunc_method=trunc_method)
# Whiten the data by the asd
white = (self.to_frequencyseries() / psd**0.5).to_timeseries()
if remove_corrupted:
white = white[int(max_filter_len/2):int(len(self)-max_filter_len/2)]
if return_psd:
return white, psd
return white
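    # Hedged usage sketch (added for clarity, not part of the original
    # module); the segment and filter durations are arbitrary choices:
    #
    #   white = strain.whiten(4, 4)
    #   # PSD estimated on 4 s segments, truncated to a 4 s filter;
    #   # 2 s are then removed from each end (remove_corrupted=True)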
def qtransform(self, delta_t=None, delta_f=None, logfsteps=None,
frange=None, qrange=(4,64), mismatch=0.2, return_complex=False):
""" Return the interpolated 2d qtransform of this data
Parameters
----------
delta_t : {self.delta_t, float}
The time resolution to interpolate to
delta_f : float, Optional
The frequency resolution to interpolate to
logfsteps : int
Do a log interpolation (incompatible with delta_f option) and set
the number of steps to take.
frange : {(30, nyquist*0.8), tuple of ints}
frequency range
qrange : {(4, 64), tuple}
q range
mismatch : float
Mismatch between frequency tiles
return_complex: {False, bool}
return the raw complex series instead of the normalized power.
Returns
-------
times : numpy.ndarray
The time that the qtransform is sampled.
freqs : numpy.ndarray
The frequencies that the qtransform is sampled.
qplane : numpy.ndarray (2d)
The two dimensional interpolated qtransform of this time series.
"""
from pycbc.filter.qtransform import qtiling, qplane
from scipy.interpolate import interp2d
if frange is None:
frange = (30, int(self.sample_rate / 2 * 8))
q_base = qtiling(self, qrange, frange, mismatch)
_, times, freqs, q_plane = qplane(q_base, self.to_frequencyseries(),
return_complex=return_complex)
if logfsteps and delta_f:
raise ValueError("Provide only one (or none) of delta_f and logfsteps")
# Interpolate if requested
if delta_f or delta_t or logfsteps:
if return_complex:
interp_amp = interp2d(times, freqs, abs(q_plane))
interp_phase = interp2d(times, freqs, _numpy.angle(q_plane))
else:
interp = interp2d(times, freqs, q_plane)
if delta_t:
times = _numpy.arange(float(self.start_time),
float(self.end_time), delta_t)
if delta_f:
freqs = _numpy.arange(int(frange[0]), int(frange[1]), delta_f)
if logfsteps:
freqs = _numpy.logspace(_numpy.log10(frange[0]),
_numpy.log10(frange[1]),
logfsteps)
if delta_f or delta_t or logfsteps:
if return_complex:
q_plane = _numpy.exp(1.0j * interp_phase(times, freqs))
q_plane *= interp_amp(times, freqs)
else:
q_plane = interp(times, freqs)
return times, freqs, q_plane
def notch_fir(self, f1, f2, order, beta=5.0, remove_corrupted=True):
""" notch filter the time series using an FIR filtered generated from
the ideal response passed through a time-domain kaiser
window (beta = 5.0)
The suppression of the notch filter is related to the bandwidth and
the number of samples in the filter length. For a few Hz bandwidth,
a length corresponding to a few seconds is typically
required to create significant suppression in the notched band.
Parameters
----------
Time Series: TimeSeries
The time series to be notched.
f1: float
The start of the frequency suppression.
f2: float
The end of the frequency suppression.
order: int
Number of corrupted samples on each side of the time series
beta: float
Beta parameter of the kaiser window that sets the side lobe attenuation.
"""
from pycbc.filter import notch_fir
ts = notch_fir(self, f1, f2, order, beta=beta)
if remove_corrupted:
ts = ts[order:len(ts)-order]
return ts
def lowpass_fir(self, frequency, order, beta=5.0, remove_corrupted=True):
""" Lowpass filter the time series using an FIR filtered generated from
the ideal response passed through a kaiser window (beta = 5.0)
Parameters
----------
Time Series: TimeSeries
The time series to be low-passed.
frequency: float
The frequency below which is suppressed.
order: int
Number of corrupted samples on each side of the time series
beta: float
Beta parameter of the kaiser window that sets the side lobe attenuation.
remove_corrupted : {True, boolean}
If True, the region of the time series corrupted by the filtering
is excised before returning. If false, the corrupted regions
are not excised and the full time series is returned.
"""
from pycbc.filter import lowpass_fir
ts = lowpass_fir(self, frequency, order, beta=beta)
if remove_corrupted:
ts = ts[order:len(ts)-order]
return ts
def highpass_fir(self, frequency, order, beta=5.0, remove_corrupted=True):
""" Highpass filter the time series using an FIR filtered generated from
the ideal response passed through a kaiser window (beta = 5.0)
Parameters
----------
Time Series: TimeSeries
The time series to be high-passed.
frequency: float
The frequency below which is suppressed.
order: int
Number of corrupted samples on each side of the time series
beta: float
Beta parameter of the kaiser window that sets the side lobe attenuation.
remove_corrupted : {True, boolean}
If True, the region of the time series corrupted by the filtering
is excised before returning. If false, the corrupted regions
are not excised and the full time series is returned.
"""
from pycbc.filter import highpass_fir
ts = highpass_fir(self, frequency, order, beta=beta)
if remove_corrupted:
ts = ts[order:len(ts)-order]
return ts
def fir_zero_filter(self, coeff):
"""Filter the timeseries with a set of FIR coefficients
Parameters
----------
coeff: numpy.ndarray
            FIR coefficients. Should be of odd length and symmetric.
Returns
-------
filtered_series: pycbc.types.TimeSeries
Return the filtered timeseries, which has been properly shifted to account
for the FIR filter delay and the corrupted regions zeroed out.
"""
from pycbc.filter import fir_zero_filter
return self._return(fir_zero_filter(coeff, self))
def resample(self, delta_t):
""" Resample this time series to the new delta_t
Parameters
-----------
delta_t: float
The time step to resample the times series to.
Returns
-------
resampled_ts: pycbc.types.TimeSeries
            The resampled timeseries at the new time interval delta_t.
"""
from pycbc.filter import resample_to_delta_t
return resample_to_delta_t(self, delta_t)
def save(self, path, group = None):
"""
Save time series to a Numpy .npy, hdf, or text file. The first column
contains the sample times, the second contains the values.
In the case of a complex time series saved as text, the imaginary
part is written as a third column. When using hdf format, the data is stored
as a single vector, along with relevant attributes.
Parameters
----------
path: string
Destination file path. Must end with either .hdf, .npy or .txt.
group: string
Additional name for internal storage use. Ex. hdf storage uses
this as the key value.
Raises
------
ValueError
            If path does not end in .npy, .txt or .hdf.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
output = _numpy.vstack((self.sample_times.numpy(), self.numpy())).T
_numpy.save(path, output)
elif ext == '.txt':
if self.kind == 'real':
output = _numpy.vstack((self.sample_times.numpy(),
self.numpy())).T
elif self.kind == 'complex':
output = _numpy.vstack((self.sample_times.numpy(),
self.numpy().real,
self.numpy().imag)).T
_numpy.savetxt(path, output)
elif ext =='.hdf':
key = 'data' if group is None else group
with h5py.File(path, 'a') as f:
ds = f.create_dataset(key, data=self.numpy(),
compression='gzip',
compression_opts=9, shuffle=True)
ds.attrs['start_time'] = float(self.start_time)
ds.attrs['delta_t'] = float(self.delta_t)
else:
raise ValueError('Path must end with .npy, .txt or .hdf')
def to_timeseries(self):
""" Return time series"""
return self
@_nocomplex
def to_frequencyseries(self, delta_f=None):
""" Return the Fourier transform of this time series
Parameters
----------
delta_f : {None, float}, optional
The frequency resolution of the returned frequency series. By
default the resolution is determined by the duration of the timeseries.
Returns
-------
FrequencySeries:
The fourier transform of this time series.
"""
from pycbc.fft import fft
if not delta_f:
delta_f = 1.0 / self.duration
# add 0.5 to round integer
tlen = int(1.0 / delta_f / self.delta_t + 0.5)
flen = int(tlen / 2 + 1)
if tlen < len(self):
raise ValueError("The value of delta_f (%s) would be "
"undersampled. Maximum delta_f "
"is %s." % (delta_f, 1.0 / self.duration))
if not delta_f:
tmp = self
else:
tmp = TimeSeries(zeros(tlen, dtype=self.dtype),
delta_t=self.delta_t, epoch=self.start_time)
tmp[:len(self)] = self[:]
f = FrequencySeries(zeros(flen,
dtype=complex_same_precision_as(self)),
delta_f=delta_f)
fft(tmp, f)
f._delta_f = delta_f
return f
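    # Hedged usage sketch (added for clarity, not part of the original
    # module), relating time- and frequency-domain metadata:
    #
    #   ts = TimeSeries(_numpy.zeros(4096), delta_t=1.0/1024)  # 4 s of data
    #   fs = ts.to_frequencyseries()
    #   # fs.delta_f == 1.0 / ts.duration == 0.25 Hz
    #   # len(fs) == len(ts) // 2 + 1 == 2049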
def inject(self, other, copy=True):
"""Return copy of self with other injected into it.
The other vector will be resized and time shifted with sub-sample
        precision before adding. This assumes the data is zero
        outside of the original vector range.
"""
# only handle equal sample rate for now.
if not self.sample_rate_close(other):
raise ValueError('Sample rate must be the same')
# determine if we want to inject in place or not
if copy:
ts = self.copy()
else:
ts = self
# Other is disjoint
if ((other.start_time >= ts.end_time) or
(ts.start_time > other.end_time)):
return ts
other = other.copy()
dt = float((other.start_time - ts.start_time) * ts.sample_rate)
# This coaligns other to the time stepping of self
if not dt.is_integer():
diff = (dt - _numpy.floor(dt)) * ts.delta_t
# insert zeros at end
other.resize(len(other) + (len(other) + 1) % 2 + 1)
# fd shift to the right
other = other.cyclic_time_shift(diff)
# get indices of other with respect to self
        # this is already an integer to floating point precision
left = float(other.start_time - ts.start_time) * ts.sample_rate
left = int(round(left))
right = left + len(other)
oleft = 0
oright = len(other)
# other overhangs on left so truncate
if left < 0:
oleft = -left
left = 0
# other overhangs on right so truncate
if right > len(ts):
oright = len(other) - (right - len(ts))
right = len(ts)
ts[left:right] += other[oleft:oright]
return ts
add_into = inject # maintain backwards compatibility for now
@_nocomplex
def cyclic_time_shift(self, dt):
"""Shift the data and timestamps by a given number of seconds
Shift the data and timestamps in the time domain a given number of
seconds. To just change the time stamps, do ts.start_time += dt.
        The time shift may be smaller than the intrinsic sample spacing of the data.
Note that data will be cyclically rotated, so if you shift by 2
seconds, the final 2 seconds of your data will now be at the
beginning of the data set.
Parameters
----------
dt : float
Amount of time to shift the vector.
Returns
-------
data : pycbc.types.TimeSeries
The time shifted time series.
"""
# We do this in the frequency domain to allow us to do sub-sample
# time shifts. This also results in the shift being circular. It
        # is left to a future update to do a faster implementation in the case
# where the time shift can be done with an exact number of samples.
return self.to_frequencyseries().cyclic_time_shift(dt).to_timeseries()
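    # Hedged usage sketch (added for clarity, not part of the original
    # module); the shift value is arbitrary:
    #
    #   shifted = ts.cyclic_time_shift(0.5)
    #   # data and timestamps move by 0.5 s; because the shift is circular,
    #   # the final 0.5 s of ts wraps around to the start of shifted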
def match(self, other, psd=None,
low_frequency_cutoff=None, high_frequency_cutoff=None):
""" Return the match between the two TimeSeries or FrequencySeries.
Return the match between two waveforms. This is equivalent to the overlap
maximized over time and phase. By default, the other vector will be
resized to match self. This may remove high frequency content or the
end of the vector.
Parameters
----------
other : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : Frequency Series
A power spectral density to weight the overlap.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the match.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the match.
Returns
-------
match: float
index: int
The number of samples to shift to get the match.
"""
return self.to_frequencyseries().match(other, psd=psd,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff)
def detrend(self, type='linear'):
""" Remove linear trend from the data
Remove a linear trend from the data to improve the approximation that
        the data is circularly convolved; this helps reduce the size of filter
transients from a circular convolution / filter.
Parameters
----------
type: str
The choice of detrending. The default ('linear') removes a linear
least squares fit. 'constant' removes only the mean of the data.
"""
from scipy.signal import detrend
return self._return(detrend(self.numpy(), type=type))
def plot(self, **kwds):
""" Basic plot of this time series
"""
from matplotlib import pyplot
if self.kind == 'real':
plot = pyplot.plot(self.sample_times, self, **kwds)
return plot
elif self.kind == 'complex':
plot1 = pyplot.plot(self.sample_times, self.real(), **kwds)
plot2 = pyplot.plot(self.sample_times, self.imag(), **kwds)
return plot1, plot2
def load_timeseries(path, group=None):
"""Load a TimeSeries from an HDF5, ASCII or Numpy file. The file type is
inferred from the file extension, which must be `.hdf`, `.txt` or `.npy`.
For ASCII and Numpy files, the first column of the array is assumed to
contain the sample times. If the array has two columns, a real-valued time
series is returned. If the array has three columns, the second and third
ones are assumed to contain the real and imaginary parts of a complex time
series.
For HDF files, the dataset is assumed to contain the attributes `delta_t`
and `start_time`, which should contain respectively the sampling period in
seconds and the start GPS time of the data.
The default data types will be double precision floating point.
Parameters
----------
path: string
Input file path. Must end with either `.npy`, `.txt` or `.hdf`.
group: string
Additional name for internal storage use. When reading HDF files, this
is the path to the HDF dataset to read.
Raises
------
ValueError
If path does not end in a supported extension.
For Numpy and ASCII input files, this is also raised if the array
does not have 2 or 3 dimensions.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
data = _numpy.load(path)
elif ext == '.txt':
data = _numpy.loadtxt(path)
elif ext == '.hdf':
key = 'data' if group is None else group
with h5py.File(path, 'r') as f:
data = f[key][:]
series = TimeSeries(data, delta_t=f[key].attrs['delta_t'],
epoch=f[key].attrs['start_time'])
return series
else:
raise ValueError('Path must end with .npy, .hdf, or .txt')
delta_t = (data[-1][0] - data[0][0]) / (len(data) - 1)
epoch = _lal.LIGOTimeGPS(data[0][0])
if data.ndim == 2:
return TimeSeries(data[:,1], delta_t=delta_t, epoch=epoch)
elif data.ndim == 3:
return TimeSeries(data[:,1] + 1j*data[:,2],
delta_t=delta_t, epoch=epoch)
raise ValueError('File has %s dimensions, cannot convert to TimeSeries, \
must be 2 (real) or 3 (complex)' % data.ndim)
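# Hedged round-trip sketch (added for clarity, not part of the original
# module), pairing TimeSeries.save with load_timeseries; the file name is
# hypothetical:
#
#   ts = TimeSeries(_numpy.random.normal(size=4096), delta_t=1.0/256)
#   ts.save('example.hdf')             # stores data, start_time and delta_t
#   ts2 = load_timeseries('example.hdf')
#   # ts2 == ts (same data, epoch and delta_t)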
| 46,009
| 38.190801
| 120
|
py
|
pycbc
|
pycbc-master/pycbc/types/__init__.py
|
from .array import *
from .timeseries import *
from .frequencyseries import *
from .optparse import *
from .aligned import check_aligned
| 137
| 22
| 34
|
py
|
pycbc
|
pycbc-master/pycbc/types/frequencyseries.py
|
# Copyright (C) 2012 Tito Dal Canton, Josh Willis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Provides a class representing a frequency series.
"""
import os as _os, h5py
from pycbc.types.array import Array, _convert, zeros, _noreal
import lal as _lal
import numpy as _numpy
class FrequencySeries(Array):
"""Models a frequency series consisting of uniformly sampled scalar values.
Parameters
----------
initial_array : array-like
Array containing sampled data.
delta_f : float
Frequency between consecutive samples in Hertz.
epoch : {None, lal.LIGOTimeGPS}, optional
Start time of the associated time domain data in seconds.
dtype : {None, data-type}, optional
Sample data type.
copy : boolean, optional
If True, samples are copied to a new array.
"""
def __init__(self, initial_array, delta_f=None, epoch="", dtype=None, copy=True):
if len(initial_array) < 1:
raise ValueError('initial_array must contain at least one sample.')
if delta_f is None:
try:
delta_f = initial_array.delta_f
except AttributeError:
raise TypeError('must provide either an initial_array with a delta_f attribute, or a value for delta_f')
if not delta_f > 0:
raise ValueError('delta_f must be a positive number')
# We gave a nonsensical default value to epoch so we can test if it's been set.
# If the user passes in an initial_array that has an 'epoch' attribute and doesn't
# pass in a value of epoch, then our new object's epoch comes from initial_array.
# But if the user passed in a value---even 'None'---that will take precedence over
# anything set in initial_array. Finally, if the user passes in something without
# an epoch attribute *and* doesn't pass in a value of epoch, it becomes 'None'
if not isinstance(epoch,_lal.LIGOTimeGPS):
if epoch == "":
if isinstance(initial_array,FrequencySeries):
epoch = initial_array._epoch
else:
epoch = _lal.LIGOTimeGPS(0)
elif epoch is not None:
try:
if isinstance(epoch, _numpy.generic):
# In python3 lal LIGOTimeGPS will not work on numpy
# types as input. A quick google on how to generically
# convert numpy floats/ints to the python equivalent
# https://stackoverflow.com/questions/9452775/
epoch = _lal.LIGOTimeGPS(epoch.item())
else:
epoch = _lal.LIGOTimeGPS(epoch)
except:
raise TypeError('epoch must be either None or a lal.LIGOTimeGPS')
Array.__init__(self, initial_array, dtype=dtype, copy=copy)
self._delta_f = delta_f
self._epoch = epoch
def _return(self, ary):
return FrequencySeries(ary, self._delta_f, epoch=self._epoch, copy=False)
def _typecheck(self, other):
if isinstance(other, FrequencySeries):
try:
_numpy.testing.assert_almost_equal(other._delta_f,
self._delta_f)
except:
raise ValueError('different delta_f')
# consistency of _epoch is not required because we may want
# to combine frequency series estimated at different times
# (e.g. PSD estimation)
def get_delta_f(self):
"""Return frequency between consecutive samples in Hertz.
"""
return self._delta_f
delta_f = property(get_delta_f,
doc="Frequency between consecutive samples in Hertz.")
def get_epoch(self):
"""Return frequency series epoch as a LIGOTimeGPS.
"""
return self._epoch
epoch = property(get_epoch,
doc="Frequency series epoch as a LIGOTimeGPS.")
def get_sample_frequencies(self):
"""Return an Array containing the sample frequencies.
"""
return Array(range(len(self))) * self._delta_f
sample_frequencies = property(get_sample_frequencies,
doc="Array of the sample frequencies.")
def _getslice(self, index):
if index.step is not None:
new_delta_f = self._delta_f * index.step
else:
new_delta_f = self._delta_f
return FrequencySeries(Array._getslice(self, index),
delta_f=new_delta_f,
epoch=self._epoch,
copy=False)
def at_frequency(self, freq):
""" Return the value at the specified frequency
"""
return self[int(freq / self.delta_f)]
@property
def start_time(self):
"""Return the start time of this vector
"""
return self.epoch
@start_time.setter
def start_time(self, time):
""" Set the start time
"""
self._epoch = _lal.LIGOTimeGPS(time)
@property
def end_time(self):
"""Return the end time of this vector
"""
return self.start_time + self.duration
@property
def duration(self):
"""Return the time duration of this vector
"""
return 1.0 / self.delta_f
@property
def delta_t(self):
"""Return the time between samples if this were a time series.
        This assumes the time series is even in length!
"""
return 1.0 / self.sample_rate
@property
def sample_rate(self):
"""Return the sample rate this would have in the time domain. This
assumes even length time series!
"""
return (len(self) - 1) * self.delta_f * 2.0
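    # Hedged illustration (added for clarity, not part of the original
    # module) of the metadata relations above, with hypothetical numbers:
    #
    #   fs = FrequencySeries(zeros(2049), delta_f=0.25)
    #   # fs.duration    == 1.0 / 0.25             == 4 s
    #   # fs.sample_rate == (2049 - 1) * 0.25 * 2  == 1024 Hz
    #   # fs.delta_t     == 1.0 / 1024 s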
def __eq__(self,other):
"""
This is the Python special method invoked whenever the '=='
comparison is used. It will return true if the data of two
frequency series are identical, and all of the numeric meta-data
are identical, irrespective of whether or not the two
instances live in the same memory (for that comparison, the
Python statement 'a is b' should be used instead).
Thus, this method returns 'True' if the types of both 'self'
and 'other' are identical, as well as their lengths, dtypes,
epochs, delta_fs and the data in the arrays, element by element.
It will always do the comparison on the CPU, but will *not* move
either object to the CPU if it is not already there, nor change
the scheme of either object. It is possible to compare a CPU
object to a GPU object, and the comparison should be true if the
data and meta-data of the two objects are the same.
Note in particular that this function returns a single boolean,
and not an array of booleans as Numpy does. If the numpy
behavior is instead desired it can be obtained using the numpy()
method of the PyCBC type to get a numpy instance from each
object, and invoking '==' on those two instances.
Parameters
----------
other: another Python object, that should be tested for equality
with 'self'.
Returns
-------
boolean: 'True' if the types, dtypes, lengths, epochs, delta_fs
and data of the two objects are each identical.
"""
if super(FrequencySeries,self).__eq__(other):
return (self._epoch == other._epoch and self._delta_f == other._delta_f)
else:
return False
def almost_equal_elem(self,other,tol,relative=True,dtol=0.0):
"""
Compare whether two frequency series are almost equal, element
by element.
If the 'relative' parameter is 'True' (the default) then the
'tol' parameter (which must be positive) is interpreted as a
relative tolerance, and the comparison returns 'True' only if
abs(self[i]-other[i]) <= tol*abs(self[i])
for all elements of the series.
If 'relative' is 'False', then 'tol' is an absolute tolerance,
and the comparison is true only if
abs(self[i]-other[i]) <= tol
for all elements of the series.
The method also checks that self.delta_f is within 'dtol' of
other.delta_f; if 'dtol' has its default value of 0 then exact
equality between the two is required.
Other meta-data (type, dtype, length, and epoch) must be exactly
equal. If either object's memory lives on the GPU it will be
copied to the CPU for the comparison, which may be slow. But the
original object itself will not have its memory relocated nor
scheme changed.
Parameters
----------
other: another Python object, that should be tested for
almost-equality with 'self', element-by-element.
tol: a non-negative number, the tolerance, which is interpreted
as either a relative tolerance (the default) or an absolute
tolerance.
relative: A boolean, indicating whether 'tol' should be interpreted
as a relative tolerance (if True, the default if this argument
            is omitted) or as an absolute tolerance (if relative is False).
dtol: a non-negative number, the tolerance for delta_f. Like 'tol',
it is interpreted as relative or absolute based on the value of
'relative'. This parameter defaults to zero, enforcing exact
equality between the delta_f values of the two FrequencySeries.
Returns
-------
boolean: 'True' if the data and delta_fs agree within the tolerance,
as interpreted by the 'relative' keyword, and if the types,
lengths, dtypes, and epochs are exactly the same.
"""
# Check that the delta_f tolerance is non-negative; raise an exception
# if needed.
if (dtol < 0.0):
raise ValueError("Tolerance in delta_f cannot be negative")
if super(FrequencySeries,self).almost_equal_elem(other,tol=tol,relative=relative):
if relative:
return (self._epoch == other._epoch and
abs(self._delta_f-other._delta_f) <= dtol*self._delta_f)
else:
return (self._epoch == other._epoch and
abs(self._delta_f-other._delta_f) <= dtol)
else:
return False
def almost_equal_norm(self,other,tol,relative=True,dtol=0.0):
"""
Compare whether two frequency series are almost equal, normwise.
If the 'relative' parameter is 'True' (the default) then the
'tol' parameter (which must be positive) is interpreted as a
relative tolerance, and the comparison returns 'True' only if
abs(norm(self-other)) <= tol*abs(norm(self)).
If 'relative' is 'False', then 'tol' is an absolute tolerance,
and the comparison is true only if
abs(norm(self-other)) <= tol
The method also checks that self.delta_f is within 'dtol' of
other.delta_f; if 'dtol' has its default value of 0 then exact
equality between the two is required.
Other meta-data (type, dtype, length, and epoch) must be exactly
equal. If either object's memory lives on the GPU it will be
copied to the CPU for the comparison, which may be slow. But the
original object itself will not have its memory relocated nor
scheme changed.
Parameters
----------
other: another Python object, that should be tested for
almost-equality with 'self', based on their norms.
tol: a non-negative number, the tolerance, which is interpreted
as either a relative tolerance (the default) or an absolute
tolerance.
relative: A boolean, indicating whether 'tol' should be interpreted
as a relative tolerance (if True, the default if this argument
            is omitted) or as an absolute tolerance (if relative is False).
dtol: a non-negative number, the tolerance for delta_f. Like 'tol',
it is interpreted as relative or absolute based on the value of
'relative'. This parameter defaults to zero, enforcing exact
equality between the delta_f values of the two FrequencySeries.
Returns
-------
boolean: 'True' if the data and delta_fs agree within the tolerance,
as interpreted by the 'relative' keyword, and if the types,
lengths, dtypes, and epochs are exactly the same.
"""
# Check that the delta_f tolerance is non-negative; raise an exception
# if needed.
if (dtol < 0.0):
raise ValueError("Tolerance in delta_f cannot be negative")
if super(FrequencySeries,self).almost_equal_norm(other,tol=tol,relative=relative):
if relative:
return (self._epoch == other._epoch and
abs(self._delta_f-other._delta_f) <= dtol*self._delta_f)
else:
return (self._epoch == other._epoch and
abs(self._delta_f-other._delta_f) <= dtol)
else:
return False
@_convert
def lal(self):
"""Produces a LAL frequency series object equivalent to self.
Returns
-------
lal_data : {lal.*FrequencySeries}
LAL frequency series object containing the same data as self.
The actual type depends on the sample's dtype. If the epoch of
self was 'None', the epoch of the returned LAL object will be
LIGOTimeGPS(0,0); otherwise, the same as that of self.
Raises
------
TypeError
If frequency series is stored in GPU memory.
"""
lal_data = None
if self._epoch is None:
ep = _lal.LIGOTimeGPS(0,0)
else:
ep = self._epoch
if self._data.dtype == _numpy.float32:
lal_data = _lal.CreateREAL4FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.float64:
lal_data = _lal.CreateREAL8FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.complex64:
lal_data = _lal.CreateCOMPLEX8FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self))
elif self._data.dtype == _numpy.complex128:
lal_data = _lal.CreateCOMPLEX16FrequencySeries("",ep,0,self.delta_f,_lal.SecondUnit,len(self))
lal_data.data.data[:] = self.numpy()
return lal_data
def save(self, path, group=None, ifo='P1'):
"""
        Save frequency series to a Numpy .npy, HDF5, text, or LIGOLW XML
        file. For .npy and text output the first column contains the sample
        frequencies and the second contains the values; in the case of a
        complex frequency series saved as text, the imaginary part is written
        as a third column. When using hdf format, the data is stored as a
        single vector, along with the relevant attributes (delta_f and epoch).
        Parameters
        ----------
        path: string
            Destination file path. Must end with .hdf, .npy, .txt, .xml or
            .xml.gz.
        group: string
            Additional name for internal storage use. Ex. hdf storage uses
            this as the key value.
        ifo: string
            Detector name used to label the data when writing the LIGOLW XML
            (PSD) format. Ignored for the other formats. Default 'P1'.
        Raises
        ------
        ValueError
            If path does not end in a supported extension, or if LIGOLW XML
            output is requested for a complex frequency series.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
output = _numpy.vstack((self.sample_frequencies.numpy(),
self.numpy())).T
_numpy.save(path, output)
elif ext == '.txt':
if self.kind == 'real':
output = _numpy.vstack((self.sample_frequencies.numpy(),
self.numpy())).T
elif self.kind == 'complex':
output = _numpy.vstack((self.sample_frequencies.numpy(),
self.numpy().real,
self.numpy().imag)).T
_numpy.savetxt(path, output)
elif ext == '.xml' or path.endswith('.xml.gz'):
from pycbc.io.ligolw import make_psd_xmldoc
from ligo.lw import utils
if self.kind != 'real':
raise ValueError('XML only supports real frequency series')
output = self.lal()
            # When writing in this format we must *not* have zero values at
            # frequencies less than flow. To resolve this we overwrite all
            # bins below the first non-zero value with that first value.
data_lal = output.data.data
first_idx = _numpy.argmax(data_lal>0)
if not first_idx == 0:
data_lal[:first_idx] = data_lal[first_idx]
psddict = {ifo: output}
utils.write_filename(
make_psd_xmldoc(psddict),
path,
compress='auto'
)
elif ext == '.hdf':
key = 'data' if group is None else group
with h5py.File(path, 'a') as f:
ds = f.create_dataset(key, data=self.numpy(),
compression='gzip',
compression_opts=9, shuffle=True)
if self.epoch is not None:
ds.attrs['epoch'] = float(self.epoch)
ds.attrs['delta_f'] = float(self.delta_f)
else:
raise ValueError('Path must end with .npy, .txt, .xml, .xml.gz '
'or .hdf')
def to_frequencyseries(self):
""" Return frequency series """
return self
@_noreal
def to_timeseries(self, delta_t=None):
""" Return the Fourier transform of this time series.
Note that this assumes even length time series!
Parameters
----------
delta_t : {None, float}, optional
The time resolution of the returned series. By default the
resolution is determined by length and delta_f of this frequency
series.
Returns
-------
TimeSeries:
The inverse fourier transform of this frequency series.
"""
from pycbc.fft import ifft
from pycbc.types import TimeSeries, real_same_precision_as
nat_delta_t = 1.0 / ((len(self)-1)*2) / self.delta_f
if not delta_t:
delta_t = nat_delta_t
# add 0.5 to round integer
tlen = int(1.0 / self.delta_f / delta_t + 0.5)
flen = int(tlen / 2 + 1)
if flen < len(self):
raise ValueError("The value of delta_t (%s) would be "
"undersampled. Maximum delta_t "
"is %s." % (delta_t, nat_delta_t))
        # delta_t was defaulted above, so it is always set by this point;
        # copy the data into a frequency series of the required length
        tmp = FrequencySeries(zeros(flen, dtype=self.dtype),
                              delta_f=self.delta_f, epoch=self.epoch)
        tmp[:len(self)] = self[:]
f = TimeSeries(zeros(tlen,
dtype=real_same_precision_as(self)),
delta_t=delta_t)
ifft(tmp, f)
f._delta_t = delta_t
return f
@_noreal
def cyclic_time_shift(self, dt):
"""Shift the data and timestamps by a given number of seconds
Shift the data and timestamps in the time domain a given number of
seconds. To just change the time stamps, do ts.start_time += dt.
        The time shift may be smaller than the intrinsic sample spacing of the data.
        Note that data will be cyclically rotated, so if you shift by 2
seconds, the final 2 seconds of your data will now be at the
beginning of the data set.
Parameters
----------
dt : float
Amount of time to shift the vector.
Returns
-------
data : pycbc.types.FrequencySeries
The time shifted frequency series.
"""
from pycbc.waveform import apply_fseries_time_shift
data = apply_fseries_time_shift(self, dt)
data.start_time = self.start_time - dt
return data
def match(self, other, psd=None,
low_frequency_cutoff=None, high_frequency_cutoff=None):
""" Return the match between the two TimeSeries or FrequencySeries.
Return the match between two waveforms. This is equivalent to the overlap
maximized over time and phase. By default, the other vector will be
resized to match self. Beware, this may remove high frequency content or the
end of the vector.
Parameters
----------
other : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : Frequency Series
A power spectral density to weight the overlap.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the match.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the match.
        Returns
        -------
        match: float
            The match between the two waveforms, maximized over time and
            phase.
        index: int
            The number of samples to shift to get the match.
"""
from pycbc.types import TimeSeries
from pycbc.filter import match
if isinstance(other, TimeSeries):
if other.duration != self.duration:
other = other.copy()
other.resize(int(other.sample_rate * self.duration))
other = other.to_frequencyseries()
if len(other) != len(self):
other = other.copy()
other.resize(len(self))
if psd is not None and len(psd) > len(self):
psd = psd.copy()
psd.resize(len(self))
return match(self, other, psd=psd,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff)
def plot(self, **kwds):
""" Basic plot of this frequency series
"""
from matplotlib import pyplot
if self.kind == 'real':
plot = pyplot.plot(self.sample_frequencies, self, **kwds)
return plot
elif self.kind == 'complex':
plot1 = pyplot.plot(self.sample_frequencies, self.real(), **kwds)
plot2 = pyplot.plot(self.sample_frequencies, self.imag(), **kwds)
return plot1, plot2
def load_frequencyseries(path, group=None):
"""Load a FrequencySeries from an HDF5, ASCII or Numpy file. The file type
is inferred from the file extension, which must be `.hdf`, `.txt` or
`.npy`.
For ASCII and Numpy files, the first column of the array is assumed to
contain the frequency. If the array has two columns, a real frequency
series is returned. If the array has three columns, the second and third
ones are assumed to contain the real and imaginary parts of a complex
frequency series.
For HDF files, the dataset is assumed to contain the attribute `delta_f`
giving the frequency resolution in Hz. The attribute `epoch`, if present,
is taken as the start GPS time (epoch) of the data in the series.
The default data types will be double precision floating point.
Parameters
----------
path: string
Input file path. Must end with either `.npy`, `.txt` or `.hdf`.
group: string
Additional name for internal storage use. When reading HDF files, this
is the path to the HDF dataset to read.
Raises
------
ValueError
If the path does not end in a supported extension.
        For Numpy and ASCII input files, this is also raised if the array
        does not have 2 or 3 columns.
"""
ext = _os.path.splitext(path)[1]
if ext == '.npy':
data = _numpy.load(path)
elif ext == '.txt':
data = _numpy.loadtxt(path)
elif ext == '.hdf':
key = 'data' if group is None else group
with h5py.File(path, 'r') as f:
data = f[key][:]
delta_f = f[key].attrs['delta_f']
epoch = f[key].attrs['epoch'] if 'epoch' in f[key].attrs else None
series = FrequencySeries(data, delta_f=delta_f, epoch=epoch)
return series
else:
raise ValueError('Path must end with .npy, .hdf, or .txt')
delta_f = (data[-1][0] - data[0][0]) / (len(data) - 1)
    if data.ndim == 2 and data.shape[1] == 2:
        return FrequencySeries(data[:,1], delta_f=delta_f, epoch=None)
    elif data.ndim == 2 and data.shape[1] == 3:
        return FrequencySeries(data[:,1] + 1j*data[:,2], delta_f=delta_f,
                               epoch=None)
    raise ValueError('File has shape %s, cannot convert to FrequencySeries: '
                     'it must have 2 (real) or 3 (complex) columns'
                     % (data.shape,))
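# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; it relies solely on the API
# defined above: FrequencySeries, FrequencySeries.save and
# load_frequencyseries). The file name 'example_series.txt' is hypothetical.
if __name__ == '__main__':
    example = FrequencySeries(_numpy.arange(16, dtype=_numpy.float64),
                              delta_f=0.25)
    example.save('example_series.txt')
    reloaded = load_frequencyseries('example_series.txt')
    # The round trip should preserve both the data and the resolution
    assert reloaded.delta_f == example.delta_f
    assert _numpy.allclose(reloaded.numpy(), example.numpy())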
| 25,727
| 39.838095
| 120
|
py
|
pycbc
|
pycbc-master/pycbc/types/aligned.py
|
# Copyright (C) 2014 Josh Willis, Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides functions for checking whether a numpy array's memory is
aligned on the PYCBC_ALIGNMENT boundary, and for creating zeroed and empty
(uninitialized) arrays whose memory is aligned in that way.
"""
import numpy as _np
from pycbc import PYCBC_ALIGNMENT
def check_aligned(ndarr):
return ((ndarr.ctypes.data % PYCBC_ALIGNMENT) == 0)
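# Both allocators below work by over-allocating PYCBC_ALIGNMENT extra bytes
# and then returning a view that starts at the first address in the buffer
# that is a multiple of PYCBC_ALIGNMENT, so the result always satisfies
# check_aligned().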
def zeros(n, dtype):
d = _np.dtype(dtype)
nbytes = (d.itemsize)*int(n)
tmp = _np.zeros(nbytes+PYCBC_ALIGNMENT, dtype=_np.uint8)
address = tmp.__array_interface__['data'][0]
offset = (PYCBC_ALIGNMENT - address%PYCBC_ALIGNMENT)%PYCBC_ALIGNMENT
ret_ary = tmp[offset:offset+nbytes].view(dtype=d)
del tmp
return ret_ary
def empty(n, dtype):
d = _np.dtype(dtype)
nbytes = (d.itemsize)*int(n)
tmp = _np.empty(nbytes+PYCBC_ALIGNMENT, dtype=_np.uint8)
address = tmp.__array_interface__['data'][0]
offset = (PYCBC_ALIGNMENT - address%PYCBC_ALIGNMENT)%PYCBC_ALIGNMENT
ret_ary = tmp[offset:offset+nbytes].view(dtype=d)
del tmp
return ret_ary
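# Minimal usage sketch (illustrative only): allocate an aligned, zeroed
# buffer and confirm its alignment and contents.
if __name__ == '__main__':
    buf = zeros(1024, _np.float32)
    assert check_aligned(buf)
    assert buf.dtype == _np.float32 and len(buf) == 1024
    assert not buf.any()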
| 2,021
| 35.763636
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/tmpltbank/lambda_mapping.py
|
# Copyright (C) 2013 Ian W. Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
import numpy
import pycbc.libutils
from lal import MTSUN_SI, PI, CreateREAL8Vector
lalsimulation = pycbc.libutils.import_optional('lalsimulation')
# PLEASE ENSURE THESE ARE KEPT UP TO DATE WITH THE REST OF THIS FILE
pycbcValidTmpltbankOrders = ['zeroPN','onePN','onePointFivePN','twoPN',\
'twoPointFivePN','threePN','threePointFivePN']
pycbcValidOrdersHelpDescriptions="""
* zeroPN: Will only include the dominant term (proportional to chirp mass)
* onePN: Will only include the leading orbit term and the first correction at 1PN
* onePointFivePN: Will include orbit and spin terms to 1.5PN.
* twoPN: Will include orbit and spin terms to 2PN.
* twoPointFivePN: Will include orbit and spin terms to 2.5PN.
* threePN: Will include orbit terms to 3PN and spin terms to 2.5PN.
* threePointFivePN: Include orbit terms to 3.5PN and spin terms to 2.5PN
"""
def generate_mapping(order):
"""
This function will take an order string and return a mapping between
components in the metric and the various Lambda components. This must be
used (and consistently used) when generating the metric *and* when
transforming to/from the xi_i coordinates to the lambda_i coordinates.
NOTE: This is not a great way of doing this. It would be nice to clean
this up. Hence pulling this function out. The valid PN orders are
{}
Parameters
----------
order : string
A string containing a PN order. Valid values are given above.
Returns
--------
mapping : dictionary
A mapping between the active Lambda terms and index in the metric
"""
mapping = {}
mapping['Lambda0'] = 0
if order == 'zeroPN':
return mapping
mapping['Lambda2'] = 1
if order == 'onePN':
return mapping
mapping['Lambda3'] = 2
if order == 'onePointFivePN':
return mapping
mapping['Lambda4'] = 3
if order == 'twoPN':
return mapping
mapping['LogLambda5'] = 4
if order == 'twoPointFivePN':
return mapping
mapping['Lambda6'] = 5
mapping['LogLambda6'] = 6
if order == 'threePN':
return mapping
mapping['Lambda7'] = 7
if order == 'threePointFivePN':
return mapping
# For some as-of-yet unknown reason, the tidal terms are not giving correct
# match estimates when enabled. So, for now, this order is commented out.
#if order == 'tidalTesting':
# mapping['Lambda10'] = 8
# mapping['Lambda12'] = 9
# return mapping
raise ValueError("Order %s is not understood." %(order))
# Override doc so the PN orders are added automatically to online docs
generate_mapping.__doc__ = \
generate_mapping.__doc__.format(pycbcValidOrdersHelpDescriptions)
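# For example (illustrative), generate_mapping('twoPN') returns
#     {'Lambda0': 0, 'Lambda2': 1, 'Lambda3': 2, 'Lambda4': 3}
# so a metric constructed at this order is a 4x4 matrix whose rows and
# columns are indexed by these Lambda terms.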
def generate_inverse_mapping(order):
"""Genereate a lambda entry -> PN order map.
This function will generate the opposite of generate mapping. So where
generate_mapping gives dict[key] = item this will give
dict[item] = key. Valid PN orders are:
{}
Parameters
----------
order : string
A string containing a PN order. Valid values are given above.
Returns
--------
mapping : dictionary
An inverse mapping between the active Lambda terms and index in the
metric
"""
mapping = generate_mapping(order)
inv_mapping = {}
for key,value in mapping.items():
inv_mapping[value] = key
return inv_mapping
generate_inverse_mapping.__doc__ = \
generate_inverse_mapping.__doc__.format(pycbcValidOrdersHelpDescriptions)
def get_ethinca_orders():
"""
Returns the dictionary mapping TaylorF2 PN order names to twice-PN
orders (powers of v/c)
"""
ethinca_orders = {"zeroPN" : 0,
"onePN" : 2,
"onePointFivePN" : 3,
"twoPN" : 4,
"twoPointFivePN" : 5,
"threePN" : 6,
"threePointFivePN" : 7
}
return ethinca_orders
def ethinca_order_from_string(order):
"""
Returns the integer giving twice the post-Newtonian order
used by the ethinca calculation. Currently valid only for TaylorF2 metric
Parameters
----------
order : string
Returns
-------
int
"""
if order in get_ethinca_orders().keys():
return get_ethinca_orders()[order]
else: raise ValueError("Order "+str(order)+" is not valid for ethinca"
"calculation! Valid orders: "+
str(get_ethinca_orders().keys()))
def get_chirp_params(mass1, mass2, spin1z, spin2z, f0, order,
quadparam1=None, quadparam2=None, lambda1=None,
lambda2=None):
"""
Take a set of masses and spins and convert to the various lambda
coordinates that describe the orbital phase. Accepted PN orders are:
{}
Parameters
----------
mass1 : float or array
Mass1 of input(s).
mass2 : float or array
Mass2 of input(s).
spin1z : float or array
Parallel spin component(s) of body 1.
spin2z : float or array
Parallel spin component(s) of body 2.
f0 : float
This is an arbitrary scaling factor introduced to avoid the potential
for numerical overflow when calculating this. Generally the default
value (70) is safe here. **IMPORTANT, if you want to calculate the
ethinca metric components later this MUST be set equal to f_low.**
This value must also be used consistently (ie. don't change its value
when calling different functions!).
order : string
The Post-Newtonian order that is used to translate from masses and
spins to the lambda_i coordinate system. Valid orders given above.
Returns
--------
lambdas : list of floats or numpy.arrays
The lambda coordinates for the input system(s)
"""
# Determine whether array or single value input
sngl_inp = False
try:
num_points = len(mass1)
except TypeError:
sngl_inp = True
# If you care about speed, you aren't calling this function one entry
# at a time.
mass1 = numpy.array([mass1])
mass2 = numpy.array([mass2])
spin1z = numpy.array([spin1z])
spin2z = numpy.array([spin2z])
if quadparam1 is not None:
quadparam1 = numpy.array([quadparam1])
if quadparam2 is not None:
quadparam2 = numpy.array([quadparam2])
if lambda1 is not None:
lambda1 = numpy.array([lambda1])
if lambda2 is not None:
lambda2 = numpy.array([lambda2])
num_points = 1
if quadparam1 is None:
quadparam1 = numpy.ones(len(mass1), dtype=float)
if quadparam2 is None:
quadparam2 = numpy.ones(len(mass1), dtype=float)
if lambda1 is None:
lambda1 = numpy.zeros(len(mass1), dtype=float)
if lambda2 is None:
lambda2 = numpy.zeros(len(mass1), dtype=float)
mass1_v = CreateREAL8Vector(len(mass1))
mass1_v.data[:] = mass1[:]
mass2_v = CreateREAL8Vector(len(mass1))
mass2_v.data[:] = mass2[:]
spin1z_v = CreateREAL8Vector(len(mass1))
spin1z_v.data[:] = spin1z[:]
spin2z_v = CreateREAL8Vector(len(mass1))
spin2z_v.data[:] = spin2z[:]
lambda1_v = CreateREAL8Vector(len(mass1))
lambda1_v.data[:] = lambda1[:]
lambda2_v = CreateREAL8Vector(len(mass1))
lambda2_v.data[:] = lambda2[:]
dquadparam1_v = CreateREAL8Vector(len(mass1))
dquadparam1_v.data[:] = quadparam1[:] - 1.
dquadparam2_v = CreateREAL8Vector(len(mass1))
dquadparam2_v.data[:] = quadparam2[:] - 1.
phasing_arr = lalsimulation.SimInspiralTaylorF2AlignedPhasingArray\
(mass1_v, mass2_v, spin1z_v, spin2z_v, lambda1_v, lambda2_v,
dquadparam1_v, dquadparam2_v)
    vec_len = lalsimulation.PN_PHASING_SERIES_MAX_ORDER + 1
phasing_vs = numpy.zeros([num_points, vec_len])
phasing_vlogvs = numpy.zeros([num_points, vec_len])
phasing_vlogvsqs = numpy.zeros([num_points, vec_len])
lng = len(mass1)
jmp = lng * vec_len
for idx in range(vec_len):
phasing_vs[:,idx] = phasing_arr.data[lng*idx : lng*(idx+1)]
phasing_vlogvs[:,idx] = \
phasing_arr.data[jmp + lng*idx : jmp + lng*(idx+1)]
phasing_vlogvsqs[:,idx] = \
phasing_arr.data[2*jmp + lng*idx : 2*jmp + lng*(idx+1)]
pim = PI * (mass1 + mass2)*MTSUN_SI
pmf = pim * f0
pmf13 = pmf**(1./3.)
logpim13 = numpy.log((pim)**(1./3.))
mapping = generate_inverse_mapping(order)
lambdas = []
lambda_str = '^Lambda([0-9]+)'
loglambda_str = '^LogLambda([0-9]+)'
logloglambda_str = '^LogLogLambda([0-9]+)'
for idx in range(len(mapping.keys())):
# RE magic engage!
rematch = re.match(lambda_str, mapping[idx])
if rematch:
pn_order = int(rematch.groups()[0])
term = phasing_vs[:,pn_order]
term = term + logpim13 * phasing_vlogvs[:,pn_order]
lambdas.append(term * pmf13**(-5+pn_order))
continue
rematch = re.match(loglambda_str, mapping[idx])
if rematch:
pn_order = int(rematch.groups()[0])
lambdas.append((phasing_vlogvs[:,pn_order]) * pmf13**(-5+pn_order))
continue
rematch = re.match(logloglambda_str, mapping[idx])
if rematch:
raise ValueError("LOGLOG terms are not implemented")
#pn_order = int(rematch.groups()[0])
#lambdas.append(phasing_vlogvsqs[:,pn_order] * pmf13**(-5+pn_order))
#continue
err_msg = "Failed to parse " + mapping[idx]
raise ValueError(err_msg)
if sngl_inp:
return [l[0] for l in lambdas]
else:
return lambdas
get_chirp_params.__doc__ = \
get_chirp_params.__doc__.format(pycbcValidOrdersHelpDescriptions)
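# Minimal usage sketch (illustrative only): compute the lambda coordinates of
# a single aligned-spin binary at 3.5PN order with f0 = 70 Hz. Like the rest
# of this module, it needs a working lalsimulation installation.
if __name__ == '__main__':
    lams = get_chirp_params(10.0, 5.0, 0.1, 0.0, 70.0, 'threePointFivePN')
    # One entry per active term in generate_mapping('threePointFivePN')
    print(len(lams), 'lambda coordinates:', lams)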
| 10,726
| 34.996644
| 80
|
py
|
pycbc
|
pycbc-master/pycbc/tmpltbank/bank_output_utils.py
|
import numpy
import h5py
from lal import PI, MTSUN_SI, TWOPI, GAMMA
from ligo.lw import ligolw, lsctables, utils as ligolw_utils
from pycbc import pnutils
from pycbc.tmpltbank.lambda_mapping import ethinca_order_from_string
from pycbc.io.ligolw import (
return_empty_sngl, return_search_summary, create_process_table
)
from pycbc.waveform import get_waveform_filter_length_in_time as gwflit
def convert_to_sngl_inspiral_table(params, proc_id):
'''
Convert a list of m1,m2,spin1z,spin2z values into a basic sngl_inspiral
table with mass and spin parameters populated and event IDs assigned
Parameters
-----------
params : iterable
Each entry in the params iterable should be a sequence of
[mass1, mass2, spin1z, spin2z] in that order
proc_id : int
Process ID to add to each row of the sngl_inspiral table
Returns
----------
SnglInspiralTable
Bank of templates in SnglInspiralTable format
'''
sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
col_names = ['mass1','mass2','spin1z','spin2z']
for values in params:
tmplt = return_empty_sngl()
tmplt.process_id = proc_id
for colname, value in zip(col_names, values):
setattr(tmplt, colname, value)
tmplt.mtotal, tmplt.eta = pnutils.mass1_mass2_to_mtotal_eta(
tmplt.mass1, tmplt.mass2)
tmplt.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
tmplt.mass1, tmplt.mass2)
tmplt.template_duration = 0 # FIXME
tmplt.event_id = sngl_inspiral_table.get_next_id()
sngl_inspiral_table.append(tmplt)
return sngl_inspiral_table
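# For example (illustrative), a two-template bank can be converted with
#     tbl = convert_to_sngl_inspiral_table(
#         [(10., 5., 0.1, 0.), (3., 2., 0., 0.)], proc_id=0)
# giving a SnglInspiralTable whose rows also have mtotal, eta and mchirp
# filled in.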
def calculate_ethinca_metric_comps(metricParams, ethincaParams, mass1, mass2,
spin1z=0., spin2z=0., full_ethinca=True):
"""
Calculate the Gamma components needed to use the ethinca metric.
At present this outputs the standard TaylorF2 metric over the end time
and chirp times \tau_0 and \tau_3.
A desirable upgrade might be to use the \chi coordinates [defined WHERE?]
for metric distance instead of \tau_0 and \tau_3.
The lower frequency cutoff is currently hard-coded to be the same as the
bank layout options fLow and f0 (which must be the same as each other).
Parameters
-----------
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
ethincaParams : ethincaParameters instance
Structure holding options relevant to the ethinca metric computation.
mass1 : float
Mass of the heavier body in the considered template.
mass2 : float
Mass of the lighter body in the considered template.
spin1z : float (optional, default=0)
Spin of the heavier body in the considered template.
spin2z : float (optional, default=0)
Spin of the lighter body in the considered template.
full_ethinca : boolean (optional, default=True)
If True calculate the ethinca components in all 3 directions (mass1,
mass2 and time). If False calculate only the time component (which is
stored in Gamma0).
Returns
--------
fMax_theor : float
Value of the upper frequency cutoff given by the template parameters
and the cutoff formula requested.
gammaVals : numpy_array
Array holding 6 independent metric components in
(end_time, tau_0, tau_3) coordinates to be stored in the Gamma0-5
slots of a SnglInspiral object.
"""
if (float(spin1z) != 0. or float(spin2z) != 0.) and full_ethinca:
raise NotImplementedError("Ethinca cannot at present be calculated "
"for nonzero component spins!")
f0 = metricParams.f0
if f0 != metricParams.fLow:
raise ValueError("If calculating ethinca the bank f0 value must be "
"equal to f-low!")
if ethincaParams.fLow is not None and (
ethincaParams.fLow != metricParams.fLow):
raise NotImplementedError("An ethinca metric f-low different from the"
" bank metric f-low is not supported!")
twicePNOrder = ethinca_order_from_string(ethincaParams.pnOrder)
piFl = PI * f0
totalMass, eta = pnutils.mass1_mass2_to_mtotal_eta(mass1, mass2)
totalMass = totalMass * MTSUN_SI
v0cube = totalMass*piFl
v0 = v0cube**(1./3.)
# Get theoretical cutoff frequency and work out the closest
# frequency for which moments were calculated
fMax_theor = pnutils.frequency_cutoff_from_name(
ethincaParams.cutoff, mass1, mass2, spin1z, spin2z)
fMaxes = list(metricParams.moments['J4'].keys())
fMaxIdx = abs(numpy.array(fMaxes,dtype=float) - fMax_theor).argmin()
fMax = fMaxes[fMaxIdx]
# Set the appropriate moments
Js = numpy.zeros([18,3],dtype=float)
for i in range(18):
Js[i,0] = metricParams.moments['J%d'%(i)][fMax]
Js[i,1] = metricParams.moments['log%d'%(i)][fMax]
Js[i,2] = metricParams.moments['loglog%d'%(i)][fMax]
# Compute the time-dependent metric term.
two_pi_flower_sq = TWOPI * f0 * TWOPI * f0
gammaVals = numpy.zeros([6],dtype=float)
gammaVals[0] = 0.5 * two_pi_flower_sq * \
( Js[(1,0)] - (Js[(4,0)]*Js[(4,0)]) )
# If mass terms not required stop here
if not full_ethinca:
return fMax_theor, gammaVals
# 3pN is a mess, so split it into pieces
a0 = 11583231236531/200286535680 - 5*PI*PI - 107*GAMMA/14
a1 = (-15737765635/130056192 + 2255*PI*PI/512)*eta
a2 = (76055/73728)*eta*eta
a3 = (-127825/55296)*eta*eta*eta
alog = numpy.log(4*v0) # Log terms are tricky - be careful
# Get the Psi coefficients
Psi = [{},{}] #Psi = numpy.zeros([2,8,2],dtype=float)
Psi[0][0,0] = 3/5
Psi[0][2,0] = (743/756 + 11*eta/3)*v0*v0
Psi[0][3,0] = 0.
Psi[0][4,0] = (-3058673/508032 + 5429*eta/504 + 617*eta*eta/24)\
*v0cube*v0
Psi[0][5,1] = (-7729*PI/126)*v0cube*v0*v0/3
Psi[0][6,0] = (128/15)*(-3*a0 - a1 + a2 + 3*a3 + 107*(1+3*alog)/14)\
*v0cube*v0cube
Psi[0][6,1] = (6848/35)*v0cube*v0cube/3
Psi[0][7,0] = (-15419335/63504 - 75703*eta/756)*PI*v0cube*v0cube*v0
Psi[1][0,0] = 0.
    Psi[1][2,0] = (3715/12096 - 55*eta/96)/PI/v0
Psi[1][3,0] = -3/2
Psi[1][4,0] = (15293365/4064256 - 27145*eta/16128 - 3085*eta*eta/384)\
*v0/PI
Psi[1][5,1] = (193225/8064)*v0*v0/3
Psi[1][6,0] = (4/PI)*(2*a0 + a1/3 - 4*a2/3 - 3*a3 -107*(1+6*alog)/42)\
*v0cube
Psi[1][6,1] = (-428/PI/7)*v0cube/3
Psi[1][7,0] = (77096675/1161216 + 378515*eta/24192 + 74045*eta*eta/8064)\
*v0cube*v0
# Set the appropriate moments
Js = numpy.zeros([18,3],dtype=float)
for i in range(18):
Js[i,0] = metricParams.moments['J%d'%(i)][fMax]
Js[i,1] = metricParams.moments['log%d'%(i)][fMax]
Js[i,2] = metricParams.moments['loglog%d'%(i)][fMax]
# Calculate the g matrix
PNterms = [(0,0),(2,0),(3,0),(4,0),(5,1),(6,0),(6,1),(7,0)]
PNterms = [term for term in PNterms if term[0] <= twicePNOrder]
# Now can compute the mass-dependent gamma values
for m in [0, 1]:
for k in PNterms:
gammaVals[1+m] += 0.5 * two_pi_flower_sq * Psi[m][k] * \
( Js[(9-k[0],k[1])]
- Js[(12-k[0],k[1])] * Js[(4,0)] )
g = numpy.zeros([2,2],dtype=float)
for (m,n) in [(0,0),(0,1),(1,1)]:
for k in PNterms:
for l in PNterms:
g[m,n] += Psi[m][k] * Psi[n][l] * \
( Js[(17-k[0]-l[0], k[1]+l[1])]
- Js[(12-k[0],k[1])] * Js[(12-l[0],l[1])] )
g[m,n] = 0.5 * two_pi_flower_sq * g[m,n]
g[n,m] = g[m,n]
gammaVals[3] = g[0,0]
gammaVals[4] = g[0,1]
gammaVals[5] = g[1,1]
return fMax_theor, gammaVals
def output_sngl_inspiral_table(outputFile, tempBank, programName="",
optDict = None, outdoc=None,
**kwargs): # pylint:disable=unused-argument
"""
Function that converts the information produced by the various PyCBC bank
generation codes into a valid LIGOLW XML file containing a sngl_inspiral
table and outputs to file.
Parameters
-----------
outputFile : string
Name of the file that the bank will be written to
tempBank : iterable
Each entry in the tempBank iterable should be a sequence of
[mass1,mass2,spin1z,spin2z] in that order.
programName (key-word-argument) : string
Name of the executable that has been run
optDict (key-word argument) : dictionary
Dictionary of the command line arguments passed to the program
outdoc (key-word argument) : ligolw xml document
If given add template bank to this representation of a xml document and
write to disk. If not given create a new document.
kwargs : optional key-word arguments
Allows unused options to be passed to this function (for modularity)
"""
if optDict is None:
optDict = {}
if outdoc is None:
outdoc = ligolw.Document()
outdoc.appendChild(ligolw.LIGO_LW())
# get IFO to put in search summary table
ifos = []
if 'channel_name' in optDict.keys():
if optDict['channel_name'] is not None:
ifos = [optDict['channel_name'][0:2]]
proc = create_process_table(
outdoc,
program_name=programName,
detectors=ifos,
options=optDict
)
proc_id = proc.process_id
sngl_inspiral_table = convert_to_sngl_inspiral_table(tempBank, proc_id)
# set per-template low-frequency cutoff
if 'f_low_column' in optDict and 'f_low' in optDict and \
optDict['f_low_column'] is not None:
for sngl in sngl_inspiral_table:
setattr(sngl, optDict['f_low_column'], optDict['f_low'])
outdoc.childNodes[0].appendChild(sngl_inspiral_table)
# get times to put in search summary table
start_time = 0
end_time = 0
if 'gps_start_time' in optDict.keys() and 'gps_end_time' in optDict.keys():
start_time = optDict['gps_start_time']
end_time = optDict['gps_end_time']
# make search summary table
search_summary_table = lsctables.New(lsctables.SearchSummaryTable)
search_summary = return_search_summary(
start_time, end_time, len(sngl_inspiral_table), ifos
)
search_summary_table.append(search_summary)
outdoc.childNodes[0].appendChild(search_summary_table)
# write the xml doc to disk
ligolw_utils.write_filename(outdoc, outputFile)
def output_bank_to_hdf(outputFile, tempBank, optDict=None, programName='',
approximant=None, output_duration=False,
**kwargs): # pylint:disable=unused-argument
"""
Function that converts the information produced by the various PyCBC bank
generation codes into a hdf5 file.
Parameters
-----------
outputFile : string
Name of the file that the bank will be written to
tempBank : iterable
Each entry in the tempBank iterable should be a sequence of
[mass1,mass2,spin1z,spin2z] in that order.
programName (key-word-argument) : string
Name of the executable that has been run
optDict (key-word argument) : dictionary
Dictionary of the command line arguments passed to the program
approximant : string
The approximant to be outputted to the file,
if output_duration is True, this is also used for that calculation.
output_duration : boolean
Output the duration of the template, calculated using
get_waveform_filter_length_in_time, to the file.
kwargs : optional key-word arguments
Allows unused options to be passed to this function (for modularity)
"""
bank_dict = {}
mass1, mass2, spin1z, spin2z = list(zip(*tempBank))
bank_dict['mass1'] = mass1
bank_dict['mass2'] = mass2
bank_dict['spin1z'] = spin1z
bank_dict['spin2z'] = spin2z
# Add other values to the bank dictionary as appropriate
if optDict is not None:
bank_dict['f_lower'] = numpy.ones_like(mass1) * \
optDict['f_low']
argument_string = [f'{k}:{v}' for k, v in optDict.items()]
if optDict is not None and optDict['output_f_final']:
bank_dict['f_final'] = numpy.ones_like(mass1) * \
optDict['f_upper']
if approximant:
        # Ensure the approximant is stored as a bytes string before writing
        appx = (approximant if isinstance(approximant, bytes)
                else approximant.encode())
        bank_dict['approximant'] = numpy.repeat(appx, len(mass1))
if output_duration:
appx = approximant if approximant else 'SPAtmplt'
tmplt_durations = numpy.zeros_like(mass1)
for i in range(len(mass1)):
wfrm_length = gwflit(appx,
mass1=mass1[i],
mass2=mass2[i],
f_lower=optDict['f_low'],
phase_order=7)
tmplt_durations[i] = wfrm_length
bank_dict['template_duration'] = tmplt_durations
with h5py.File(outputFile, 'w') as bankf_out:
bankf_out.attrs['program'] = programName
if optDict is not None:
bankf_out.attrs['arguments'] = argument_string
for k, v in bank_dict.items():
bankf_out[k] = v
def output_bank_to_file(outputFile, tempBank, **kwargs):
    """
    Dispatch to the LIGOLW XML or HDF writer above, based on the extension
    of outputFile. All keyword arguments are passed through unchanged.
    """
if outputFile.endswith(('.xml','.xml.gz','.xmlgz')):
output_sngl_inspiral_table(
outputFile,
tempBank,
**kwargs
)
elif outputFile.endswith(('.h5','.hdf','.hdf5')):
output_bank_to_hdf(
outputFile,
tempBank,
**kwargs
)
else:
err_msg = f"Unrecognized extension for file {outputFile}."
raise ValueError(err_msg)
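# Minimal usage sketch (illustrative only): write a small two-template bank
# to HDF. The file name 'small_bank.hdf' is hypothetical and optDict mimics
# the minimal options a bank-generation executable would pass in.
if __name__ == '__main__':
    example_bank = [(10.0, 5.0, 0.1, 0.0), (30.0, 25.0, 0.0, -0.2)]
    output_bank_to_file('small_bank.hdf', example_bank,
                        optDict={'f_low': 20.0, 'output_f_final': False},
                        programName='bank_output_utils_example')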
| 14,135
| 38.157895
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/tmpltbank/lattice_utils.py
|
# Copyright (C) 2013 Ian W. Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import copy
import numpy
import lal
def generate_hexagonal_lattice(maxv1, minv1, maxv2, minv2, mindist):
"""
This function generates a 2-dimensional lattice of points using a hexagonal
lattice.
Parameters
-----------
maxv1 : float
Largest value in the 1st dimension to cover
minv1 : float
Smallest value in the 1st dimension to cover
maxv2 : float
Largest value in the 2nd dimension to cover
minv2 : float
Smallest value in the 2nd dimension to cover
mindist : float
Maximum allowed mismatch between a point in the parameter space and the
generated bank of points.
Returns
--------
v1s : numpy.array
Array of positions in the first dimension
v2s : numpy.array
Array of positions in the second dimension
"""
if minv1 > maxv1:
raise ValueError("Invalid input to function.")
if minv2 > maxv2:
raise ValueError("Invalid input to function.")
# Place first point
v1s = [minv1]
v2s = [minv2]
initPoint = [minv1,minv2]
# Place first line
initLine = [initPoint]
tmpv1 = minv1
while (tmpv1 < maxv1):
tmpv1 = tmpv1 + (3 * mindist)**(0.5)
initLine.append([tmpv1,minv2])
v1s.append(tmpv1)
v2s.append(minv2)
initLine = numpy.array(initLine)
initLine2 = copy.deepcopy(initLine)
initLine2[:,0] += 0.5 * (3*mindist)**0.5
initLine2[:,1] += 1.5 * (mindist)**0.5
for i in range(len(initLine2)):
v1s.append(initLine2[i,0])
v2s.append(initLine2[i,1])
tmpv2_1 = initLine[0,1]
tmpv2_2 = initLine2[0,1]
while tmpv2_1 < maxv2 and tmpv2_2 < maxv2:
tmpv2_1 = tmpv2_1 + 3.0 * (mindist)**0.5
tmpv2_2 = tmpv2_2 + 3.0 * (mindist)**0.5
initLine[:,1] = tmpv2_1
initLine2[:,1] = tmpv2_2
for i in range(len(initLine)):
v1s.append(initLine[i,0])
v2s.append(initLine[i,1])
for i in range(len(initLine2)):
v1s.append(initLine2[i,0])
v2s.append(initLine2[i,1])
v1s = numpy.array(v1s)
v2s = numpy.array(v2s)
return v1s, v2s
def generate_anstar_3d_lattice(maxv1, minv1, maxv2, minv2, maxv3, minv3, \
mindist):
"""
This function calls into LAL routines to generate a 3-dimensional array
of points using the An^* lattice.
Parameters
-----------
maxv1 : float
Largest value in the 1st dimension to cover
minv1 : float
Smallest value in the 1st dimension to cover
maxv2 : float
Largest value in the 2nd dimension to cover
minv2 : float
Smallest value in the 2nd dimension to cover
maxv3 : float
Largest value in the 3rd dimension to cover
minv3 : float
Smallest value in the 3rd dimension to cover
mindist : float
Maximum allowed mismatch between a point in the parameter space and the
generated bank of points.
Returns
--------
v1s : numpy.array
Array of positions in the first dimension
v2s : numpy.array
Array of positions in the second dimension
    v3s : numpy.array
        Array of positions in the third dimension
"""
# Lalpulsar not a requirement for the rest of pycbc, so check if we have it
# here in this function.
try:
import lalpulsar
    except ImportError:
raise ImportError("A SWIG-wrapped install of lalpulsar is needed to use the anstar tiling functionality.")
tiling = lalpulsar.CreateLatticeTiling(3)
lalpulsar.SetLatticeTilingConstantBound(tiling, 0, minv1, maxv1)
lalpulsar.SetLatticeTilingConstantBound(tiling, 1, minv2, maxv2)
lalpulsar.SetLatticeTilingConstantBound(tiling, 2, minv3, maxv3)
# Make a 3x3 Euclidean lattice
a = lal.gsl_matrix(3,3)
a.data[0,0] = 1
a.data[1,1] = 1
a.data[2,2] = 1
try:
# old versions of lalpulsar used an enumeration
lattice = lalpulsar.TILING_LATTICE_ANSTAR
except AttributeError:
# newer versions of lalpulsar use a string
lattice = 'An-star'
lalpulsar.SetTilingLatticeAndMetric(tiling, lattice, a, mindist)
try:
iterator = lalpulsar.CreateLatticeTilingIterator(tiling, 3)
except TypeError:
# old versions of lalpulsar required the flags argument
# (set to 0 for defaults)
iterator = lalpulsar.CreateLatticeTilingIterator(tiling, 3, 0)
vs1 = []
vs2 = []
vs3 = []
curr_point = lal.gsl_vector(3)
while (lalpulsar.NextLatticeTilingPoint(iterator, curr_point) > 0):
vs1.append(curr_point.data[0])
vs2.append(curr_point.data[1])
vs3.append(curr_point.data[2])
return vs1, vs2, vs3
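# Minimal usage sketch (illustrative only): cover a square patch of a
# 2-dimensional parameter space with a hexagonal lattice at a minimal-match
# spacing of 0.03. Note the (max, min) argument ordering.
if __name__ == '__main__':
    v1s, v2s = generate_hexagonal_lattice(10., 0., 10., 0., 0.03)
    print(len(v1s), 'lattice points generated')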
| 5,472
| 33.20625
| 114
|
py
|
pycbc
|
pycbc-master/pycbc/tmpltbank/bank_conversions.py
|
# Copyright (C) 2022 Gareth Cabourn Davies
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module is supplied to make a convenience function for converting into
specific values from PyCBC template banks.
"""
import numpy as np
from pycbc import conversions as conv
from pycbc import pnutils
# Convert from parameter name to helper function
# some multiple names are used for the same function
conversion_options = ['mass1', 'mass2', 'spin1z', 'spin2z', 'duration',
'template_duration', 'mtotal', 'total_mass',
'q', 'invq', 'eta', 'chirp_mass', 'mchirp',
'chieff', 'chi_eff', 'effective_spin', 'chi_a',
'premerger_duration']
mass_conversions = {
'mtotal': conv.mtotal_from_mass1_mass2,
'total_mass': conv.mtotal_from_mass1_mass2,
'q': conv.q_from_mass1_mass2,
'invq': conv.invq_from_mass1_mass2,
'eta': conv.eta_from_mass1_mass2,
'mchirp': conv.mchirp_from_mass1_mass2,
'chirp_mass': conv.mchirp_from_mass1_mass2,
}
spin_conversions = {
'chieff': conv.chi_eff,
'chi_eff': conv.chi_eff,
'effective_spin': conv.chi_eff,
'chi_a': conv.chi_a
}
def get_bank_property(parameter, bank, template_ids):
""" Get a specific value from a hdf file object in standard PyCBC
template bank format
Parameters
----------
parameter: str
the parameter to convert to, must be in conversion_options
bank: h5py File object or dictionary of arrays
Template bank containing the parameters for use in conversions
must contain mass1, mass2, spin1z, spin2z as a minimum
template_ids: numpy array
Array of template IDs for reading a set of templates from the bank
Returns
-------
values: numpy array, same size as template_ids
Array of whatever the requested parameter is calculated for
the specified templates in the bank
"""
# These just give things already in the bank
if parameter in bank:
values = bank[parameter][:][template_ids]
# Duration may be in the bank, but if not, we need to calculate
elif parameter.endswith('duration'):
fullband_req = False
prem_required = False
if parameter != "premerger_duration" and 'template_duration' in bank:
            # This statement should be reached only if 'duration' is
            # given, but 'template_duration' is in the bank
values = bank['template_duration'][:][template_ids]
elif parameter in ['template_duration', 'duration']:
# Only calculate fullband/premerger durations if we need to
fullband_req = True
if 'f_final' in bank:
prem_required = True
elif parameter == "premerger_duration":
prem_required = True
# Set up the arguments for get_imr_duration
imr_args = ['mass1', 'mass2', 'spin1z', 'spin2z']
if 'approximant' in bank:
kwargs = {'approximant': bank['approximant'][:][template_ids]}
else:
kwargs = {}
if fullband_req:
# Unpack the appropriate arguments
fullband_dur = pnutils.get_imr_duration(
*[bank[k][:][template_ids]
for k in imr_args + ['f_lower']],
**kwargs)
if prem_required and 'f_final' in bank:
# If f_final is in the bank, then we need to calculate
# the premerger time of the end of the template
prem_dur = pnutils.get_imr_duration(
*[bank[k][:][template_ids]
for k in imr_args + ['f_final']],
**kwargs)
elif prem_required:
# Pre-merger for bank without f_final is zero
prem_dur = np.zeros_like(template_ids)
# Now we decide what to return:
if parameter in ['template_duration', 'duration']:
values = fullband_dur
if prem_required:
values -= prem_dur
else:
values = prem_dur
# Basic conversions
elif parameter in mass_conversions.keys():
values = mass_conversions[parameter](bank['mass1'][:][template_ids],
bank['mass2'][:][template_ids])
elif parameter in spin_conversions.keys():
values = spin_conversions[parameter](bank['mass1'][:][template_ids],
bank['mass2'][:][template_ids],
bank['spin1z'][:][template_ids],
bank['spin2z'][:][template_ids])
else:
# parameter not in the current conversion parameter list
raise NotImplementedError("Bank conversion function " + parameter
+ " not recognised: choose from '" +
"', '".join(conversion_options) + "'.")
return values
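# Minimal usage sketch (illustrative only): the bank argument can be a plain
# dictionary of numpy arrays instead of an open HDF file object.
if __name__ == '__main__':
    toy_bank = {'mass1': np.array([10.0, 30.0]),
                'mass2': np.array([5.0, 25.0]),
                'spin1z': np.array([0.1, 0.0]),
                'spin2z': np.array([0.0, -0.2])}
    tids = np.arange(2)
    print(get_bank_property('mchirp', toy_bank, tids))
    print(get_bank_property('chi_eff', toy_bank, tids))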
| 5,828
| 37.348684
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/tmpltbank/calc_moments.py
|
# Copyright (C) 2013 Ian W. Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import numpy
from pycbc.tmpltbank.lambda_mapping import generate_mapping
def determine_eigen_directions(metricParams, preserveMoments=False,
vary_fmax=False, vary_density=None):
"""
    This function will calculate the coordinate transformations that are needed
to rotate from a coordinate system described by the various Lambda
components in the frequency expansion, to a coordinate system where the
metric is Cartesian.
Parameters
-----------
metricParams : metricParameters instance
Structure holding all the options for construction of the metric.
preserveMoments : boolean, optional (default False)
Currently only used for debugging.
If this is given then if the moments structure is already set
within metricParams then they will not be recalculated.
vary_fmax : boolean, optional (default False)
If set to False the metric and rotations are calculated once, for the
full range of frequency [f_low,f_upper).
If set to True the metric and rotations are calculated multiple times,
for frequency ranges [f_low,f_low + i*vary_density), where i starts at
1 and runs up until f_low + (i+1)*vary_density > f_upper.
Thus values greater than f_upper are *not* computed.
The calculation for the full range [f_low,f_upper) is also done.
vary_density : float, optional
If vary_fmax is True, this will be used in computing the frequency
ranges as described for vary_fmax.
Returns
--------
metricParams : metricParameters instance
Structure holding all the options for construction of the metric.
**THIS FUNCTION ONLY RETURNS THE CLASS**
The following will be **added** to this structure
metricParams.evals : Dictionary of numpy.array
Each entry in the dictionary corresponds to the different frequency
ranges described in vary_fmax. If vary_fmax = False, the only entry
will be f_upper, this corresponds to integrals in [f_low,f_upper). This
entry is always present. Each other entry will use floats as keys to
the dictionary. These floats give the upper frequency cutoff when it is
varying.
Each numpy.array contains the eigenvalues which, with the eigenvectors
in evecs, are needed to rotate the
coordinate system to one in which the metric is the identity matrix.
metricParams.evecs : Dictionary of numpy.matrix
Each entry in the dictionary is as described under evals.
Each numpy.matrix contains the eigenvectors which, with the eigenvalues
in evals, are needed to rotate the
coordinate system to one in which the metric is the identity matrix.
metricParams.metric : Dictionary of numpy.matrix
Each entry in the dictionary is as described under evals.
Each numpy.matrix contains the metric of the parameter space in the
Lambda_i coordinate system.
metricParams.moments : Moments structure
See the structure documentation for a description of this. This
contains the result of all the integrals used in computing the metrics
above. It can be used for the ethinca components calculation, or other
similar calculations.
"""
evals = {}
evecs = {}
metric = {}
unmax_metric = {}
# First step is to get the moments needed to calculate the metric
if not (metricParams.moments and preserveMoments):
get_moments(metricParams, vary_fmax=vary_fmax,
vary_density=vary_density)
# What values are going to be in the moments
# J7 is the normalization factor so it *MUST* be present
    fmax_values = metricParams.moments['J7'].keys()
    # We start looping over every item in the list of metrics
    for item in fmax_values:
# Here we convert the moments into a form easier to use here
Js = {}
for i in range(-7,18):
Js[i] = metricParams.moments['J%d'%(i)][item]
logJs = {}
for i in range(-1,18):
logJs[i] = metricParams.moments['log%d'%(i)][item]
loglogJs = {}
for i in range(-1,18):
loglogJs[i] = metricParams.moments['loglog%d'%(i)][item]
logloglogJs = {}
for i in range(-1,18):
logloglogJs[i] = metricParams.moments['logloglog%d'%(i)][item]
loglogloglogJs = {}
for i in range(-1,18):
loglogloglogJs[i] = metricParams.moments['loglogloglog%d'%(i)][item]
mapping = generate_mapping(metricParams.pnOrder)
# Calculate the metric
gs, unmax_metric_curr = calculate_metric(Js, logJs, loglogJs,
logloglogJs, loglogloglogJs, mapping)
metric[item] = gs
unmax_metric[item] = unmax_metric_curr
# And the eigenvalues
evals[item], evecs[item] = numpy.linalg.eig(gs)
# Numerical error can lead to small negative eigenvalues.
for i in range(len(evals[item])):
if evals[item][i] < 0:
# Due to numerical imprecision the very small eigenvalues can
# be negative. Make these positive.
evals[item][i] = -evals[item][i]
if evecs[item][i,i] < 0:
# We demand a convention that all diagonal terms in the matrix
# of eigenvalues are positive.
# This is done to help visualization of the spaces (increasing
# mchirp always goes the same way)
evecs[item][:,i] = - evecs[item][:,i]
metricParams.evals = evals
metricParams.evecs = evecs
metricParams.metric = metric
metricParams.time_unprojected_metric = unmax_metric
return metricParams
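# For example (illustrative), once a metricParameters instance `mp` has been
# populated with a PSD and the frequency/PN-order options, the rotated
# coordinates follow from
#     mp = determine_eigen_directions(mp)
#     evals, evecs = mp.evals[mp.fUpper], mp.evecs[mp.fUpper]
# where mp.fUpper keys the entry for the full [fLow, fUpper) range.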
def get_moments(metricParams, vary_fmax=False, vary_density=None):
"""
This function will calculate the various integrals (moments) that are
needed to compute the metric used in template bank placement and
coincidence.
Parameters
-----------
metricParams : metricParameters instance
Structure holding all the options for construction of the metric.
vary_fmax : boolean, optional (default False)
If set to False the metric and rotations are calculated once, for the
full range of frequency [f_low,f_upper).
If set to True the metric and rotations are calculated multiple times,
for frequency ranges [f_low,f_low + i*vary_density), where i starts at
1 and runs up until f_low + (i+1)*vary_density > f_upper.
Thus values greater than f_upper are *not* computed.
The calculation for the full range [f_low,f_upper) is also done.
vary_density : float, optional
If vary_fmax is True, this will be used in computing the frequency
ranges as described for vary_fmax.
Returns
--------
None : None
**THIS FUNCTION RETURNS NOTHING**
The following will be **added** to the metricParams structure
metricParams.moments : Moments structure
This contains the result of all the integrals used in computing the
metrics above. It can be used for the ethinca components calculation,
or other similar calculations. This is composed of two compound
dictionaries. The first entry indicates which moment is being
calculated and the second entry indicates the upper frequency cutoff
that was used.
In all cases x = f/f0.
For the first entries the options are:
moments['J%d' %(i)][f_cutoff]
This stores the integral of
x**((-i)/3.) * delta X / PSD(x)
moments['log%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.))) x**((-i)/3.) * delta X / PSD(x)
moments['loglog%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.)))**2 x**((-i)/3.) * delta X / PSD(x)
            moments['logloglog%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.)))**3 x**((-i)/3.) * delta X / PSD(x)
            moments['loglogloglog%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.)))**4 x**((-i)/3.) * delta X / PSD(x)
The second entry stores the frequency cutoff used when computing
the integral. See description of the vary_fmax option above.
        All of these values are normalized by a factor of
x**((-7)/3.) * delta X / PSD(x)
The normalization factor can be obtained in
moments['I7'][f_cutoff]
"""
# NOTE: Unless the TaylorR2F4 metric is used the log^3 and log^4 terms are
# not needed. As this calculation is not too slow compared to bank
# placement we just do this anyway.
psd_amp = metricParams.psd.data
psd_f = numpy.arange(len(psd_amp), dtype=float) * metricParams.deltaF
new_f, new_amp = interpolate_psd(psd_f, psd_amp, metricParams.deltaF)
# Need I7 first as this is the normalization factor
funct = lambda x,f0: 1
I7 = calculate_moment(new_f, new_amp, metricParams.fLow, \
metricParams.fUpper, metricParams.f0, funct,\
vary_fmax=vary_fmax, vary_density=vary_density)
# Do all the J moments
moments = {}
moments['I7'] = I7
for i in range(-7,18):
funct = lambda x,f0: x**((-i+7)/3.)
moments['J%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
# Do the logx multiplied by some power terms
for i in range(-1,18):
funct = lambda x,f0: (numpy.log((x*f0)**(1./3.))) * x**((-i+7)/3.)
moments['log%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
# Do the loglog term
for i in range(-1,18):
funct = lambda x,f0: (numpy.log((x*f0)**(1./3.)))**2 * x**((-i+7)/3.)
moments['loglog%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
# Do the logloglog term
for i in range(-1,18):
funct = lambda x,f0: (numpy.log((x*f0)**(1./3.)))**3 * x**((-i+7)/3.)
moments['logloglog%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
    # Do the loglogloglog term
for i in range(-1,18):
funct = lambda x,f0: (numpy.log((x*f0)**(1./3.)))**4 * x**((-i+7)/3.)
moments['loglogloglog%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
metricParams.moments = moments
def interpolate_psd(psd_f, psd_amp, deltaF):
"""
Function to interpolate a PSD to a different value of deltaF. Uses linear
interpolation.
Parameters
----------
psd_f : numpy.array or list or similar
List of the frequencies contained within the PSD.
psd_amp : numpy.array or list or similar
List of the PSD values at the frequencies in psd_f.
deltaF : float
Value of deltaF to interpolate the PSD to.
Returns
--------
new_psd_f : numpy.array
Array of the frequencies contained within the interpolated PSD
new_psd_amp : numpy.array
Array of the interpolated PSD values at the frequencies in new_psd_f.
"""
# In some cases this will be a no-op. I thought about removing this, but
# this function can take unequally sampled PSDs and it is difficult to
# check for this. As this function runs quickly anyway (compared to the
# moment calculation) I decided to always interpolate.
new_psd_f = []
new_psd_amp = []
fcurr = psd_f[0]
for i in range(len(psd_f) - 1):
f_low = psd_f[i]
f_high = psd_f[i+1]
amp_low = psd_amp[i]
amp_high = psd_amp[i+1]
while(1):
if fcurr > f_high:
break
new_psd_f.append(fcurr)
gradient = (amp_high - amp_low) / (f_high - f_low)
fDiff = fcurr - f_low
new_psd_amp.append(amp_low + fDiff * gradient)
fcurr = fcurr + deltaF
return numpy.asarray(new_psd_f), numpy.asarray(new_psd_amp)
def calculate_moment(psd_f, psd_amp, fmin, fmax, f0, funct,
norm=None, vary_fmax=False, vary_density=None):
"""
Function for calculating one of the integrals used to construct a template
bank placement metric. The integral calculated will be
\int funct(x) * (psd_x)**(-7./3.) * delta_x / PSD(x)
where x = f / f0. The lower frequency cutoff is given by fmin, see
the parameters below for details on how the upper frequency cutoff is
chosen
Parameters
-----------
psd_f : numpy.array
numpy array holding the set of evenly spaced frequencies used in the PSD
psd_amp : numpy.array
numpy array holding the PSD values corresponding to the psd_f
frequencies
fmin : float
The lower frequency cutoff used in the calculation of the integrals
used to obtain the metric.
fmax : float
The upper frequency cutoff used in the calculation of the integrals
used to obtain the metric. This can be varied (see the vary_fmax
option below).
f0 : float
This is an arbitrary scaling factor introduced to avoid the potential
for numerical overflow when calculating this. Generally the default
value (70) is safe here. **IMPORTANT, if you want to calculate the
ethinca metric components later this MUST be set equal to f_low.**
funct : Lambda function
The function to use when computing the integral as described above.
norm : Dictionary of floats
If given then moment[f_cutoff] will be divided by norm[f_cutoff]
vary_fmax : boolean, optional (default False)
If set to False the metric and rotations are calculated once, for the
full range of frequency [f_low,f_upper).
If set to True the metric and rotations are calculated multiple times,
for frequency ranges [f_low,f_low + i*vary_density), where i starts at
1 and runs up until f_low + (i+1)*vary_density > f_upper.
Thus values greater than f_upper are *not* computed.
The calculation for the full range [f_low,f_upper) is also done.
vary_density : float, optional
If vary_fmax is True, this will be used in computing the frequency
ranges as described for vary_fmax.
Returns
--------
moment : Dictionary of floats
moment[f_cutoff] will store the value of the moment at the frequency
cutoff given by f_cutoff.
"""
# Must ensure deltaF in psd_f is constant
psd_x = psd_f / f0
deltax = psd_x[1] - psd_x[0]
mask = numpy.logical_and(psd_f > fmin, psd_f < fmax)
psdf_red = psd_f[mask]
comps_red = psd_x[mask] ** (-7./3.) * funct(psd_x[mask], f0) * deltax / \
psd_amp[mask]
moment = {}
moment[fmax] = comps_red.sum()
if norm:
moment[fmax] = moment[fmax] / norm[fmax]
if vary_fmax:
for t_fmax in numpy.arange(fmin + vary_density, fmax, vary_density):
moment[t_fmax] = comps_red[psdf_red < t_fmax].sum()
if norm:
moment[t_fmax] = moment[t_fmax] / norm[t_fmax]
return moment
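# For example (illustrative), with vary_fmax=True and vary_density=10, the
# returned dictionary holds one entry for each trial cutoff fmin+10,
# fmin+20, ... below fmax, plus the entry at fmax itself, each divided by
# the matching entry of `norm` when a normalisation is supplied.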
def calculate_metric(Js, logJs, loglogJs, logloglogJs, loglogloglogJs, \
mapping):
"""
This function will take the various integrals calculated by get_moments and
convert this into a metric for the appropriate parameter space.
Parameters
-----------
Js : Dictionary
The list of (log^0 x) * x**(-i/3) integrals computed by get_moments()
The index is Js[i]
logJs : Dictionary
The list of (log^1 x) * x**(-i/3) integrals computed by get_moments()
The index is logJs[i]
loglogJs : Dictionary
The list of (log^2 x) * x**(-i/3) integrals computed by get_moments()
The index is loglogJs[i]
logloglogJs : Dictionary
The list of (log^3 x) * x**(-i/3) integrals computed by get_moments()
The index is logloglogJs[i]
loglogloglogJs : Dictionary
The list of (log^4 x) * x**(-i/3) integrals computed by get_moments()
The index is loglogloglogJs[i]
mapping : dictionary
Used to identify which Lambda components are active in this parameter
space and map these to entries in the metric matrix.
Returns
--------
metric : numpy.matrix
The resulting metric.
"""
# How many dimensions in the parameter space?
maxLen = len(mapping.keys())
metric = numpy.zeros(shape=(maxLen,maxLen), dtype=float)
unmax_metric = numpy.zeros(shape=(maxLen+1,maxLen+1), dtype=float)
for i in range(16):
for j in range(16):
calculate_metric_comp(metric, unmax_metric, i, j, Js,
logJs, loglogJs, logloglogJs,
loglogloglogJs, mapping)
return metric, unmax_metric
def calculate_metric_comp(gs, unmax_metric, i, j, Js, logJs, loglogJs,
logloglogJs, loglogloglogJs, mapping):
"""
Used to compute part of the metric. Only call this from within
calculate_metric(). Please see the documentation for that function.
"""
# Time term in unmax_metric. Note that these terms are recomputed a bunch
    # of times, but this cost is insignificant compared to computing the moments
unmax_metric[-1,-1] = (Js[1] - Js[4]*Js[4])
# Normal terms
if 'Lambda%d'%i in mapping and 'Lambda%d'%j in mapping:
gammaij = Js[17-i-j] - Js[12-i]*Js[12-j]
gamma0i = (Js[9-i] - Js[4]*Js[12-i])
gamma0j = (Js[9-j] - Js[4] * Js[12-j])
gs[mapping['Lambda%d'%i],mapping['Lambda%d'%j]] = \
0.5 * (gammaij - gamma0i*gamma0j/(Js[1] - Js[4]*Js[4]))
unmax_metric[mapping['Lambda%d'%i], -1] = gamma0i
unmax_metric[-1, mapping['Lambda%d'%j]] = gamma0j
unmax_metric[mapping['Lambda%d'%i],mapping['Lambda%d'%j]] = gammaij
# Normal,log cross terms
if 'Lambda%d'%i in mapping and 'LogLambda%d'%j in mapping:
gammaij = logJs[17-i-j] - logJs[12-j] * Js[12-i]
gamma0i = (Js[9-i] - Js[4] * Js[12-i])
gamma0j = logJs[9-j] - logJs[12-j] * Js[4]
gs[mapping['Lambda%d'%i],mapping['LogLambda%d'%j]] = \
gs[mapping['LogLambda%d'%j],mapping['Lambda%d'%i]] = \
0.5 * (gammaij - gamma0i*gamma0j/(Js[1] - Js[4]*Js[4]))
unmax_metric[mapping['Lambda%d'%i], -1] = gamma0i
unmax_metric[-1, mapping['Lambda%d'%i]] = gamma0i
unmax_metric[-1, mapping['LogLambda%d'%j]] = gamma0j
unmax_metric[mapping['LogLambda%d'%j], -1] = gamma0j
unmax_metric[mapping['Lambda%d'%i],mapping['LogLambda%d'%j]] = gammaij
unmax_metric[mapping['LogLambda%d'%j],mapping['Lambda%d'%i]] = gammaij
# Log,log terms
if 'LogLambda%d'%i in mapping and 'LogLambda%d'%j in mapping:
gammaij = loglogJs[17-i-j] - logJs[12-j] * logJs[12-i]
gamma0i = (logJs[9-i] - Js[4] * logJs[12-i])
gamma0j = logJs[9-j] - logJs[12-j] * Js[4]
gs[mapping['LogLambda%d'%i],mapping['LogLambda%d'%j]] = \
0.5 * (gammaij - gamma0i*gamma0j/(Js[1] - Js[4]*Js[4]))
unmax_metric[mapping['LogLambda%d'%i], -1] = gamma0i
unmax_metric[-1, mapping['LogLambda%d'%j]] = gamma0j
unmax_metric[mapping['LogLambda%d'%i],mapping['LogLambda%d'%j]] =\
gammaij
# Normal,loglog cross terms
if 'Lambda%d'%i in mapping and 'LogLogLambda%d'%j in mapping:
gammaij = loglogJs[17-i-j] - loglogJs[12-j] * Js[12-i]
gamma0i = (Js[9-i] - Js[4] * Js[12-i])
gamma0j = loglogJs[9-j] - loglogJs[12-j] * Js[4]
gs[mapping['Lambda%d'%i],mapping['LogLogLambda%d'%j]] = \
gs[mapping['LogLogLambda%d'%j],mapping['Lambda%d'%i]] = \
0.5 * (gammaij - gamma0i*gamma0j/(Js[1] - Js[4]*Js[4]))
unmax_metric[mapping['Lambda%d'%i], -1] = gamma0i
unmax_metric[-1, mapping['Lambda%d'%i]] = gamma0i
unmax_metric[-1, mapping['LogLogLambda%d'%j]] = gamma0j
unmax_metric[mapping['LogLogLambda%d'%j], -1] = gamma0j
unmax_metric[mapping['Lambda%d'%i],mapping['LogLogLambda%d'%j]] = \
gammaij
unmax_metric[mapping['LogLogLambda%d'%j],mapping['Lambda%d'%i]] = \
gammaij
# log,loglog cross terms
if 'LogLambda%d'%i in mapping and 'LogLogLambda%d'%j in mapping:
gammaij = logloglogJs[17-i-j] - loglogJs[12-j] * logJs[12-i]
gamma0i = (logJs[9-i] - Js[4] * logJs[12-i])
gamma0j = loglogJs[9-j] - loglogJs[12-j] * Js[4]
gs[mapping['LogLambda%d'%i],mapping['LogLogLambda%d'%j]] = \
gs[mapping['LogLogLambda%d'%j],mapping['LogLambda%d'%i]] = \
0.5 * (gammaij - gamma0i*gamma0j/(Js[1] - Js[4]*Js[4]))
unmax_metric[mapping['LogLambda%d'%i], -1] = gamma0i
unmax_metric[-1, mapping['LogLambda%d'%i]] = gamma0i
unmax_metric[-1, mapping['LogLogLambda%d'%j]] = gamma0j
unmax_metric[mapping['LogLogLambda%d'%j], -1] = gamma0j
unmax_metric[mapping['LogLambda%d'%i],mapping['LogLogLambda%d'%j]] = \
gammaij
unmax_metric[mapping['LogLogLambda%d'%j],mapping['LogLambda%d'%i]] = \
gammaij
# Loglog,loglog terms
if 'LogLogLambda%d'%i in mapping and 'LogLogLambda%d'%j in mapping:
gammaij = loglogloglogJs[17-i-j] - loglogJs[12-j] * loglogJs[12-i]
gamma0i = (loglogJs[9-i] - Js[4] * loglogJs[12-i])
gamma0j = loglogJs[9-j] - loglogJs[12-j] * Js[4]
gs[mapping['LogLogLambda%d'%i],mapping['LogLogLambda%d'%j]] = \
0.5 * (gammaij - gamma0i*gamma0j/(Js[1] - Js[4]*Js[4]))
unmax_metric[mapping['LogLogLambda%d'%i], -1] = gamma0i
unmax_metric[-1, mapping['LogLogLambda%d'%j]] = gamma0j
unmax_metric[mapping['LogLogLambda%d'%i],mapping['LogLogLambda%d'%j]] =\
gammaij
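# Illustrative sketch, not in the original source: the unmaximized metric
# returned by calculate_metric stores the coalescence-time direction in its
# last row and column. Projecting that direction out reproduces the
# 0.5 * (gamma_ij - gamma_0i * gamma_0j / gamma_00) combination used in
# calculate_metric_comp above.
def _example_project_out_time(unmax_metric):
    import numpy
    g = numpy.asarray(unmax_metric, dtype=float)
    gamma00 = g[-1, -1]   # Js[1] - Js[4]*Js[4], the pure time-time term
    return 0.5 * (g[:-1, :-1] - numpy.outer(g[:-1, -1], g[-1, :-1]) / gamma00)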
| 23,569 | 43.056075 | 80 | py |
pycbc | pycbc-master/pycbc/tmpltbank/brute_force_methods.py |
import numpy
from pycbc.tmpltbank.coord_utils import get_cov_params
def get_physical_covaried_masses(xis, bestMasses, bestXis, req_match,
massRangeParams, metricParams, fUpper,
giveUpThresh = 5000):
"""
This function takes the position of a point in the xi parameter space and
iteratively finds a close point in the physical coordinate space (masses
and spins).
Parameters
-----------
xis : list or array
Desired position of the point in the xi space. If only N values are
        provided and the xi space's dimension is larger, then it is assumed that
*any* value in the remaining xi coordinates is acceptable.
bestMasses : list
Contains [totalMass, eta, spin1z, spin2z]. Is a physical position
mapped to xi coordinates in bestXis that is close to the desired point.
This is aimed to give the code a starting point.
bestXis : list
Contains the position of bestMasses in the xi coordinate system.
req_match : float
Desired maximum mismatch between xis and the obtained point. If a point
is found with mismatch < req_match immediately stop and return that
point. A point with this mismatch will not always be found.
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper that was used when obtaining the xi_i
coordinates. This lets us know how to rotate potential physical points
into the correct xi_i space. This must be a key in metricParams.evals,
metricParams.evecs and metricParams.evecsCV
(ie. we must know how to do the transformation for
the given value of fUpper)
giveUpThresh : int, optional (default = 5000)
The program will try this many iterations. If no close matching point
has been found after this it will give up.
Returns
--------
mass1 : float
The heavier mass of the obtained point.
mass2 : float
The smaller mass of the obtained point
spin1z : float
        The spin of the heavier body at the obtained point.
    spin2z : float
        The spin of the smaller body at the obtained point.
count : int
How many iterations it took to find the point. For debugging.
mismatch : float
The mismatch between the obtained point and the input xis.
new_xis : list
The position of the point in the xi space
"""
# TUNABLE PARAMETERS GO HERE!
# This states how far apart to scatter test points in the first proposal
origScaleFactor = 1
# Set up
xi_size = len(xis)
scaleFactor = origScaleFactor
bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.)
count = 0
unFixedCount = 0
currDist = 100000000000000000
while(1):
# If we are a long way away we use larger jumps
if count:
if currDist > 1 and scaleFactor == origScaleFactor:
scaleFactor = origScaleFactor*10
# Get a set of test points with mass -> xi mappings
totmass, eta, spin1z, spin2z, mass1, mass2, new_xis = \
get_mass_distribution([bestChirpmass, bestMasses[1], bestMasses[2],
bestMasses[3]],
scaleFactor, massRangeParams, metricParams,
fUpper)
cDist = (new_xis[0] - xis[0])**2
for j in range(1,xi_size):
cDist += (new_xis[j] - xis[j])**2
if (cDist.min() < req_match):
idx = cDist.argmin()
scaleFactor = origScaleFactor
new_xis_list = [new_xis[ldx][idx] for ldx in range(len(new_xis))]
return mass1[idx], mass2[idx], spin1z[idx], spin2z[idx], count, \
cDist.min(), new_xis_list
if (cDist.min() < currDist):
idx = cDist.argmin()
bestMasses[0] = totmass[idx]
bestMasses[1] = eta[idx]
bestMasses[2] = spin1z[idx]
bestMasses[3] = spin2z[idx]
bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.)
currDist = cDist.min()
unFixedCount = 0
scaleFactor = origScaleFactor
count += 1
unFixedCount += 1
if unFixedCount > giveUpThresh:
# Stop at this point
diff = (bestMasses[0]*bestMasses[0] * (1-4*bestMasses[1]))**0.5
mass1 = (bestMasses[0] + diff)/2.
mass2 = (bestMasses[0] - diff)/2.
new_xis_list = [new_xis[ldx][0] for ldx in range(len(new_xis))]
return mass1, mass2, bestMasses[2], bestMasses[3], count, \
currDist, new_xis_list
if not unFixedCount % 100:
scaleFactor *= 2
if scaleFactor > 64:
scaleFactor = 1
# Shouldn't be here!
raise RuntimeError
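# Illustrative usage sketch, not in the original source. The xi target, the
# seed point and the match threshold are arbitrary placeholders; the
# massRangeParams/metricParams objects and fUpper must come from the option
# classes in pycbc.tmpltbank.option_utils, with fUpper a key of
# metricParams.evals.
def _example_find_physical_point(massRangeParams, metricParams, fUpper):
    target_xis = [5.2, -0.3]
    seed_point = [10., 0.2, 0.1, 0.]   # [totalMass, eta, spin1z, spin2z]
    seed_xis = [5.0, -0.1]
    return get_physical_covaried_masses(
        target_xis, seed_point, seed_xis, 1e-4,
        massRangeParams, metricParams, fUpper)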
def get_mass_distribution(bestMasses, scaleFactor, massRangeParams,
metricParams, fUpper,
numJumpPoints=100, chirpMassJumpFac=0.0001,
etaJumpFac=0.01, spin1zJumpFac=0.01,
spin2zJumpFac=0.01):
"""
Given a set of masses, this function will create a set of points nearby
in the mass space and map these to the xi space.
Parameters
-----------
bestMasses : list
        Contains [ChirpMass, eta, spin1z, spin2z]. Points will be placed
        around this position.
scaleFactor : float
This parameter describes the radius away from bestMasses that points
will be placed in.
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper that was used when obtaining the xi_i
coordinates. This lets us know how to rotate potential physical points
into the correct xi_i space. This must be a key in metricParams.evals,
metricParams.evecs and metricParams.evecsCV
(ie. we must know how to do the transformation for
the given value of fUpper)
numJumpPoints : int, optional (default = 100)
The number of points that will be generated every iteration
chirpMassJumpFac : float, optional (default=0.0001)
The jump points will be chosen with fractional variation in chirpMass
up to this multiplied by scaleFactor.
etaJumpFac : float, optional (default=0.01)
The jump points will be chosen with fractional variation in eta
up to this multiplied by scaleFactor.
spin1zJumpFac : float, optional (default=0.01)
The jump points will be chosen with absolute variation in spin1z up to
this multiplied by scaleFactor.
spin2zJumpFac : float, optional (default=0.01)
The jump points will be chosen with absolute variation in spin2z up to
this multiplied by scaleFactor.
Returns
--------
Totmass : numpy.array
Total mass of the resulting points
Eta : numpy.array
Symmetric mass ratio of the resulting points
Spin1z : numpy.array
Spin of the heavier body of the resulting points
Spin2z : numpy.array
Spin of the smaller body of the resulting points
Mass1 : numpy.array
Mass1 (mass of heavier body) of the resulting points
Mass2 : numpy.array
Mass2 (mass of smaller body) of the resulting points
new_xis : list of numpy.array
Position of points in the xi coordinates
"""
# FIXME: It would be better if rejected values could be drawn from the
# full possible mass/spin distribution. However speed in this function is
# a major factor and must be considered.
bestChirpmass = bestMasses[0]
bestEta = bestMasses[1]
bestSpin1z = bestMasses[2]
bestSpin2z = bestMasses[3]
# Firstly choose a set of values for masses and spins
chirpmass = bestChirpmass * (1 - (numpy.random.random(numJumpPoints)-0.5) \
* chirpMassJumpFac * scaleFactor )
etaRange = massRangeParams.maxEta - massRangeParams.minEta
currJumpFac = etaJumpFac * scaleFactor
if currJumpFac > etaRange:
currJumpFac = etaRange
eta = bestEta * ( 1 - (numpy.random.random(numJumpPoints) - 0.5) \
* currJumpFac)
maxSpinMag = max(massRangeParams.maxNSSpinMag, massRangeParams.maxBHSpinMag)
minSpinMag = min(massRangeParams.maxNSSpinMag, massRangeParams.maxBHSpinMag)
    # Note that these two are changed by spinxzFac, *not* spinxzFac/spinxz
currJumpFac = spin1zJumpFac * scaleFactor
if currJumpFac > maxSpinMag:
currJumpFac = maxSpinMag
# Actually set the new spin trial points
if massRangeParams.nsbhFlag or (maxSpinMag == minSpinMag):
curr_spin_1z_jump_fac = currJumpFac
curr_spin_2z_jump_fac = currJumpFac
# Check spins aren't going to be unphysical
if currJumpFac > massRangeParams.maxBHSpinMag:
curr_spin_1z_jump_fac = massRangeParams.maxBHSpinMag
if currJumpFac > massRangeParams.maxNSSpinMag:
curr_spin_2z_jump_fac = massRangeParams.maxNSSpinMag
spin1z = bestSpin1z + ( (numpy.random.random(numJumpPoints) - 0.5) \
* curr_spin_1z_jump_fac)
spin2z = bestSpin2z + ( (numpy.random.random(numJumpPoints) - 0.5) \
* curr_spin_2z_jump_fac)
else:
# If maxNSSpinMag is very low (0) and maxBHSpinMag is high we can
# find it hard to place any points. So mix these when
# masses are swapping between the NS and BH.
curr_spin_bh_jump_fac = currJumpFac
curr_spin_ns_jump_fac = currJumpFac
# Check spins aren't going to be unphysical
if currJumpFac > massRangeParams.maxBHSpinMag:
curr_spin_bh_jump_fac = massRangeParams.maxBHSpinMag
if currJumpFac > massRangeParams.maxNSSpinMag:
curr_spin_ns_jump_fac = massRangeParams.maxNSSpinMag
spin1z = numpy.zeros(numJumpPoints, dtype=float)
spin2z = numpy.zeros(numJumpPoints, dtype=float)
split_point = int(numJumpPoints/2)
# So set the first half to be at least within the BH range and the
# second half to be at least within the NS range
spin1z[:split_point] = bestSpin1z + \
( (numpy.random.random(split_point) - 0.5)\
* curr_spin_bh_jump_fac)
spin1z[split_point:] = bestSpin1z + \
( (numpy.random.random(numJumpPoints-split_point) - 0.5)\
* curr_spin_ns_jump_fac)
spin2z[:split_point] = bestSpin2z + \
( (numpy.random.random(split_point) - 0.5)\
* curr_spin_bh_jump_fac)
spin2z[split_point:] = bestSpin2z + \
( (numpy.random.random(numJumpPoints-split_point) - 0.5)\
* curr_spin_ns_jump_fac)
# Point[0] is always set to the original point
chirpmass[0] = bestChirpmass
eta[0] = bestEta
spin1z[0] = bestSpin1z
spin2z[0] = bestSpin2z
# Remove points where eta becomes unphysical
eta[eta > massRangeParams.maxEta] = massRangeParams.maxEta
if massRangeParams.minEta:
eta[eta < massRangeParams.minEta] = massRangeParams.minEta
else:
eta[eta < 0.0001] = 0.0001
# Total mass, masses and mass diff
totmass = chirpmass / (eta**(3./5.))
diff = (totmass*totmass * (1-4*eta))**0.5
mass1 = (totmass + diff)/2.
mass2 = (totmass - diff)/2.
# Check the validity of the spin values
# Do the first spin
if maxSpinMag == 0:
# Shortcut if non-spinning
pass
elif massRangeParams.nsbhFlag or (maxSpinMag == minSpinMag):
# Simple case where I don't have to worry about correlation with mass
numploga = abs(spin1z) > massRangeParams.maxBHSpinMag
spin1z[numploga] = 0
else:
# Do have to consider masses
boundary_mass = massRangeParams.ns_bh_boundary_mass
numploga1 = numpy.logical_and(mass1 >= boundary_mass,
abs(spin1z) <= massRangeParams.maxBHSpinMag)
numploga2 = numpy.logical_and(mass1 < boundary_mass,
abs(spin1z) <= massRangeParams.maxNSSpinMag)
numploga = numpy.logical_or(numploga1, numploga2)
numploga = numpy.logical_not(numploga)
spin1z[numploga] = 0
# Same for the second spin
if maxSpinMag == 0:
# Shortcut if non-spinning
pass
elif massRangeParams.nsbhFlag or (maxSpinMag == minSpinMag):
numplogb = abs(spin2z) > massRangeParams.maxNSSpinMag
spin2z[numplogb] = 0
else:
# Do have to consider masses
boundary_mass = massRangeParams.ns_bh_boundary_mass
numplogb1 = numpy.logical_and(mass2 >= boundary_mass,
abs(spin2z) <= massRangeParams.maxBHSpinMag)
numplogb2 = numpy.logical_and(mass2 < boundary_mass,
abs(spin2z) <= massRangeParams.maxNSSpinMag)
numplogb = numpy.logical_or(numplogb1, numplogb2)
numplogb = numpy.logical_not(numplogb)
spin2z[numplogb] = 0
if (maxSpinMag) and (numploga[0] or numplogb[0]):
raise ValueError("Cannot remove the guide point!")
# And remove points where the individual masses are outside of the physical
# range. Or the total masses are.
# These "removed" points will have metric distances that will be much, much
# larger than any thresholds used in the functions in brute_force_utils.py
# and will always be rejected. An unphysical value cannot be used as it
# would result in unphysical metric distances and cause failures.
totmass[mass1 < massRangeParams.minMass1*0.9999] = 0.0001
totmass[mass1 > massRangeParams.maxMass1*1.0001] = 0.0001
totmass[mass2 < massRangeParams.minMass2*0.9999] = 0.0001
totmass[mass2 > massRangeParams.maxMass2*1.0001] = 0.0001
# There is some numerical error which can push this a bit higher. We do
# *not* want to reject the initial guide point. This error comes from
    # Masses -> totmass, eta -> masses conversion; we will have points pushing
    # onto the boundaries of the space.
totmass[totmass > massRangeParams.maxTotMass*1.0001] = 0.0001
totmass[totmass < massRangeParams.minTotMass*0.9999] = 0.0001
if massRangeParams.max_chirp_mass:
totmass[chirpmass > massRangeParams.max_chirp_mass*1.0001] = 0.0001
if massRangeParams.min_chirp_mass:
totmass[chirpmass < massRangeParams.min_chirp_mass*0.9999] = 0.0001
if totmass[0] < 0.00011:
raise ValueError("Cannot remove the guide point!")
mass1[totmass < 0.00011] = 0.0001
mass2[totmass < 0.00011] = 0.0001
# Then map to xis
new_xis = get_cov_params(mass1, mass2, spin1z, spin2z,
metricParams, fUpper)
return totmass, eta, spin1z, spin2z, mass1, mass2, new_xis
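# Illustrative sketch, not in the original source: the mass conversions used
# repeatedly above, namely chirp mass = Mtot * eta**(3/5) and the component
# masses recovered from (Mtot, eta) via the mass difference
# sqrt(Mtot**2 * (1 - 4*eta)). The input values are placeholders.
def _example_mass_conversions(total_mass=10., eta=0.2):
    chirp_mass = total_mass * eta**(3./5.)
    diff = (total_mass * total_mass * (1 - 4 * eta))**0.5
    mass1 = (total_mass + diff) / 2.
    mass2 = (total_mass - diff) / 2.
    return chirp_mass, mass1, mass2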
def stack_xi_direction_brute(xis, bestMasses, bestXis, direction_num,
req_match, massRangeParams, metricParams, fUpper,
scaleFactor=0.8, numIterations=3000):
"""
This function is used to assess the depth of the xi_space in a specified
dimension at a specified point in the higher dimensions. It does this by
iteratively throwing points at the space to find maxima and minima.
Parameters
-----------
xis : list or array
        Position in the xi space at which to assess the depth. This need not
        specify every xi dimension; it can cover just a subset of the
        dimensions being sampled.
bestMasses : list
Contains [totalMass, eta, spin1z, spin2z]. Is a physical position
mapped to xi coordinates in bestXis that is close to the xis point.
This is aimed to give the code a starting point.
bestXis : list
Contains the position of bestMasses in the xi coordinate system.
direction_num : int
        The dimension whose depth you want to assess (0 corresponds to the
        first xi dimension, 1 to the second, ...).
req_match : float
When considering points to assess the depth with, only consider points
with a mismatch that is smaller than this with xis.
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper that was used when obtaining the xi_i
coordinates. This lets us know how to rotate potential physical points
into the correct xi_i space. This must be a key in metricParams.evals,
metricParams.evecs and metricParams.evecsCV
(ie. we must know how to do the transformation for
the given value of fUpper)
scaleFactor : float, optional (default = 0.8)
The value of the scale factor that is used when calling
pycbc.tmpltbank.get_mass_distribution.
numIterations : int, optional (default = 3000)
The number of times to make calls to get_mass_distribution when
assessing the maximum/minimum of this parameter space. Making this
smaller makes the code faster, but at the cost of accuracy.
Returns
--------
xi_min : float
The minimal value of the specified dimension at the specified point in
parameter space.
xi_max : float
The maximal value of the specified dimension at the specified point in
parameter space.
"""
# Find minimum
ximin = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, \
req_match, massRangeParams, metricParams, \
fUpper, find_minimum=True, \
scaleFactor=scaleFactor, \
numIterations=numIterations)
# Find maximum
ximax = find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, \
req_match, massRangeParams, metricParams, \
fUpper, find_minimum=False, \
scaleFactor=scaleFactor, \
numIterations=numIterations)
return ximin, ximax
def find_xi_extrema_brute(xis, bestMasses, bestXis, direction_num, req_match, \
massRangeParams, metricParams, fUpper, \
find_minimum=False, scaleFactor=0.8, \
numIterations=3000):
"""
This function is used to find the largest or smallest value of the xi
space in a specified
dimension at a specified point in the higher dimensions. It does this by
iteratively throwing points at the space to find extrema.
Parameters
-----------
xis : list or array
        Position in the xi space at which to assess the depth. This need not
        specify every xi dimension; it can cover just a subset of the
        dimensions being sampled.
bestMasses : list
Contains [totalMass, eta, spin1z, spin2z]. Is a physical position
mapped to xi coordinates in bestXis that is close to the xis point.
This is aimed to give the code a starting point.
bestXis : list
Contains the position of bestMasses in the xi coordinate system.
direction_num : int
        The dimension whose depth you want to assess (0 corresponds to the
        first xi dimension, 1 to the second, ...).
req_match : float
When considering points to assess the depth with, only consider points
with a mismatch that is smaller than this with xis.
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper that was used when obtaining the xi_i
coordinates. This lets us know how to rotate potential physical points
into the correct xi_i space. This must be a key in metricParams.evals,
metricParams.evecs and metricParams.evecsCV
(ie. we must know how to do the transformation for
the given value of fUpper)
find_minimum : boolean, optional (default = False)
If True, find the minimum value of the xi direction. If False find the
maximum value.
scaleFactor : float, optional (default = 0.8)
The value of the scale factor that is used when calling
pycbc.tmpltbank.get_mass_distribution.
numIterations : int, optional (default = 3000)
The number of times to make calls to get_mass_distribution when
assessing the maximum/minimum of this parameter space. Making this
smaller makes the code faster, but at the cost of accuracy.
Returns
--------
xi_extent : float
The extremal value of the specified dimension at the specified point in
parameter space.
"""
# Setup
xi_size = len(xis)
bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.)
if find_minimum:
xiextrema = 10000000000
else:
xiextrema = -100000000000
for _ in range(numIterations):
# Evaluate extrema of the xi direction specified
totmass, eta, spin1z, spin2z, _, _, new_xis = \
get_mass_distribution([bestChirpmass,bestMasses[1],bestMasses[2],
bestMasses[3]],
scaleFactor, massRangeParams, metricParams,
fUpper)
cDist = (new_xis[0] - xis[0])**2
for j in range(1, xi_size):
cDist += (new_xis[j] - xis[j])**2
redCDist = cDist[cDist < req_match]
if len(redCDist):
if not find_minimum:
new_xis[direction_num][cDist > req_match] = -10000000
currXiExtrema = (new_xis[direction_num]).max()
idx = (new_xis[direction_num]).argmax()
else:
new_xis[direction_num][cDist > req_match] = 10000000
currXiExtrema = (new_xis[direction_num]).min()
idx = (new_xis[direction_num]).argmin()
if ( ((not find_minimum) and (currXiExtrema > xiextrema)) or \
(find_minimum and (currXiExtrema < xiextrema)) ):
xiextrema = currXiExtrema
bestMasses[0] = totmass[idx]
bestMasses[1] = eta[idx]
bestMasses[2] = spin1z[idx]
bestMasses[3] = spin2z[idx]
bestChirpmass = bestMasses[0] * (bestMasses[1])**(3./5.)
return xiextrema
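# Illustrative usage sketch, not in the original source: measure the extent
# of the third xi direction at a fixed position in the first two directions.
# All numerical values are placeholders; the parameter objects are as for
# get_physical_covaried_masses above.
def _example_xi_depth(massRangeParams, metricParams, fUpper):
    fixed_xis = [5.2, -0.3]
    seed_point = [10., 0.2, 0.1, 0.]   # [totalMass, eta, spin1z, spin2z]
    seed_xis = [5.0, -0.1, 0.4]
    return stack_xi_direction_brute(
        fixed_xis, seed_point, seed_xis, 2, 0.03,
        massRangeParams, metricParams, fUpper, numIterations=500)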
| 23,515 | 44.750973 | 80 | py |
pycbc | pycbc-master/pycbc/tmpltbank/__init__.py |
from pycbc.tmpltbank.calc_moments import *
from pycbc.tmpltbank.lambda_mapping import *
from pycbc.tmpltbank.coord_utils import *
from pycbc.tmpltbank.lattice_utils import *
from pycbc.tmpltbank.brute_force_methods import *
from pycbc.tmpltbank.option_utils import *
from pycbc.tmpltbank.partitioned_bank import *
from pycbc.tmpltbank.bank_conversions import *
| 361 | 39.222222 | 49 | py |
pycbc | pycbc-master/pycbc/tmpltbank/option_utils.py |
# Copyright (C) 2013 Ian W. Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import argparse
import logging
import textwrap
import numpy
import os
from pycbc.tmpltbank.lambda_mapping import get_ethinca_orders, pycbcValidOrdersHelpDescriptions
from pycbc import pnutils
from pycbc.neutron_stars import load_ns_sequence
from pycbc.types import positive_float, nonnegative_float
class IndentedHelpFormatterWithNL(argparse.ArgumentDefaultsHelpFormatter):
"""
    This class is taken from
    https://groups.google.com/forum/#!topic/comp.lang.python/bfbmtUGhW8I
    and is used to format the argparse help messages to handle line breaks
    more nicely. Specifically, the pn-order help is large and looks crappy
    without this.
This function is (C) Tim Chase
"""
def format_description(self, description):
"""
No documentation
"""
if not description: return ""
desc_width = self.width - self.current_indent
indent = " "*self.current_indent
# the above is still the same
bits = description.split('\n')
formatted_bits = [
textwrap.fill(bit,
desc_width,
initial_indent=indent,
subsequent_indent=indent)
for bit in bits]
result = "\n".join(formatted_bits) + "\n"
return result
def format_option(self, option):
"""
No documentation
"""
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
# Everything is the same up through here
help_lines = []
for para in help_text.split("\n"):
help_lines.extend(textwrap.wrap(para, self.help_width))
# Everything is the same after here
result.append("%*s%s\n" % (
indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def get_options_from_group(option_group):
"""
Take an option group and return all the options that are defined in that
group.
"""
option_list = option_group._group_actions
command_lines = []
for option in option_list:
option_strings = option.option_strings
for string in option_strings:
if string.startswith('--'):
command_lines.append(string)
return command_lines
def insert_base_bank_options(parser, match_req=True):
"""
Adds essential common options for template bank generation to an
ArgumentParser instance.
"""
def match_type(s):
err_msg = "must be a number between 0 and 1 excluded, not %r" % s
try:
value = float(s)
except ValueError:
raise argparse.ArgumentTypeError(err_msg)
if value <= 0 or value >= 1:
raise argparse.ArgumentTypeError(err_msg)
return value
parser.add_argument(
'-m', '--min-match', type=match_type, required=match_req,
help="Generate bank with specified minimum match. Required.")
parser.add_argument(
'-O', '--output-file', required=True,
help="Output file name. Required.")
parser.add_argument('--f-low-column', type=str, metavar='NAME',
help='If given, store the lower frequency cutoff into '
'column NAME of the single-inspiral table. '
'(Requires an output file ending in .xml)')
parser.add_argument('--output-f-final', action='store_true',
help="Include 'f_final' in the output hdf file.")
def insert_metric_calculation_options(parser):
"""
Adds the options used to obtain a metric in the bank generation codes to an
argparser as an OptionGroup. This should be used if you want to use these
options in your code.
"""
metricOpts = parser.add_argument_group(
"Options related to calculating the parameter space metric")
metricOpts.add_argument("--pn-order", action="store", type=str,
required=True,
help="Determines the PN order to use. For a bank of "
"non-spinning templates, spin-related terms in the "
"metric will be zero. REQUIRED. "
"Choices: %s" %(pycbcValidOrdersHelpDescriptions))
metricOpts.add_argument("--f0", action="store", type=positive_float,
default=70.,\
help="f0 is used as a dynamic scaling factor when "
"calculating integrals used in metric construction. "
"I.e. instead of integrating F(f) we integrate F(f/f0) "
"then rescale by powers of f0. The default value 70Hz "
"should be fine for most applications. OPTIONAL. "
"UNITS=Hz. **WARNING: If the ethinca metric is to be "
"calculated, f0 must be set equal to f-low**")
metricOpts.add_argument("--f-low", action="store", type=positive_float,
required=True,
help="Lower frequency cutoff used in computing the "
"parameter space metric. REQUIRED. UNITS=Hz")
metricOpts.add_argument("--f-upper", action="store", type=positive_float,
required=True,
help="Upper frequency cutoff used in computing the "
"parameter space metric. REQUIRED. UNITS=Hz")
metricOpts.add_argument("--delta-f", action="store", type=positive_float,
required=True,
help="Frequency spacing used in computing the parameter "
"space metric: integrals of the form \int F(f) df "
"are approximated as \sum F(f) delta_f. REQUIRED. "
"UNITS=Hz")
metricOpts.add_argument("--write-metric", action="store_true",
default=False, help="If given write the metric components "
"to disk as they are calculated.")
return metricOpts
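# Illustrative sketch, not in the original source: wiring the option groups
# above into a parser and building a metricParameters instance (defined
# further down in this module) from the result. The command-line values are
# arbitrary placeholders; the pn-order string in particular must be one of
# the orders listed in pycbcValidOrdersHelpDescriptions.
def _example_parse_metric_options():
    parser = argparse.ArgumentParser(description="toy bank-generation parser")
    insert_base_bank_options(parser)
    insert_metric_calculation_options(parser)
    opts = parser.parse_args([
        '--min-match', '0.97', '--output-file', 'bank.hdf5',
        '--pn-order', 'threePointFivePN', '--f-low', '30',
        '--f-upper', '1000', '--delta-f', '0.1'])
    verify_metric_calculation_options(opts, parser)
    return metricParameters.from_argparse(opts)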
def verify_metric_calculation_options(opts, parser):
"""
Parses the metric calculation options given and verifies that they are
correct.
Parameters
----------
opts : argparse.Values instance
Result of parsing the input options with OptionParser
parser : object
The OptionParser instance.
"""
if not opts.pn_order:
parser.error("Must supply --pn-order")
class metricParameters(object):
"""
This class holds all of the options that are parsed in the function
insert_metric_calculation_options
and all products produced using these options. It can also be initialized
from the __init__ function, providing directly the options normally
provided on the command line.
"""
    _psd = None
    _metric = None
    _time_unprojected_metric = None
    _evals = None
    _evecs = None
    _evecsCV = None
def __init__(self, pnOrder, fLow, fUpper, deltaF, f0=70,
write_metric=False):
"""
Initialize an instance of the metricParameters by providing all
options directly. See the help message associated with any code
that uses the metric options for more details of how to set each of
these, e.g. pycbc_aligned_stoch_bank --help
"""
self.pnOrder=pnOrder
self.fLow=fLow
self.fUpper=fUpper
self.deltaF=deltaF
self.f0=f0
self._moments=None
self.write_metric=write_metric
@classmethod
def from_argparse(cls, opts):
"""
Initialize an instance of the metricParameters class from an
argparse.OptionParser instance. This assumes that
insert_metric_calculation_options
and
verify_metric_calculation_options
have already been called before initializing the class.
"""
return cls(opts.pn_order, opts.f_low, opts.f_upper, opts.delta_f,\
f0=opts.f0, write_metric=opts.write_metric)
@property
def psd(self):
"""
A pyCBC FrequencySeries holding the appropriate PSD.
Return the PSD used in the metric calculation.
"""
if not self._psd:
errMsg = "The PSD has not been set in the metricParameters "
errMsg += "instance."
raise ValueError(errMsg)
return self._psd
@psd.setter
def psd(self, inPsd):
self._psd = inPsd
@property
def moments(self):
"""
Moments structure
This contains the result of all the integrals used in computing the
metrics above. It can be used for the ethinca components calculation,
or other similar calculations. This is composed of two compound
dictionaries. The first entry indicates which moment is being
calculated and the second entry indicates the upper frequency cutoff
that was used.
In all cases x = f/f0.
For the first entries the options are:
moments['J%d' %(i)][f_cutoff]
This stores the integral of
x**((-i)/3.) * delta X / PSD(x)
moments['log%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.))) x**((-i)/3.) * delta X / PSD(x)
moments['loglog%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.)))**2 x**((-i)/3.) * delta X / PSD(x)
        moments['logloglog%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.)))**3 x**((-i)/3.) * delta X / PSD(x)
        moments['loglogloglog%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.)))**4 x**((-i)/3.) * delta X / PSD(x)
The second entry stores the frequency cutoff that was used when
computing the integral.
"""
return self._moments
@moments.setter
def moments(self, inMoments):
self._moments=inMoments
@property
def evals(self):
"""
The eigenvalues of the parameter space.
This is a Dictionary of numpy.array
Each entry in the dictionary corresponds to the different frequency
ranges described in vary_fmax. If vary_fmax = False, the only entry
will be f_upper, this corresponds to integrals in [f_low,f_upper). This
entry is always present. Each other entry will use floats as keys to
the dictionary. These floats give the upper frequency cutoff when it is
varying.
Each numpy.array contains the eigenvalues which, with the eigenvectors
in evecs, are needed to rotate the
coordinate system to one in which the metric is the identity matrix.
"""
if self._evals is None:
errMsg = "The metric eigenvalues have not been set in the "
errMsg += "metricParameters instance."
raise ValueError(errMsg)
return self._evals
@evals.setter
def evals(self, inEvals):
if self.write_metric:
for frequency in inEvals.keys():
numpy.savetxt("metric_evals_%d.dat" %(frequency),
inEvals[frequency])
self._evals = inEvals
@property
def evecs(self):
"""
The eigenvectors of the parameter space.
This is a Dictionary of numpy.matrix
Each entry in the dictionary is as described under evals.
Each numpy.matrix contains the eigenvectors which, with the eigenvalues
in evals, are needed to rotate the
coordinate system to one in which the metric is the identity matrix.
"""
if self._evecs is None:
errMsg = "The metric eigenvectors have not been set in the "
errMsg += "metricParameters instance."
raise ValueError(errMsg)
return self._evecs
@evecs.setter
def evecs(self, inEvecs):
if self.write_metric:
for frequency in inEvecs.keys():
numpy.savetxt("metric_evecs_%d.dat" %(frequency),
inEvecs[frequency])
self._evecs = inEvecs
@property
def metric(self):
"""
The metric of the parameter space.
This is a Dictionary of numpy.matrix
Each entry in the dictionary is as described under evals.
Each numpy.matrix contains the metric of the parameter space in the
Lambda_i coordinate system.
"""
if self._metric is None:
errMsg = "The metric eigenvectors have not been set in the "
errMsg += "metricParameters instance."
raise ValueError(errMsg)
return self._metric
@metric.setter
def metric(self, inMetric):
if self.write_metric:
for frequency in inMetric.keys():
numpy.savetxt("metric_components_%d.dat" %(frequency),
inMetric[frequency])
self._metric = inMetric
@property
def time_unprojected_metric(self):
"""
The metric of the parameter space with the time dimension unprojected.
This is a Dictionary of numpy.matrix
Each entry in the dictionary is as described under evals.
Each numpy.matrix contains the metric of the parameter space in the
Lambda_i, t coordinate system. The time components are always in the
last [-1] position in the matrix.
"""
if self._time_unprojected_metric is None:
err_msg = "The time unprojected metric has not been set in the "
err_msg += "metricParameters instance."
raise ValueError(err_msg)
return self._time_unprojected_metric
@time_unprojected_metric.setter
def time_unprojected_metric(self, inMetric):
if self.write_metric:
for frequency in inMetric.keys():
numpy.savetxt("metric_timeunprojected_%d.dat" %(frequency),
inMetric[frequency])
self._time_unprojected_metric = inMetric
@property
def evecsCV(self):
"""
The eigenvectors of the principal directions of the mu space.
This is a Dictionary of numpy.matrix
Each entry in the dictionary is as described under evals.
Each numpy.matrix contains the eigenvectors which, with the eigenvalues
in evals, are needed to rotate the
coordinate system to one in which the metric is the identity matrix.
"""
if self._evecsCV is None:
errMsg = "The covariance eigenvectors have not been set in the "
errMsg += "metricParameters instance."
raise ValueError(errMsg)
return self._evecsCV
@evecsCV.setter
def evecsCV(self, inEvecs):
if self.write_metric:
for frequency in inEvecs.keys():
numpy.savetxt("covariance_evecs_%d.dat" %(frequency),
inEvecs[frequency])
self._evecsCV = inEvecs
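# Illustrative sketch, not in the original source: reading quantities back
# out of a populated metricParameters instance. The key names follow the
# layout documented in the `moments` property above; the 1000. cutoff is a
# placeholder and must match a frequency cutoff used when the moments and
# metric were computed.
def _example_read_metric_products(metric_params, f_cutoff=1000.):
    j4 = metric_params.moments['J4'][f_cutoff]        # plain x**(-4/3) moment
    log_j4 = metric_params.moments['log4'][f_cutoff]  # log-weighted variant
    evals = metric_params.evals[f_cutoff]
    evecs = metric_params.evecs[f_cutoff]
    return j4, log_j4, evals, evecs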
def insert_mass_range_option_group(parser,nonSpin=False):
"""
Adds the options used to specify mass ranges in the bank generation codes
to an argparser as an OptionGroup. This should be used if you
want to use these options in your code.
Parameters
-----------
parser : object
OptionParser instance.
nonSpin : boolean, optional (default=False)
If this is provided the spin-related options will not be added.
"""
massOpts = parser.add_argument_group("Options related to mass and spin "
"limits for bank generation")
massOpts.add_argument("--min-mass1", action="store", type=positive_float,
required=True,
help="Minimum mass1: must be >= min-mass2. "
"REQUIRED. UNITS=Solar mass")
massOpts.add_argument("--max-mass1", action="store", type=positive_float,
required=True,
help="Maximum mass1: must be >= max-mass2. "
"REQUIRED. UNITS=Solar mass")
massOpts.add_argument("--min-mass2", action="store", type=positive_float,
required=True,
help="Minimum mass2. REQUIRED. UNITS=Solar mass")
massOpts.add_argument("--max-mass2", action="store", type=positive_float,
required=True,
help="Maximum mass2. REQUIRED. UNITS=Solar mass")
massOpts.add_argument("--max-total-mass", action="store",
type=positive_float, default=None,
help="Maximum total mass. OPTIONAL, if not provided "
"the max total mass is determined by the component "
"masses. UNITS=Solar mass")
massOpts.add_argument("--min-total-mass", action="store",
type=positive_float, default=None,
help="Minimum total mass. OPTIONAL, if not provided the "
"min total mass is determined by the component masses."
" UNITS=Solar mass")
massOpts.add_argument("--max-chirp-mass", action="store",
type=positive_float, default=None,
help="Maximum chirp mass. OPTIONAL, if not provided the "
"max chirp mass is determined by the component masses."
" UNITS=Solar mass")
massOpts.add_argument("--min-chirp-mass", action="store",
type=positive_float, default=None,
help="Minimum total mass. OPTIONAL, if not provided the "
"min chirp mass is determined by the component masses."
" UNITS=Solar mass")
massOpts.add_argument("--max-eta", action="store", type=positive_float,
default=0.25,
help="Maximum symmetric mass ratio. OPTIONAL, no upper bound"
" on eta will be imposed if not provided. "
"UNITS=Solar mass.")
massOpts.add_argument("--min-eta", action="store", type=nonnegative_float,
default=0.,
help="Minimum symmetric mass ratio. OPTIONAL, no lower bound"
" on eta will be imposed if not provided. "
"UNITS=Solar mass.")
massOpts.add_argument("--ns-eos", action="store",
default=None,
help="Select the EOS to be used for the NS when calculating "
"the remnant disk mass. Only 2H is currently supported. "
"OPTIONAL")
massOpts.add_argument("--remnant-mass-threshold", action="store",
type=nonnegative_float, default=None,
help="Setting this filters EM dim NS-BH binaries: if the "
"remnant disk mass does not exceed this value, the NS-BH "
"binary is dropped from the target parameter space. "
"When it is set to None (default value) the EM dim "
"filter is not activated. OPTIONAL")
massOpts.add_argument("--use-eos-max-ns-mass", action="store_true", default=False,
help="Cut the mass range of the smaller object to the maximum "
"mass allowed by EOS. "
"OPTIONAL")
massOpts.add_argument("--delta-bh-spin", action="store",
type=positive_float, default=None,
help="Grid spacing used for the BH spin z component when "
"generating the surface of the minumum minimum symmetric "
"mass ratio as a function of BH spin and NS mass required "
"to produce a remnant disk mass that exceeds the threshold "
"specificed in --remnant-mass-threshold. "
"OPTIONAL (0.1 by default) ")
massOpts.add_argument("--delta-ns-mass", action="store",
type=positive_float, default=None,
help="Grid spacing used for the NS mass when generating the "
"surface of the minumum minimum symmetric mass ratio as "
"a function of BH spin and NS mass required to produce "
"a remnant disk mass that exceeds the thrsehold specified "
"in --remnant-mass-threshold. "
"OPTIONAL (0.1 by default) ")
if nonSpin:
parser.add_argument_group(massOpts)
return massOpts
massOpts.add_argument("--max-ns-spin-mag", action="store",
type=nonnegative_float, default=None,
help="Maximum neutron star spin magnitude. Neutron stars "
"are defined as components lighter than the NS-BH "
"boundary (3 Msun by default). REQUIRED if min-mass2 "
"< ns-bh-boundary-mass")
massOpts.add_argument("--max-bh-spin-mag", action="store",
type=nonnegative_float, default=None,
help="Maximum black hole spin magnitude. Black holes are "
"defined as components at or above the NS-BH boundary "
"(3 Msun by default). REQUIRED if max-mass1 >= "
"ns-bh-boundary-mass")
# Mutually exclusive group prevents both options being set on command line
# If --nsbh-flag is True then spinning bank generation must ignore the
# default value of ns-bh-boundary-mass.
action = massOpts.add_mutually_exclusive_group(required=False)
action.add_argument("--ns-bh-boundary-mass", action='store',
type=positive_float,
help="Mass boundary between neutron stars and black holes. "
"Components below this mass are considered neutron "
"stars and are subject to the neutron star spin limits. "
"Components at/above are subject to the black hole spin "
"limits. OPTIONAL, default=%f. UNITS=Solar mass" \
% massRangeParameters.default_nsbh_boundary_mass)
action.add_argument("--nsbh-flag", action="store_true", default=False,
help="Set this flag if generating a bank that contains only "
"systems with 1 black hole and 1 neutron star. With "
"this flag set the heavier body will always be subject "
"to the black hole spin restriction and the lighter "
"to the neutron star spin restriction, regardless of "
"mass. OPTIONAL. If set, the value of "
"--ns-bh-boundary-mass will be ignored.")
return massOpts
def verify_mass_range_options(opts, parser, nonSpin=False):
"""
    Parses the mass range options given and verifies that they are
correct.
Parameters
----------
opts : argparse.Values instance
Result of parsing the input options with OptionParser
parser : object
The OptionParser instance.
nonSpin : boolean, optional (default=False)
If this is provided the spin-related options will not be checked.
"""
# Mass1 must be the heavier!
if opts.min_mass1 < opts.min_mass2:
parser.error("min-mass1 cannot be less than min-mass2!")
if opts.max_mass1 < opts.max_mass2:
parser.error("max-mass1 cannot be less than max-mass2!")
# If given are min/max total mass/chirp mass possible?
if opts.min_total_mass \
and (opts.min_total_mass > opts.max_mass1 + opts.max_mass2):
err_msg = "Supplied minimum total mass %f " %(opts.min_total_mass,)
err_msg += "greater than the sum of the two max component masses "
err_msg += " %f and %f." %(opts.max_mass1,opts.max_mass2)
parser.error(err_msg)
if opts.max_total_mass \
and (opts.max_total_mass < opts.min_mass1 + opts.min_mass2):
err_msg = "Supplied maximum total mass %f " %(opts.max_total_mass,)
err_msg += "smaller than the sum of the two min component masses "
err_msg += " %f and %f." %(opts.min_mass1,opts.min_mass2)
parser.error(err_msg)
if opts.max_total_mass and opts.min_total_mass \
and (opts.max_total_mass < opts.min_total_mass):
parser.error("Min total mass must be larger than max total mass.")
# Warn the user that his/her setup is such that EM dim NS-BH binaries
# will not be targeted by the template bank that is being built. Also
# inform him/her about the caveats involved in this.
if hasattr(opts, 'remnant_mass_threshold') \
and opts.remnant_mass_threshold is not None:
logging.info("""You have asked to exclude EM dim NS-BH systems from the
target parameter space. The script will assume that m1 is
the BH and m2 is the NS: make sure that your settings
respect this convention. The script will also treat the
NS as non-spinning: use NS spins in the template bank
at your own risk!""")
if opts.use_eos_max_ns_mass:
logging.info("""You have asked to take into account the maximum NS
mass value for the EOS in use.""")
# Find out if the EM constraint surface data already exists or not
# and inform user whether this will be read from file or generated.
        # This is the minimum eta as a function of BH spin and NS mass
# required to produce an EM counterpart
if os.path.isfile('constraint_em_bright.npz'):
logging.info("""The constraint surface for EM bright binaries
will be read in from constraint_em_bright.npz.""")
# Assign min/max total mass from mass1, mass2 if not specified
if (not opts.min_total_mass) or \
((opts.min_mass1 + opts.min_mass2) > opts.min_total_mass):
opts.min_total_mass = opts.min_mass1 + opts.min_mass2
if (not opts.max_total_mass) or \
((opts.max_mass1 + opts.max_mass2) < opts.max_total_mass):
opts.max_total_mass = opts.max_mass1 + opts.max_mass2
# It is vital that min and max total mass be set correctly.
    # This is because the heavily-used function get_random_mass will place
# points first in total mass (to some power), and then in eta. If the total
# mass limits are not well known ahead of time it will place unphysical
# points and fail.
# This test is a bit convoluted as we identify the maximum and minimum
# possible total mass from chirp mass and/or eta restrictions.
if opts.min_chirp_mass is not None:
# Need to get the smallest possible min_tot_mass from this chirp mass
# There are 4 possibilities for where the min_tot_mass is found on the
# line of min_chirp_mass that interacts with the component mass limits.
# Either it is found at max_m2, or at min_m1, or it starts on the equal
# mass line within the parameter space, or it doesn't intersect
# at all.
# First let's get the masses at both of these possible points
m1_at_max_m2 = pnutils.mchirp_mass1_to_mass2(opts.min_chirp_mass,
opts.max_mass2)
if m1_at_max_m2 < opts.max_mass2:
# Unphysical, remove
m1_at_max_m2 = -1
m2_at_min_m1 = pnutils.mchirp_mass1_to_mass2(opts.min_chirp_mass,
opts.min_mass1)
if m2_at_min_m1 > opts.min_mass1:
# Unphysical, remove
m2_at_min_m1 = -1
# Get the values on the equal mass line
m1_at_equal_mass, m2_at_equal_mass = pnutils.mchirp_eta_to_mass1_mass2(
opts.min_chirp_mass, 0.25)
# Are any of these possible?
if m1_at_max_m2 <= opts.max_mass1 and m1_at_max_m2 >= opts.min_mass1:
min_tot_mass = opts.max_mass2 + m1_at_max_m2
elif m2_at_min_m1 <= opts.max_mass2 and m2_at_min_m1 >= opts.min_mass2:
min_tot_mass = opts.min_mass1 + m2_at_min_m1
elif m1_at_equal_mass <= opts.max_mass1 and \
m1_at_equal_mass >= opts.min_mass1 and \
m2_at_equal_mass <= opts.max_mass2 and \
m2_at_equal_mass >= opts.min_mass2:
min_tot_mass = m1_at_equal_mass + m2_at_equal_mass
# So either the restriction is low enough to be redundant, or is
# removing all the parameter space
elif m2_at_min_m1 < opts.min_mass2:
# This is the redundant case, ignore
min_tot_mass = opts.min_total_mass
else:
# And this is the bad case
err_msg = "The minimum chirp mass provided is not possible given "
err_msg += "restrictions on component masses."
raise ValueError(err_msg)
# Is there also an eta restriction?
if opts.max_eta:
# Get the value of m1,m2 at max_eta, min_chirp_mass
max_eta_m1, max_eta_m2 = pnutils.mchirp_eta_to_mass1_mass2(
opts.min_chirp_mass, opts.max_eta)
max_eta_min_tot_mass = max_eta_m1 + max_eta_m2
if max_eta_min_tot_mass > min_tot_mass:
# Okay, eta does restrict this further. Still physical?
min_tot_mass = max_eta_min_tot_mass
if max_eta_m1 > opts.max_mass1:
err_msg = "The combination of component mass, chirp "
err_msg += "mass, eta and (possibly) total mass limits "
err_msg += "have precluded all systems."
raise ValueError(err_msg)
# Update min_tot_mass if needed
if min_tot_mass > opts.min_total_mass:
opts.min_total_mass = float(min_tot_mass)
# Then need to do max_chirp_mass and min_eta
if opts.max_chirp_mass is not None:
# Need to get the largest possible maxn_tot_mass from this chirp mass
# There are 3 possibilities for where the max_tot_mass is found on the
# line of max_chirp_mass that interacts with the component mass limits.
# Either it is found at min_m2, or at max_m1, or it doesn't intersect
# at all.
# First let's get the masses at both of these possible points
m1_at_min_m2 = pnutils.mchirp_mass1_to_mass2(opts.max_chirp_mass,
opts.min_mass2)
m2_at_max_m1 = pnutils.mchirp_mass1_to_mass2(opts.max_chirp_mass,
opts.max_mass1)
# Are either of these possible?
if m1_at_min_m2 <= opts.max_mass1 and m1_at_min_m2 >= opts.min_mass1:
max_tot_mass = opts.min_mass2 + m1_at_min_m2
elif m2_at_max_m1 <= opts.max_mass2 and m2_at_max_m1 >= opts.min_mass2:
max_tot_mass = opts.max_mass1 + m2_at_max_m1
# So either the restriction is low enough to be redundant, or is
        # removing all the parameter space
elif m2_at_max_m1 > opts.max_mass2:
# This is the redundant case, ignore
max_tot_mass = opts.max_total_mass
else:
# And this is the bad case
err_msg = "The maximum chirp mass provided is not possible given "
err_msg += "restrictions on component masses."
raise ValueError(err_msg)
# Is there also an eta restriction?
if opts.min_eta:
# Get the value of m1,m2 at max_eta, min_chirp_mass
min_eta_m1, min_eta_m2 = pnutils.mchirp_eta_to_mass1_mass2(
opts.max_chirp_mass, opts.min_eta)
min_eta_max_tot_mass = min_eta_m1 + min_eta_m2
if min_eta_max_tot_mass < max_tot_mass:
# Okay, eta does restrict this further. Still physical?
max_tot_mass = min_eta_max_tot_mass
if min_eta_m1 < opts.min_mass1:
err_msg = "The combination of component mass, chirp "
err_msg += "mass, eta and (possibly) total mass limits "
err_msg += "have precluded all systems."
raise ValueError(err_msg)
# Update min_tot_mass if needed
if max_tot_mass < opts.max_total_mass:
opts.max_total_mass = float(max_tot_mass)
# Need to check max_eta alone for minimum and maximum mass
if opts.max_eta:
# Similar to above except this can affect both the minimum and maximum
# total mass. Need to identify where the line of max_eta intersects
# the parameter space, and if it affects mass restrictions.
m1_at_min_m2 = pnutils.eta_mass1_to_mass2(opts.max_eta, opts.min_mass2,
return_mass_heavier=True)
m2_at_min_m1 = pnutils.eta_mass1_to_mass2(opts.max_eta, opts.min_mass1,
return_mass_heavier=False)
m1_at_max_m2 = pnutils.eta_mass1_to_mass2(opts.max_eta, opts.max_mass2,
return_mass_heavier=True)
m2_at_max_m1 = pnutils.eta_mass1_to_mass2(opts.max_eta, opts.max_mass1,
return_mass_heavier=False)
# Check for restrictions on the minimum total mass
# Are either of these possible?
if m1_at_min_m2 <= opts.max_mass1 and m1_at_min_m2 >= opts.min_mass1:
min_tot_mass = opts.min_mass2 + m1_at_min_m2
elif m2_at_min_m1 <= opts.max_mass2 and m2_at_min_m1 >= opts.min_mass2:
# This case doesn't change the minimal total mass
min_tot_mass = opts.min_total_mass
# So either the restriction is low enough to be redundant, or is
        # removing all the parameter space
elif m2_at_min_m1 > opts.max_mass2:
# This is the redundant case, ignore
min_tot_mass = opts.min_total_mass
elif opts.max_eta == 0.25 and (m1_at_min_m2 < opts.min_mass2 or \
m2_at_min_m1 > opts.min_mass1):
# This just catches potential roundoff issues in the case that
# max-eta is not used
min_tot_mass = opts.min_total_mass
else:
# And this is the bad case
err_msg = "The maximum eta provided is not possible given "
err_msg += "restrictions on component masses."
print(m1_at_min_m2, m2_at_min_m1, m1_at_max_m2, m2_at_max_m1)
print(opts.min_mass1, opts.max_mass1, opts.min_mass2, opts.max_mass2)
raise ValueError(err_msg)
# Update min_tot_mass if needed
if min_tot_mass > opts.min_total_mass:
opts.min_total_mass = float(min_tot_mass)
# Check for restrictions on the maximum total mass
# Are either of these possible?
if m2_at_max_m1 <= opts.max_mass2 and m2_at_max_m1 >= opts.min_mass2:
max_tot_mass = opts.max_mass1 + m2_at_max_m1
elif m1_at_max_m2 <= opts.max_mass1 and m1_at_max_m2 >= opts.min_mass1:
# This case doesn't change the maximal total mass
max_tot_mass = opts.max_total_mass
# So either the restriction is low enough to be redundant, or is
        # removing all the parameter space, the latter case is already tested
else:
# This is the redundant case, ignore
max_tot_mass = opts.max_total_mass
if max_tot_mass < opts.max_total_mass:
opts.max_total_mass = float(max_tot_mass)
# Need to check min_eta alone for maximum and minimum total mass
if opts.min_eta:
# Same as max_eta.
# Need to identify where the line of max_eta intersects
# the parameter space, and if it affects mass restrictions.
m1_at_min_m2 = pnutils.eta_mass1_to_mass2(opts.min_eta, opts.min_mass2,
return_mass_heavier=True)
m2_at_min_m1 = pnutils.eta_mass1_to_mass2(opts.min_eta, opts.min_mass1,
return_mass_heavier=False)
m1_at_max_m2 = pnutils.eta_mass1_to_mass2(opts.min_eta, opts.max_mass2,
return_mass_heavier=True)
m2_at_max_m1 = pnutils.eta_mass1_to_mass2(opts.min_eta, opts.max_mass1,
return_mass_heavier=False)
# Check for restrictions on the maximum total mass
# Are either of these possible?
if m1_at_max_m2 <= opts.max_mass1 and m1_at_max_m2 >= opts.min_mass1:
max_tot_mass = opts.max_mass2 + m1_at_max_m2
elif m2_at_max_m1 <= opts.max_mass2 and m2_at_max_m1 >= opts.min_mass2:
# This case doesn't affect the maximum total mass
max_tot_mass = opts.max_total_mass
# So either the restriction is low enough to be redundant, or is
        # removing all the parameter space
elif m2_at_max_m1 < opts.min_mass2:
# This is the redundant case, ignore
max_tot_mass = opts.max_total_mass
else:
# And this is the bad case
err_msg = "The minimum eta provided is not possible given "
err_msg += "restrictions on component masses."
raise ValueError(err_msg)
# Update min_tot_mass if needed
if max_tot_mass < opts.max_total_mass:
opts.max_total_mass = float(max_tot_mass)
# Check for restrictions on the minimum total mass
# Are either of these possible?
if m2_at_min_m1 <= opts.max_mass2 and m2_at_min_m1 >= opts.min_mass2:
min_tot_mass = opts.min_mass1 + m2_at_min_m1
elif m1_at_min_m2 <= opts.max_mass1 and m1_at_min_m2 >= opts.min_mass1:
# This case doesn't change the maximal total mass
min_tot_mass = opts.min_total_mass
# So either the restriction is low enough to be redundant, or is
        # removing all the parameter space, which is tested above
else:
# This is the redundant case, ignore
min_tot_mass = opts.min_total_mass
if min_tot_mass > opts.min_total_mass:
opts.min_total_mass = float(min_tot_mass)
if opts.max_total_mass < opts.min_total_mass:
err_msg = "After including restrictions on chirp mass, component mass, "
err_msg += "eta and total mass, no physical systems are possible."
raise ValueError(err_msg)
if opts.max_eta and opts.min_eta and (opts.max_eta < opts.min_eta):
parser.error("--max-eta must be larger than --min-eta.")
if nonSpin:
return
if opts.max_ns_spin_mag is None:
if opts.nsbh_flag:
parser.error("Must supply --max_ns_spin_mag with --nsbh-flag")
# Can ignore this if no NSs will be generated
elif opts.min_mass2 < (opts.ns_bh_boundary_mass or
massRangeParameters.default_nsbh_boundary_mass):
parser.error("Must supply --max-ns-spin-mag for the chosen"
" value of --min_mass2")
else:
opts.max_ns_spin_mag = opts.max_bh_spin_mag
if opts.max_bh_spin_mag is None:
if opts.nsbh_flag:
parser.error("Must supply --max_bh_spin_mag with --nsbh-flag")
# Can ignore this if no BHs will be generated
if opts.max_mass1 >= (opts.ns_bh_boundary_mass or
massRangeParameters.default_nsbh_boundary_mass):
parser.error("Must supply --max-bh-spin-mag for the chosen"
" value of --max_mass1")
else:
opts.max_bh_spin_mag = opts.max_ns_spin_mag
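# Illustrative sketch, not in the original source: constructing the mass-range
# container defined below directly, without going through argparse. The
# limits are placeholder NSBH-style values.
def _example_mass_range_params():
    return massRangeParameters(
        minMass1=3., maxMass1=15., minMass2=1., maxMass2=3.,
        maxNSSpinMag=0.05, maxBHSpinMag=0.98, nsbhFlag=True)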
class massRangeParameters(object):
"""
This class holds all of the options that are parsed in the function
insert_mass_range_option_group
and all products produced using these options. It can also be initialized
from the __init__ function providing directly the options normally
provided on the command line
"""
default_nsbh_boundary_mass = 3.
default_ns_eos = '2H'
default_delta_bh_spin = 0.1
default_delta_ns_mass = 0.1
def __init__(self, minMass1, maxMass1, minMass2, maxMass2,
maxNSSpinMag=0, maxBHSpinMag=0, maxTotMass=None,
minTotMass=None, maxEta=None, minEta=0,
max_chirp_mass=None, min_chirp_mass=None,
ns_bh_boundary_mass=None, nsbhFlag=False,
remnant_mass_threshold=None, ns_eos=None, use_eos_max_ns_mass=False,
delta_bh_spin=None, delta_ns_mass=None):
"""
Initialize an instance of the massRangeParameters by providing all
options directly. See the help message associated with any code
that uses the metric options for more details of how to set each of
these. For e.g. pycbc_aligned_stoch_bank --help
"""
self.minMass1=minMass1
self.maxMass1=maxMass1
self.minMass2=minMass2
self.maxMass2=maxMass2
self.maxNSSpinMag=maxNSSpinMag
self.maxBHSpinMag=maxBHSpinMag
self.minTotMass = minMass1 + minMass2
if minTotMass and (minTotMass > self.minTotMass):
self.minTotMass = minTotMass
self.maxTotMass = maxMass1 + maxMass2
if maxTotMass and (maxTotMass < self.maxTotMass):
self.maxTotMass = maxTotMass
if maxEta:
self.maxEta=maxEta
else:
self.maxEta=0.25
self.max_chirp_mass = max_chirp_mass
self.min_chirp_mass = min_chirp_mass
self.minEta=minEta
self.ns_bh_boundary_mass = (
ns_bh_boundary_mass or self.default_nsbh_boundary_mass)
self.nsbhFlag=nsbhFlag
self.remnant_mass_threshold = remnant_mass_threshold
self.ns_eos = (
ns_eos or self.default_ns_eos)
self.delta_bh_spin = (
delta_bh_spin or self.default_delta_bh_spin)
self.delta_ns_mass = (
delta_ns_mass or self.default_delta_ns_mass)
self.use_eos_max_ns_mass = use_eos_max_ns_mass
if self.remnant_mass_threshold is not None:
if self.ns_eos != '2H':
errMsg = """
By setting a value for --remnant-mass-threshold
you have asked to filter out EM dim NS-BH templates.
The EOS you chose is not supported currently: please
remove the --ns-eos option from your command line or
set it to '2H'.
"""
raise ValueError(errMsg)
if use_eos_max_ns_mass:
_, max_ns_g_mass = load_ns_sequence(self.ns_eos)
if(self.maxMass2 > max_ns_g_mass):
errMsg = """
The maximum NS mass supported by this EOS is
{0}. Please set --max-mass2 to this value or run
without the --use-eos-max-ns-mass flag.
""".format(max_ns_g_mass-0.0000000001)
raise ValueError(errMsg)
# FIXME: This may be inaccurate if Eta limits are given
# This will not cause any problems, but maybe could be fixed.
self.minCompMass = self.minMass2
self.maxCompMass = self.maxMass1
# WARNING: We expect mass1 > mass2 ALWAYS
# Check input:
if (minMass2 > minMass1) or (maxMass2 > maxMass1):
errMsg = "Mass1 must be larger than mass2. Check input options."
raise ValueError(errMsg)
if (minMass2 > maxMass2) or (minMass1 > maxMass1):
errMsg = "Minimum masses cannot be larger than maximum masses."
errMsg += "Check input options."
raise ValueError(errMsg)
@classmethod
def from_argparse(cls, opts, nonSpin=False):
"""
Initialize an instance of the massRangeParameters class from an
argparse.OptionParser instance. This assumes that
insert_mass_range_option_group
and
verify_mass_range_options
have already been called before initializing the class.
"""
if nonSpin:
return cls(opts.min_mass1, opts.max_mass1, opts.min_mass2,
opts.max_mass2, maxTotMass=opts.max_total_mass,
minTotMass=opts.min_total_mass, maxEta=opts.max_eta,
minEta=opts.min_eta, max_chirp_mass=opts.max_chirp_mass,
min_chirp_mass=opts.min_chirp_mass,
remnant_mass_threshold=opts.remnant_mass_threshold,
ns_eos=opts.ns_eos, use_eos_max_ns_mass=opts.use_eos_max_ns_mass,
delta_bh_spin=opts.delta_bh_spin, delta_ns_mass=opts.delta_ns_mass)
else:
return cls(opts.min_mass1, opts.max_mass1, opts.min_mass2,
opts.max_mass2, maxTotMass=opts.max_total_mass,
minTotMass=opts.min_total_mass, maxEta=opts.max_eta,
minEta=opts.min_eta, maxNSSpinMag=opts.max_ns_spin_mag,
maxBHSpinMag=opts.max_bh_spin_mag,
nsbhFlag=opts.nsbh_flag,
max_chirp_mass=opts.max_chirp_mass,
min_chirp_mass=opts.min_chirp_mass,
ns_bh_boundary_mass=opts.ns_bh_boundary_mass,
remnant_mass_threshold=opts.remnant_mass_threshold,
ns_eos=opts.ns_eos, use_eos_max_ns_mass=opts.use_eos_max_ns_mass,
delta_bh_spin=opts.delta_bh_spin, delta_ns_mass=opts.delta_ns_mass)
def is_outside_range(self, mass1, mass2, spin1z, spin2z):
"""
Test if a given location in mass1, mass2, spin1z, spin2z is within the
range of parameters allowed by the massParams object.
"""
# Mass1 test
if mass1 * 1.001 < self.minMass1:
return 1
if mass1 > self.maxMass1 * 1.001:
return 1
# Mass2 test
if mass2 * 1.001 < self.minMass2:
return 1
if mass2 > self.maxMass2 * 1.001:
return 1
# Spin1 test
if self.nsbhFlag:
if (abs(spin1z) > self.maxBHSpinMag * 1.001):
return 1
else:
spin1zM = abs(spin1z)
if not( (mass1 * 1.001 > self.ns_bh_boundary_mass \
and spin1zM <= self.maxBHSpinMag * 1.001) \
or (mass1 < self.ns_bh_boundary_mass * 1.001 \
and spin1zM <= self.maxNSSpinMag * 1.001)):
return 1
# Spin2 test
if self.nsbhFlag:
if (abs(spin2z) > self.maxNSSpinMag * 1.001):
return 1
else:
spin2zM = abs(spin2z)
if not( (mass2 * 1.001 > self.ns_bh_boundary_mass \
and spin2zM <= self.maxBHSpinMag * 1.001) \
or (mass2 < self.ns_bh_boundary_mass * 1.001 and \
spin2zM <= self.maxNSSpinMag * 1.001)):
return 1
# Total mass test
mTot = mass1 + mass2
if mTot > self.maxTotMass * 1.001:
return 1
if mTot * 1.001 < self.minTotMass:
return 1
# Eta test
eta = mass1 * mass2 / (mTot * mTot)
if eta > self.maxEta * 1.001:
return 1
if eta * 1.001 < self.minEta:
return 1
# Chirp mass test
chirp_mass = mTot * eta**(3./5.)
if self.min_chirp_mass is not None \
and chirp_mass * 1.001 < self.min_chirp_mass:
return 1
if self.max_chirp_mass is not None \
and chirp_mass > self.max_chirp_mass * 1.001:
return 1
return 0
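# Illustrative usage of massRangeParameters (hypothetical values): build a
# range directly and test whether a point lies inside it.
# >>> mr = massRangeParameters(1.0, 3.0, 1.0, 3.0, maxNSSpinMag=0.05,
# ...                          maxBHSpinMag=0.99, maxTotMass=6.0,
# ...                          minTotMass=2.0)
# >>> mr.is_outside_range(1.4, 1.35, 0.0, 0.0)  # expected 0 (inside range)
# >>> mr.is_outside_range(3.5, 1.35, 0.0, 0.0)  # expected 1 (mass1 too large)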
class ethincaParameters(object):
"""
This class holds all of the options that are parsed in the function
insert_ethinca_metric_options
and all products produced using these options. It can also be initialized
from the __init__ function, providing directly the options normally
provided on the command line
"""
def __init__(self, pnOrder, cutoff, freqStep, fLow=None, full_ethinca=False,
time_ethinca=False):
"""
Initialize an instance of ethincaParameters by providing all
options directly. See the insert_ethinca_metric_options() function
for explanation or e.g. run pycbc_geom_nonspinbank --help
"""
self.full_ethinca=full_ethinca
self.time_ethinca=time_ethinca
self.doEthinca= self.full_ethinca or self.time_ethinca
self.pnOrder=pnOrder
self.cutoff=cutoff
self.freqStep=freqStep
# independent fLow for ethinca metric is currently not used
self.fLow=fLow
# check that ethinca options make sense
if self.full_ethinca and self.time_ethinca:
err_msg = "It does not make sense to ask me to do the time "
err_msg += "restricted ethinca and also the full ethinca."
raise ValueError(err_msg)
if self.doEthinca and not (
cutoff in pnutils.named_frequency_cutoffs.keys()):
raise ValueError("Need a valid cutoff formula to calculate "
"ethinca! Possible values are "+
str(tuple(pnutils.named_frequency_cutoffs.keys())))
if self.doEthinca and not freqStep:
raise ValueError("Need to specify a cutoff frequency step to "
"calculate ethinca! (ethincaFreqStep)")
@classmethod
def from_argparse(cls, opts):
"""
Initialize an instance of the ethincaParameters class from an
argparse.OptionParser instance. This assumes that
insert_ethinca_metric_options
and
verify_ethinca_metric_options
have already been called before initializing the class.
"""
return cls(opts.ethinca_pn_order, opts.filter_cutoff,
opts.ethinca_frequency_step, fLow=None,
full_ethinca=opts.calculate_ethinca_metric,
time_ethinca=opts.calculate_time_metric_components)
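# Illustrative construction of the ethinca options without argparse. The PN
# order string used here is only a placeholder; the cutoff name must be a key
# of pnutils.named_frequency_cutoffs.
# >>> eth = ethincaParameters("threePointFivePN", "SchwarzISCO", 10.,
# ...                         full_ethinca=True)
# >>> eth.doEthinca  # expected True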
def insert_ethinca_metric_options(parser):
"""
Adds the options used to calculate the ethinca metric, if required.
Parameters
-----------
parser : object
OptionParser instance.
"""
ethincaGroup = parser.add_argument_group("Ethinca metric options",
"Options used in the calculation of Gamma metric "
"components for the ethinca coincidence test and for "
"assigning high-frequency cutoffs to templates.")
ethinca_methods = ethincaGroup.add_mutually_exclusive_group()
ethinca_methods.add_argument("--calculate-time-metric-components",
action="store_true", default=False,
help="If given, the ethinca metric will be calculated "
"for only the time component, and stored in the Gamma0 "
"entry of the sngl_inspiral table. OPTIONAL, default=False")
ethinca_methods.add_argument("--calculate-ethinca-metric",
action="store_true", default=False,
help="If given, the ethinca metric will be calculated "
"and stored in the Gamma entries of the sngl_inspiral "
"table. OPTIONAL, default=False")
ethincaGroup.add_argument("--ethinca-pn-order",
default=None, choices=get_ethinca_orders(),
help="Specify a PN order to be used in calculating the "
"ethinca metric. OPTIONAL: if not specified, the same "
"order will be used as for the bank metric.")
ethincaGroup.add_argument("--filter-cutoff",
default=None,
choices=tuple(pnutils.named_frequency_cutoffs.keys()),
help="Specify an upper frequency cutoff formula for the "
"ethinca metric calculation, and for the values of f_final"
" assigned to the templates. REQUIRED if the "
"calculate-ethinca-metric option is given.")
ethincaGroup.add_argument("--ethinca-frequency-step", action="store",
type=float, default=10.,
help="Control the precision of the upper frequency cutoff."
" For speed, the metric is calculated only for discrete "
"f_max values with a spacing given by this option. Each "
"template is assigned the metric for the f_max closest to "
"its analytical cutoff formula. OPTIONAL, default=10. "
"UNITS=Hz")
return ethincaGroup
def verify_ethinca_metric_options(opts, parser):
"""
Checks that the necessary options are given for the ethinca metric
calculation.
Parameters
----------
opts : argparse.Values instance
Result of parsing the input options with OptionParser
parser : object
The OptionParser instance.
"""
if opts.filter_cutoff is not None and not (opts.filter_cutoff in
pnutils.named_frequency_cutoffs.keys()):
parser.error("Need a valid cutoff formula to calculate ethinca or "
"assign filter f_final values! Possible values are "
+str(tuple(pnutils.named_frequency_cutoffs.keys())))
if (opts.calculate_ethinca_metric or opts.calculate_time_metric_components)\
and not opts.ethinca_frequency_step:
parser.error("Need to specify a cutoff frequency step to calculate "
"ethinca!")
if not (opts.calculate_ethinca_metric or\
opts.calculate_time_metric_components) and opts.ethinca_pn_order:
parser.error("Can't specify an ethinca PN order if not "
"calculating ethinca metric!")
def check_ethinca_against_bank_params(ethincaParams, metricParams):
"""
Cross-check the ethinca and bank layout metric calculation parameters
and set the ethinca metric PN order equal to the bank PN order if not
previously set.
Parameters
----------
ethincaParams: instance of ethincaParameters
metricParams: instance of metricParameters
"""
if ethincaParams.doEthinca:
if metricParams.f0 != metricParams.fLow:
raise ValueError("If calculating ethinca metric, f0 and f-low "
"must be equal!")
if ethincaParams.fLow is not None and (
ethincaParams.fLow != metricParams.fLow):
raise ValueError("Ethinca metric calculation does not currently "
"support a f-low value different from the bank "
"metric!")
if ethincaParams.pnOrder is None:
ethincaParams.pnOrder = metricParams.pnOrder
else: pass
| 57,353
| 46.4
| 95
|
py
|
pycbc
|
pycbc-master/pycbc/tmpltbank/partitioned_bank.py
|
# Copyright (C) 2013 Ian W. Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import copy
import numpy
import logging
from pycbc.tmpltbank import coord_utils
class PartitionedTmpltbank(object):
"""
This class is used to hold a template bank partitioned into numerous bins
based on position in the Cartesian parameter space where the axes are the
principal components. It can also be used to hold intermediary
products used while constructing (e.g.) a stochastic template bank.
"""
def __init__(self, mass_range_params, metric_params, ref_freq,
bin_spacing, bin_range_check=1):
"""
Set up the partitioned template bank class. The combination of the
reference frequency, the bin spacing and the metric dictates how the
parameter space will be partitioned.
Parameters
-----------
mass_range_params : massRangeParameters object
An initialized massRangeParameters object holding the details of
the mass and spin ranges being considered.
metric_params : metricParameters object
An initialized metricParameters object holding the details of the
parameter space metric that is being used.
ref_freq : float
The reference frequency to use as the upper frequency cutoff of
the metric when partitioning the bank. In general this would be
set to the *smallest* upper frequency cutoff that is possible in
the given parameter space. However, in some cases this can lead
to only a small number of partitions and the computational cost
will increase dramatically. NOTE: when using the vary-fupper
option this upper frequency cutoff is only used to determine which
points should be matched against each other, it is *not* used in
the actual metric-based calculation of the distance (which uses the
frequency cutoffs of the points being considered).
bin_spacing : float
The metric distance to space the bins by. NOTE: If you want to
place the bins to have a width corresponding to a minimal match of
0.97 you would set this to :math:`(1 - 0.97)^{0.5}`.
Note the square root: the mismatch corresponds to the square of the
parameter-space distance.
bin_range_check : int
When computing matches consider points in the corresponding bin and
all bins +/- this value in both chi_1 and chi_2 directions.
DEFAULT = 1.
"""
# Flags to be used in other methods of this class. Initialized here for
# simplicity
self.spin_warning_given = False
# These will probably be used a lot, so add to object
self.mass_range_params = mass_range_params
self.metric_params = metric_params
self.ref_freq = ref_freq
self.bin_spacing = bin_spacing
# Get parameter space extent
vals = coord_utils.estimate_mass_range(1000000, mass_range_params,
metric_params, ref_freq, covary=True)
chi1_max = vals[0].max()
chi1_min = vals[0].min()
chi1_diff = chi1_max - chi1_min
chi2_max = vals[1].max()
chi2_min = vals[1].min()
chi2_diff = chi2_max - chi2_min
# Add a little bit extra as we may not have reached the edges.
# FIXME: Maybe better to use the numerical code to find maxima here?
chi1_min = chi1_min - 0.1*chi1_diff
chi1_max = chi1_max + 0.1*chi1_diff
chi2_min = chi2_min - 0.1*chi2_diff
chi2_max = chi2_max + 0.1*chi2_diff
massbank = {}
bank = {}
# Also add a little bit here
for i in range(-2, int((chi1_max - chi1_min) // bin_spacing + 2)):
bank[i] = {}
massbank[i] = {}
for j in range(-2, int((chi2_max - chi2_min) // bin_spacing + 2)):
bank[i][j] = []
massbank[i][j] = {}
massbank[i][j]['mass1s'] = numpy.array([])
self.massbank = massbank
self.bank = bank
# Record minimum and maximum bins
self.min_chi1_bin = -2
self.min_chi2_bin = -2
self.max_chi1_bin = int((chi1_max - chi1_min) // bin_spacing + 1)
self.max_chi2_bin = int((chi2_max - chi2_min) // bin_spacing + 1)
self.chi1_min = chi1_min
self.chi1_max = chi1_max
self.chi2_min = chi2_min
self.chi2_max = chi2_max
# How many adjacent bins should we check?
self.bin_range_check = bin_range_check
self.bin_loop_order = coord_utils.outspiral_loop(self.bin_range_check)
def get_point_from_bins_and_idx(self, chi1_bin, chi2_bin, idx):
"""Find masses and spins given bin numbers and index.
Given the chi1 bin, chi2 bin and an index, return the masses and spins
of the point at that index. Will fail if no point exists there.
Parameters
-----------
chi1_bin : int
The bin number for chi1.
chi2_bin : int
The bin number for chi2.
idx : int
The index within the chi1, chi2 bin.
Returns
--------
mass1 : float
Mass of heavier body.
mass2 : float
Mass of lighter body.
spin1z : float
Spin of heavier body.
spin2z : float
Spin of lighter body.
"""
mass1 = self.massbank[chi1_bin][chi2_bin]['mass1s'][idx]
mass2 = self.massbank[chi1_bin][chi2_bin]['mass2s'][idx]
spin1z = self.massbank[chi1_bin][chi2_bin]['spin1s'][idx]
spin2z = self.massbank[chi1_bin][chi2_bin]['spin2s'][idx]
return mass1, mass2, spin1z, spin2z
def get_freq_map_and_normalizations(self, frequency_list,
upper_freq_formula):
"""
If using the --vary-fupper capability we need to store the mapping
between index and frequencies in the list. We also precalculate the
normalization factor at every frequency, which is used when estimating
overlaps to account for abrupt changes in termination frequency.
Parameters
-----------
frequency_list : array of floats
The frequencies for which the metric has been computed and lie
within the parameter space being considered.
upper_freq_formula : string
The name of the upper frequency cutoff formula (a key of
pnutils.named_frequency_cutoffs) used when assigning cutoffs to templates.
"""
self.frequency_map = {}
self.normalization_map = {}
self.upper_freq_formula = upper_freq_formula
# FIXME: Must this be sorted on input
frequency_list.sort()
for idx, frequency in enumerate(frequency_list):
self.frequency_map[frequency] = idx
self.normalization_map[frequency] = \
(self.metric_params.moments['I7'][frequency])**0.5
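# Sketch of the resulting maps for a hypothetical input: after calling
# get_freq_map_and_normalizations([60., 100., 220.], "SchwarzISCO") the
# object would hold
# self.frequency_map == {60.: 0, 100.: 1, 220.: 2}
# and self.normalization_map[f] equal to sqrt(I7 moment at f) for each f.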
def find_point_bin(self, chi_coords):
"""
Given a set of coordinates in the chi parameter space, identify the
indices of the chi1 and chi2 bins that the point occurs in. Returns
these indices.
Parameters
-----------
chi_coords : numpy.array
The position of the point in the chi coordinates.
Returns
--------
chi1_bin : int
Index of the chi_1 bin.
chi2_bin : int
Index of the chi_2 bin.
"""
# Identify bin
chi1_bin = int((chi_coords[0] - self.chi1_min) // self.bin_spacing)
chi2_bin = int((chi_coords[1] - self.chi2_min) // self.bin_spacing)
self.check_bin_existence(chi1_bin, chi2_bin)
return chi1_bin, chi2_bin
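# Worked example of the bin arithmetic above (hypothetical numbers): with
# self.chi1_min = -4.0 and self.bin_spacing = 0.173 (roughly sqrt(1 - 0.97)),
# a point with chi_coords[0] = -3.1 falls in
# chi1_bin = int((-3.1 - (-4.0)) // 0.173) = int(0.9 // 0.173) = 5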
def check_bin_existence(self, chi1_bin, chi2_bin):
"""
Given indices for bins in chi1 and chi2 space check that the bin
exists in the object. If not add it. Also check for the existence of
all bins within +/- self.bin_range_check and add if not present.
Parameters
-----------
chi1_bin : int
The index of the chi1_bin to check
chi2_bin : int
The index of the chi2_bin to check
"""
bin_range_check = self.bin_range_check
# Check if this bin actually exists. If not add it
if ( (chi1_bin < self.min_chi1_bin+bin_range_check) or
(chi1_bin > self.max_chi1_bin-bin_range_check) or
(chi2_bin < self.min_chi2_bin+bin_range_check) or
(chi2_bin > self.max_chi2_bin-bin_range_check) ):
for temp_chi1 in range(chi1_bin-bin_range_check,
chi1_bin+bin_range_check+1):
if temp_chi1 not in self.massbank:
self.massbank[temp_chi1] = {}
self.bank[temp_chi1] = {}
for temp_chi2 in range(chi2_bin-bin_range_check,
chi2_bin+bin_range_check+1):
if temp_chi2 not in self.massbank[temp_chi1]:
self.massbank[temp_chi1][temp_chi2] = {}
self.massbank[temp_chi1][temp_chi2]['mass1s'] =\
numpy.array([])
self.bank[temp_chi1][temp_chi2] = []
def calc_point_distance(self, chi_coords):
"""
Calculate distance between point and the bank. Return the closest
distance.
Parameters
-----------
chi_coords : numpy.array
The position of the point in the chi coordinates.
Returns
--------
min_dist : float
The smallest **SQUARED** metric distance between the test point and
the bank.
indexes : The chi1_bin, chi2_bin and position within that bin at which
the closest matching point lies.
"""
chi1_bin, chi2_bin = self.find_point_bin(chi_coords)
min_dist = 1000000000
indexes = None
for chi1_bin_offset, chi2_bin_offset in self.bin_loop_order:
curr_chi1_bin = chi1_bin + chi1_bin_offset
curr_chi2_bin = chi2_bin + chi2_bin_offset
for idx, bank_chis in \
enumerate(self.bank[curr_chi1_bin][curr_chi2_bin]):
dist = coord_utils.calc_point_dist(chi_coords, bank_chis)
if dist < min_dist:
min_dist = dist
indexes = (curr_chi1_bin, curr_chi2_bin, idx)
return min_dist, indexes
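# Note on interpreting the return value (illustrative numbers): min_dist is
# a squared metric distance, which to leading order equals the mismatch, so
# min_dist = 0.02 corresponds to a match of roughly 1 - 0.02 = 0.98 with the
# nearest stored template.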
def test_point_distance(self, chi_coords, distance_threshold):
"""
Test if the distance between the supplied point and the bank is less
than the supplied distance threshold.
Parameters
-----------
chi_coords : numpy.array
The position of the point in the chi coordinates.
distance_threshold : float
The **SQUARED** metric distance (i.e. the mismatch) to use as the
threshold. E.g. if you want to test to a minimal match of 0.97 you
would use 1 - 0.97 = 0.03 for this value.
Returns
--------
Boolean
True if point is within the distance threshold. False if not.
"""
chi1_bin, chi2_bin = self.find_point_bin(chi_coords)
for chi1_bin_offset, chi2_bin_offset in self.bin_loop_order:
curr_chi1_bin = chi1_bin + chi1_bin_offset
curr_chi2_bin = chi2_bin + chi2_bin_offset
for bank_chis in self.bank[curr_chi1_bin][curr_chi2_bin]:
dist = coord_utils.calc_point_dist(chi_coords, bank_chis)
if dist < distance_threshold:
return True
else:
return False
def calc_point_distance_vary(self, chi_coords, point_fupper, mus):
"""
Calculate distance between point and the bank allowing the metric to
vary based on varying upper frequency cutoff. Slower than
calc_point_distance, but more reliable when upper frequency cutoff can
change a lot.
Parameters
-----------
chi_coords : numpy.array
The position of the point in the chi coordinates.
point_fupper : float
The upper frequency cutoff to use for this point. This value must
be one of the ones already calculated in the metric.
mus : numpy.array
A 2D array where idx 0 holds the upper frequency cutoff and idx 1
holds the coordinates in the [not covaried] mu parameter space for
each value of the upper frequency cutoff.
Returns
--------
min_dist : float
The smallest **SQUARED** metric distance between the test point and
the bank.
indexes : The chi1_bin, chi2_bin and position within that bin at which
the closest matching point lies.
"""
chi1_bin, chi2_bin = self.find_point_bin(chi_coords)
min_dist = 1000000000
indexes = None
for chi1_bin_offset, chi2_bin_offset in self.bin_loop_order:
curr_chi1_bin = chi1_bin + chi1_bin_offset
curr_chi2_bin = chi2_bin + chi2_bin_offset
# No points = Next iteration
curr_bank = self.massbank[curr_chi1_bin][curr_chi2_bin]
if not curr_bank['mass1s'].size:
continue
# *NOT* the same as .min and .max
f_upper = numpy.minimum(point_fupper, curr_bank['freqcuts'])
f_other = numpy.maximum(point_fupper, curr_bank['freqcuts'])
# NOTE: freq_idxes is a vector!
freq_idxes = numpy.array([self.frequency_map[f] for f in f_upper])
# vecs1 is a 2D array: idx0 = stored index, idx1 = mu index
vecs1 = mus[freq_idxes, :]
# vecs2 is a 2D array: idx0 = stored index, idx1 = mu index
range_idxes = numpy.arange(len(freq_idxes))
vecs2 = curr_bank['mus'][range_idxes, freq_idxes, :]
# Now do the sums
dists = (vecs1 - vecs2)*(vecs1 - vecs2)
# This reduces to 1D: idx = stored index
dists = numpy.sum(dists, axis=1)
norm_upper = numpy.array([self.normalization_map[f] \
for f in f_upper])
norm_other = numpy.array([self.normalization_map[f] \
for f in f_other])
norm_fac = norm_upper / norm_other
renormed_dists = 1 - (1 - dists)*norm_fac
curr_min_dist = renormed_dists.min()
if curr_min_dist < min_dist:
min_dist = curr_min_dist
indexes = curr_chi1_bin, curr_chi2_bin, renormed_dists.argmin()
return min_dist, indexes
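# Worked example of the renormalisation step above (hypothetical numbers):
# with a raw squared distance of 0.02 and norm_fac = 0.95 the corrected
# distance is 1 - (1 - 0.02)*0.95 = 1 - 0.931 = 0.069, so the loss of signal
# power between the two different cutoffs increases the effective distance.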
def test_point_distance_vary(self, chi_coords, point_fupper, mus,
distance_threshold):
"""
Test if distance between point and the bank is greater than distance
threshold while allowing the metric to
vary based on varying upper frequency cutoff. Slower than
test_point_distance, but more reliable when upper frequency cutoff can
change a lot.
Parameters
-----------
chi_coords : numpy.array
The position of the point in the chi coordinates.
point_fupper : float
The upper frequency cutoff to use for this point. This value must
be one of the ones already calculated in the metric.
mus : numpy.array
A 2D array where idx 0 holds the upper frequency cutoff and idx 1
holds the coordinates in the [not covaried] mu parameter space for
each value of the upper frequency cutoff.
distance_threshold : float
The **SQUARED** metric distance (i.e. the mismatch) to use as the
threshold. E.g. if you want to test to a minimal match of 0.97 you
would use 1 - 0.97 = 0.03 for this value.
Returns
--------
Boolean
True if point is within the distance threshold. False if not.
"""
chi1_bin, chi2_bin = self.find_point_bin(chi_coords)
for chi1_bin_offset, chi2_bin_offset in self.bin_loop_order:
curr_chi1_bin = chi1_bin + chi1_bin_offset
curr_chi2_bin = chi2_bin + chi2_bin_offset
# No points = Next iteration
curr_bank = self.massbank[curr_chi1_bin][curr_chi2_bin]
if not curr_bank['mass1s'].size:
continue
# *NOT* the same as .min and .max
f_upper = numpy.minimum(point_fupper, curr_bank['freqcuts'])
f_other = numpy.maximum(point_fupper, curr_bank['freqcuts'])
# NOTE: freq_idxes is a vector!
freq_idxes = numpy.array([self.frequency_map[f] for f in f_upper])
# vecs1 is a 2D array: idx0 = stored index, idx1 = mu index
vecs1 = mus[freq_idxes, :]
# vecs2 is a 2D array: idx0 = stored index, idx1 = mu index
range_idxes = numpy.arange(len(freq_idxes))
vecs2 = curr_bank['mus'][range_idxes,freq_idxes,:]
# Now do the sums
dists = (vecs1 - vecs2)*(vecs1 - vecs2)
# This reduces to 1D: idx = stored index
dists = numpy.sum(dists, axis=1)
# I wonder if this line actually speeds things up?
if (dists > distance_threshold).all():
continue
# This is only needed for close templates, should we prune?
norm_upper = numpy.array([self.normalization_map[f] \
for f in f_upper])
norm_other = numpy.array([self.normalization_map[f] \
for f in f_other])
norm_fac = norm_upper / norm_other
renormed_dists = 1 - (1 - dists)*norm_fac
if (renormed_dists < distance_threshold).any():
return True
else:
return False
def add_point_by_chi_coords(self, chi_coords, mass1, mass2, spin1z, spin2z,
point_fupper=None, mus=None):
"""
Add a point to the partitioned template bank. The point_fupper and mus
kwargs must be provided for all templates if the vary fupper capability
is desired. This requires the chi_coords, as well as mus and
point_fupper if needed, to be precalculated. If you just have the
masses and don't want to worry about translations see
add_point_by_masses, which will do translations and then call this.
Parameters
-----------
chi_coords : numpy.array
The position of the point in the chi coordinates.
mass1 : float
The heavier mass of the point to add.
mass2 : float
The lighter mass of the point to add.
spin1z: float
The [aligned] spin on the heavier body.
spin2z: float
The [aligned] spin on the lighter body.
point_fupper : float
The upper frequency cutoff to use for this point. This value must
be one of the ones already calculated in the metric.
mus : numpy.array
A 2D array where idx 0 holds the upper frequency cutoff and idx 1
holds the coordinates in the [not covaried] mu parameter space for
each value of the upper frequency cutoff.
"""
chi1_bin, chi2_bin = self.find_point_bin(chi_coords)
self.bank[chi1_bin][chi2_bin].append(copy.deepcopy(chi_coords))
curr_bank = self.massbank[chi1_bin][chi2_bin]
if curr_bank['mass1s'].size:
curr_bank['mass1s'] = numpy.append(curr_bank['mass1s'],
numpy.array([mass1]))
curr_bank['mass2s'] = numpy.append(curr_bank['mass2s'],
numpy.array([mass2]))
curr_bank['spin1s'] = numpy.append(curr_bank['spin1s'],
numpy.array([spin1z]))
curr_bank['spin2s'] = numpy.append(curr_bank['spin2s'],
numpy.array([spin2z]))
if point_fupper is not None:
curr_bank['freqcuts'] = numpy.append(curr_bank['freqcuts'],
numpy.array([point_fupper]))
# Mus needs to append onto axis 0. See below for contents of
# the mus variable
if mus is not None:
curr_bank['mus'] = numpy.append(curr_bank['mus'],
numpy.array([mus[:,:]]), axis=0)
else:
curr_bank['mass1s'] = numpy.array([mass1])
curr_bank['mass2s'] = numpy.array([mass2])
curr_bank['spin1s'] = numpy.array([spin1z])
curr_bank['spin2s'] = numpy.array([spin2z])
if point_fupper is not None:
curr_bank['freqcuts'] = numpy.array([point_fupper])
# curr_bank['mus'] is a 3D array
# NOTE: mu relates to the non-covaried Cartesian coordinate system
# Axis 0: Template index
# Axis 1: Frequency cutoff index
# Axis 2: Mu coordinate index
if mus is not None:
curr_bank['mus'] = numpy.array([mus[:,:]])
def add_point_by_masses(self, mass1, mass2, spin1z, spin2z,
vary_fupper=False):
"""
Add a point to the template bank. This differs from add_point_by_chi_coords
as it assumes that the chi coordinates and the products needed to use
vary_fupper have not already been calculated. This function calculates
these products and then calls add_point_by_chi_coords.
This function also
carries out a number of sanity checks (eg. is the point within the
ranges given by mass_range_params) that add_point_by_chi_coords does
not do for speed concerns.
Parameters
-----------
mass1 : float
Mass of the heavier body
mass2 : float
Mass of the lighter body
spin1z : float
Spin of the heavier body
spin2z : float
Spin of the lighter body
"""
# Test that masses are the expected way around (ie. mass1 > mass2)
if mass2 > mass1:
if not self.spin_warning_given:
warn_msg = "Am adding a template where mass2 > mass1. The "
warn_msg += "convention is that mass1 > mass2. Swapping mass1 "
warn_msg += "and mass2 and adding point to bank. This message "
warn_msg += "will not be repeated."
logging.warn(warn_msg)
self.spin_warning_given = True
# Test that the masses obey the restrictions of mass_range_params
if self.mass_range_params.is_outside_range(mass1, mass2, spin1z,
spin2z):
err_msg = "Point with masses given by "
err_msg += "%f %f %f %f " %(mass1, mass2, spin1z, spin2z)
err_msg += "(mass1, mass2, spin1z, spin2z) is not consistent "
err_msg += "with the provided command-line restrictions on masses "
err_msg += "and spins."
raise ValueError(err_msg)
# Get chi coordinates
chi_coords = coord_utils.get_cov_params(mass1, mass2, spin1z, spin2z,
self.metric_params,
self.ref_freq)
# Get mus and best fupper for this point, if needed
if vary_fupper:
mass_dict = {}
mass_dict['m1'] = numpy.array([mass1])
mass_dict['m2'] = numpy.array([mass2])
mass_dict['s1z'] = numpy.array([spin1z])
mass_dict['s2z'] = numpy.array([spin2z])
freqs = numpy.array(list(self.frequency_map.keys()), dtype=float)
freq_cutoff = coord_utils.return_nearest_cutoff(\
self.upper_freq_formula, mass_dict, freqs)
freq_cutoff = freq_cutoff[0]
lambdas = coord_utils.get_chirp_params\
(mass1, mass2, spin1z, spin2z, self.metric_params.f0,
self.metric_params.pnOrder)
mus = []
for freq in self.frequency_map:
mus.append(coord_utils.get_mu_params(lambdas,
self.metric_params, freq) )
mus = numpy.array(mus)
else:
freq_cutoff=None
mus=None
self.add_point_by_chi_coords(chi_coords, mass1, mass2, spin1z, spin2z,
point_fupper=freq_cutoff, mus=mus)
def add_tmpltbank_from_xml_table(self, sngl_table, vary_fupper=False):
"""
This function will take a sngl_inspiral_table of templates and add them
into the partitioned template bank object.
Parameters
-----------
sngl_table : sngl_inspiral_table
List of sngl_inspiral templates.
vary_fupper : boolean, optional (default = False)
If given also include the additional information needed to compute
distances with a varying upper frequency cutoff.
"""
for sngl in sngl_table:
self.add_point_by_masses(sngl.mass1, sngl.mass2, sngl.spin1z,
sngl.spin2z, vary_fupper=vary_fupper)
def add_tmpltbank_from_hdf_file(self, hdf_fp, vary_fupper=False):
"""
This function will take a pointer to an open HDF File object containing
a list of templates and add them into the partitioned template bank
object.
Parameters
-----------
hdf_fp : h5py.File object
The template bank in HDF5 format.
vary_fupper : boolean, optional (default = False)
If given also include the additional information needed to compute
distances with a varying upper frequency cutoff.
"""
mass1s = hdf_fp['mass1'][:]
mass2s = hdf_fp['mass2'][:]
spin1zs = hdf_fp['spin1z'][:]
spin2zs = hdf_fp['spin2z'][:]
for idx in range(len(mass1s)):
self.add_point_by_masses(mass1s[idx], mass2s[idx], spin1zs[idx],
spin2zs[idx], vary_fupper=vary_fupper)
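# Illustrative usage (the file name and the previously constructed
# part_bank instance are hypothetical):
# >>> import h5py
# >>> with h5py.File("H1L1-BANK.hdf", "r") as hdf_fp:
# ...     part_bank.add_tmpltbank_from_hdf_file(hdf_fp)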
def output_all_points(self):
"""Return all points in the bank.
Return all points in the bank as lists of m1, m2, spin1z, spin2z.
Returns
-------
mass1 : list
List of mass1 values.
mass2 : list
List of mass2 values.
spin1z : list
List of spin1z values.
spin2z : list
List of spin2z values.
"""
mass1 = []
mass2 = []
spin1z = []
spin2z = []
for i in self.massbank.keys():
for j in self.massbank[i].keys():
for k in range(len(self.massbank[i][j]['mass1s'])):
curr_bank = self.massbank[i][j]
mass1.append(curr_bank['mass1s'][k])
mass2.append(curr_bank['mass2s'][k])
spin1z.append(curr_bank['spin1s'][k])
spin2z.append(curr_bank['spin2s'][k])
return mass1, mass2, spin1z, spin2z
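# End-to-end usage sketch (illustrative only; mass_range_params,
# metric_params and the chosen numbers are assumed to be valid inputs):
# >>> part_bank = PartitionedTmpltbank(mass_range_params, metric_params,
# ...                                  ref_freq=1000., bin_spacing=0.03**0.5)
# >>> part_bank.add_point_by_masses(1.4, 1.35, 0.0, 0.0)
# >>> chis = coord_utils.get_cov_params(1.5, 1.3, 0.0, 0.0, metric_params,
# ...                                   1000.)
# >>> part_bank.test_point_distance(chis, 0.03)
# >>> mass1, mass2, spin1z, spin2z = part_bank.output_all_points()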
| 28,141
| 42.903276
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/tmpltbank/coord_utils.py
|
# Copyright (C) 2013 Ian W. Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import numpy
from pycbc.tmpltbank.lambda_mapping import get_chirp_params
from pycbc import conversions
from pycbc import pnutils
from pycbc.neutron_stars import load_ns_sequence
def estimate_mass_range(numPoints, massRangeParams, metricParams, fUpper,\
covary=True):
"""
This function will generate a large set of points with random masses and
spins (using pycbc.tmpltbank.get_random_mass) and translate these points
into the xi_i coordinate system for the given upper frequency cutoff.
Parameters
----------
numPoints : int
Number of systems to simulate
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper to use when getting the mu coordinates from the
lambda coordinates. This must be a key in metricParams.evals and
metricParams.evecs (ie. we must know how to do the transformation for
the given value of fUpper). It also must be a key in
metricParams.evecsCV if covary=True.
covary : boolean, optional (default = True)
If this is given then evecsCV will be used to rotate from the Cartesian
coordinate system into the principal coordinate direction (xi_i). If
not given then points in the original Cartesian coordinates are
returned.
Returns
-------
xis : numpy.array
A list of the positions of each point in the xi_i coordinate system.
"""
vals_set = get_random_mass(numPoints, massRangeParams)
mass1 = vals_set[0]
mass2 = vals_set[1]
spin1z = vals_set[2]
spin2z = vals_set[3]
if covary:
lambdas = get_cov_params(mass1, mass2, spin1z, spin2z, metricParams,
fUpper)
else:
lambdas = get_conv_params(mass1, mass2, spin1z, spin2z, metricParams,
fUpper)
return numpy.array(lambdas)
def get_random_mass_point_particles(numPoints, massRangeParams):
"""
This function will generate a large set of points within the chosen mass
and spin space. It will also return the corresponding PN spin coefficients
for ease of use later (though these may be removed at some future point).
Parameters
----------
numPoints : int
Number of systems to simulate
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
Returns
--------
mass1 : float
Mass of heavier body.
mass2 : float
Mass of lighter body.
spin1z : float
Spin of body 1.
spin2z : float
Spin of body 2.
"""
# WARNING: We expect mass1 > mass2 ALWAYS
# First we choose the total masses from a uniform distribution in mass
# to the -5/3. power.
mass = numpy.random.random(numPoints) * \
(massRangeParams.minTotMass**(-5./3.) \
- massRangeParams.maxTotMass**(-5./3.)) \
+ massRangeParams.maxTotMass**(-5./3.)
mass = mass**(-3./5.)
# Next we choose the mass ratios, this will take different limits based on
# the value of total mass
maxmass2 = numpy.minimum(mass/2., massRangeParams.maxMass2)
minmass1 = numpy.maximum(massRangeParams.minMass1, mass/2.)
mineta = numpy.maximum(massRangeParams.minCompMass \
* (mass-massRangeParams.minCompMass)/(mass*mass), \
massRangeParams.maxCompMass \
* (mass-massRangeParams.maxCompMass)/(mass*mass))
# Note that mineta is a numpy.array because mineta depends on the total
# mass. Therefore this is not precomputed in the massRangeParams instance
if massRangeParams.minEta:
mineta = numpy.maximum(massRangeParams.minEta, mineta)
# Eta also restricted by chirp mass restrictions
if massRangeParams.min_chirp_mass:
eta_val_at_min_chirp = massRangeParams.min_chirp_mass / mass
eta_val_at_min_chirp = eta_val_at_min_chirp**(5./3.)
mineta = numpy.maximum(mineta, eta_val_at_min_chirp)
maxeta = numpy.minimum(massRangeParams.maxEta, maxmass2 \
* (mass - maxmass2) / (mass*mass))
maxeta = numpy.minimum(maxeta, minmass1 \
* (mass - minmass1) / (mass*mass))
# max eta also affected by chirp mass restrictions
if massRangeParams.max_chirp_mass:
eta_val_at_max_chirp = massRangeParams.max_chirp_mass / mass
eta_val_at_max_chirp = eta_val_at_max_chirp**(5./3.)
maxeta = numpy.minimum(maxeta, eta_val_at_max_chirp)
if (maxeta < mineta).any():
errMsg = "ERROR: Maximum eta is smaller than minimum eta!!"
raise ValueError(errMsg)
eta = numpy.random.random(numPoints) * (maxeta - mineta) + mineta
# Also calculate the component masses; mass1 > mass2
diff = (mass*mass * (1-4*eta))**0.5
mass1 = (mass + diff)/2.
mass2 = (mass - diff)/2.
# Check the masses are where we want them to be (allowing some floating
# point rounding error).
if (mass1 > massRangeParams.maxMass1*1.001).any() \
or (mass1 < massRangeParams.minMass1*0.999).any():
errMsg = "Mass1 is not within the specified mass range."
raise ValueError(errMsg)
if (mass2 > massRangeParams.maxMass2*1.001).any() \
or (mass2 < massRangeParams.minMass2*0.999).any():
errMsg = "Mass2 is not within the specified mass range."
raise ValueError(errMsg)
# Next up is the spins. First check if we have non-zero spins
if massRangeParams.maxNSSpinMag == 0 and massRangeParams.maxBHSpinMag == 0:
spin1z = numpy.zeros(numPoints,dtype=float)
spin2z = numpy.zeros(numPoints,dtype=float)
elif massRangeParams.nsbhFlag:
# Spin 1 first
mspin = numpy.zeros(len(mass1))
mspin += massRangeParams.maxBHSpinMag
spin1z = (2*numpy.random.random(numPoints) - 1) * mspin
# Then spin2
mspin = numpy.zeros(len(mass2))
mspin += massRangeParams.maxNSSpinMag
spin2z = (2*numpy.random.random(numPoints) - 1) * mspin
else:
boundary_mass = massRangeParams.ns_bh_boundary_mass
# Spin 1 first
mspin = numpy.zeros(len(mass1))
mspin += massRangeParams.maxNSSpinMag
mspin[mass1 > boundary_mass] = massRangeParams.maxBHSpinMag
spin1z = (2*numpy.random.random(numPoints) - 1) * mspin
# Then spin 2
mspin = numpy.zeros(len(mass2))
mspin += massRangeParams.maxNSSpinMag
mspin[mass2 > boundary_mass] = massRangeParams.maxBHSpinMag
spin2z = (2*numpy.random.random(numPoints) - 1) * mspin
return mass1, mass2, spin1z, spin2z
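# Worked example of the mass reconstruction above (hypothetical numbers):
# for a total mass of 4.0 and eta = 0.1875,
# diff = sqrt(4.0**2 * (1 - 4*0.1875)) = sqrt(16 * 0.25) = 2.0,
# giving mass1 = (4.0 + 2.0)/2 = 3.0 and mass2 = (4.0 - 2.0)/2 = 1.0.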
def get_random_mass(numPoints, massRangeParams, eos='2H'):
"""
This function will generate a large set of points within the chosen mass
and spin space, and with the desired minimum remnant disk mass (this applies
to NS-BH systems only). It will also return the corresponding PN spin
coefficients for ease of use later (though these may be removed at some
future point).
Parameters
----------
numPoints : int
Number of systems to simulate
massRangeParams : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
eos : string
Name of equation of state of neutron star.
Returns
--------
mass1 : float
Mass of heavier body.
mass2 : float
Mass of lighter body.
spin1z : float
Spin of body 1.
spin2z : float
Spin of body 2.
"""
# WARNING: We expect mass1 > mass2 ALWAYS
# Check if EM contraints are required, i.e. if the systems must produce
# a minimum remnant disk mass. If this is not the case, proceed treating
# the systems as point particle binaries
if massRangeParams.remnant_mass_threshold is None:
mass1, mass2, spin1z, spin2z = \
get_random_mass_point_particles(numPoints, massRangeParams)
# otherwise, load EOS dependent data, generate the EM constraint
# (i.e. compute the minimum symmetric mass ratio needed to
# generate a given remnant disk mass as a function of the NS
# mass and the BH spin along z) and then proceed by accepting
# only systems that can yield (at least) the desired remnant
# disk mass and that pass the mass and spin range cuts.
else:
max_ns_g_mass = load_ns_sequence(massRangeParams.ns_eos)[1]
boundary_mass = massRangeParams.ns_bh_boundary_mass
if max_ns_g_mass < boundary_mass:
warn_msg = "WARNING: "
warn_msg += "Option of ns-bh-boundary-mass is %s " %(boundary_mass)
warn_msg += "which is higher than the maximum NS gravitational "
warn_msg += "mass admitted by the EOS that was prescribed "
warn_msg += "(%s). " %(max_ns_g_mass)
warn_msg += "The code will proceed using the latter value "
warn_msg += "as the boundary mass."
logging.warn(warn_msg)
boundary_mass = max_ns_g_mass
# Empty arrays to store points that pass all cuts
mass1_out = []
mass2_out = []
spin1z_out = []
spin2z_out = []
# As the EM cut can remove several randomly generated
# binaries, track the number of accepted points that pass
# all cuts and stop only once enough of them are generated
numPointsFound = 0
while numPointsFound < numPoints:
# Generate the random points within the required mass
# and spin cuts
mass1, mass2, spin1z, spin2z = \
get_random_mass_point_particles(numPoints-numPointsFound,
massRangeParams)
# Now proceed with cutting out EM dim systems
# Use a logical mask to track points that do not correspond to
# BBHs. The remaining points will be BNSs and NSBHs.
# Further down, EM-dim NSBHs will also be removed.
mask_not_bbh = numpy.zeros(len(mass1), dtype=bool)
# Keep a point if:
# 1) the secondary object is not a BH (mass2 < boundary mass)
# [Store masses and spins of non BBH]
mask_not_bbh[mass2 < boundary_mass] = True
mass1_not_bbh = mass1[mask_not_bbh]
mass2_not_bbh = mass2[mask_not_bbh]
spin1z_not_bbh = spin1z[mask_not_bbh]
spin2z_not_bbh = spin2z[mask_not_bbh]
# 2) and if the primary mass is a NS (i.e., it is a BNS), or...
mask_nsbh = numpy.zeros(len(mass1_not_bbh), dtype=bool)
# [mask_nsbh identifies NSBH systems]
mask_nsbh[mass1_not_bbh > boundary_mass] = True
# [mask_bns identifies BNS systems]
mask_bns = ~mask_nsbh
# [Store masses and spins of BNSs]
mass1_bns = mass1_not_bbh[mask_bns]
mass2_bns = mass2_not_bbh[mask_bns]
spin1z_bns = spin1z_not_bbh[mask_bns]
spin2z_bns = spin2z_not_bbh[mask_bns]
# 3) ...it is an NS-BH with remnant mass greater than the threshold
# required to have a counterpart
# [Store masses and spins of all NSBHs]
mass1_nsbh = mass1_not_bbh[mask_nsbh]
mass2_nsbh = mass2_not_bbh[mask_nsbh]
spin1z_nsbh = spin1z_not_bbh[mask_nsbh]
spin2z_nsbh = spin2z_not_bbh[mask_nsbh]
# [Store etas of all NSBHs]
eta_nsbh = conversions.eta_from_mass1_mass2(mass1_nsbh, mass2_nsbh)
# [mask_bright_nsbh will identify NSBH systems with high enough
# threshold mass]
mask_bright_nsbh = numpy.zeros(len(mass1_nsbh), dtype=bool)
if eta_nsbh.size != 0:
remnant = conversions.remnant_mass_from_mass1_mass2_cartesian_spin_eos(
mass1_nsbh,
mass2_nsbh,
spin1x=0.0,
spin1y=0.0,
spin1z=spin1z_nsbh,
eos=eos
)
mask_bright_nsbh[remnant
>
massRangeParams.remnant_mass_threshold] = True
# Keep only points that correspond to binaries that can produce an
# EM counterpart (i.e., BNSs and EM-bright NSBHs) and add their
# properties to the pile of accepted points to output
mass1_out = numpy.concatenate((mass1_out, mass1_bns,
mass1_nsbh[mask_bright_nsbh]))
mass2_out = numpy.concatenate((mass2_out, mass2_bns,
mass2_nsbh[mask_bright_nsbh]))
spin1z_out = numpy.concatenate((spin1z_out, spin1z_bns,
spin1z_nsbh[mask_bright_nsbh]))
spin2z_out = numpy.concatenate((spin2z_out, spin2z_bns,
spin2z_nsbh[mask_bright_nsbh]))
# Number of points that survived all cuts
numPointsFound = len(mass1_out)
# Ready to go
mass1 = mass1_out
mass2 = mass2_out
spin1z = spin1z_out
spin2z = spin2z_out
return mass1, mass2, spin1z, spin2z
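# Illustrative call (mr_params is an assumed massRangeParameters instance):
# >>> m1, m2, s1z, s2z = get_random_mass(1000, mr_params)
# >>> len(m1)  # expected 1000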
def get_cov_params(mass1, mass2, spin1z, spin2z, metricParams, fUpper,
lambda1=None, lambda2=None, quadparam1=None,
quadparam2=None):
"""
Function to convert between masses and spins and locations in the xi
parameter space. Xi = Cartesian metric and rotated to principal components.
Parameters
-----------
mass1 : float
Mass of heavier body.
mass2 : float
Mass of lighter body.
spin1z : float
Spin of body 1.
spin2z : float
Spin of body 2.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper to use when getting the mu coordinates from the
lambda coordinates. This must be a key in metricParams.evals,
metricParams.evecs and metricParams.evecsCV
(ie. we must know how to do the transformation for
the given value of fUpper)
Returns
--------
xis : list of floats or numpy.arrays
Position of the system(s) in the xi coordinate system
"""
# Do this by doing masses - > lambdas -> mus
mus = get_conv_params(mass1, mass2, spin1z, spin2z, metricParams, fUpper,
lambda1=lambda1, lambda2=lambda2,
quadparam1=quadparam1, quadparam2=quadparam2)
# and then mus -> xis
xis = get_covaried_params(mus, metricParams.evecsCV[fUpper])
return xis
def get_conv_params(mass1, mass2, spin1z, spin2z, metricParams, fUpper,
lambda1=None, lambda2=None, quadparam1=None,
quadparam2=None):
"""
Function to convert between masses and spins and locations in the mu
parameter space. Mu = Cartesian metric, but not principal components.
Parameters
-----------
mass1 : float
Mass of heavier body.
mass2 : float
Mass of lighter body.
spin1z : float
Spin of body 1.
spin2z : float
Spin of body 2.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper to use when getting the mu coordinates from the
lambda coordinates. This must be a key in metricParams.evals and
metricParams.evecs (ie. we must know how to do the transformation for
the given value of fUpper)
Returns
--------
mus : list of floats or numpy.arrays
Position of the system(s) in the mu coordinate system
"""
# Do this by masses -> lambdas
lambdas = get_chirp_params(mass1, mass2, spin1z, spin2z,
metricParams.f0, metricParams.pnOrder,
lambda1=lambda1, lambda2=lambda2,
quadparam1=quadparam1, quadparam2=quadparam2)
# and lambdas -> mus
mus = get_mu_params(lambdas, metricParams, fUpper)
return mus
def get_mu_params(lambdas, metricParams, fUpper):
"""
Function to rotate from the lambda coefficients into position in the mu
coordinate system. Mu = Cartesian metric, but not principal components.
Parameters
-----------
lambdas : list of floats or numpy.arrays
Position of the system(s) in the lambda coefficients
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper to use when getting the mu coordinates from the
lambda coordinates. This must be a key in metricParams.evals and
metricParams.evecs (ie. we must know how to do the transformation for
the given value of fUpper)
Returns
--------
mus : list of floats or numpy.arrays
Position of the system(s) in the mu coordinate system
"""
lambdas = numpy.array(lambdas, copy=False)
# If original inputs were floats we need to make this a 2D array
if len(lambdas.shape) == 1:
resize_needed = True
lambdas = lambdas[:,None]
else:
resize_needed = False
evecs = metricParams.evecs[fUpper]
evals = metricParams.evals[fUpper]
evecs = numpy.array(evecs, copy=False)
mus = ((lambdas.T).dot(evecs)).T
mus = mus * numpy.sqrt(evals)[:,None]
if resize_needed:
mus = numpy.ndarray.flatten(mus)
return mus
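# Worked example of the rotation above (hypothetical 2-D metric): with evecs
# equal to the identity matrix and evals = [4., 9.], a point at
# lambdas = [1., 2.] maps to mus = [1.*sqrt(4.), 2.*sqrt(9.)] = [2., 6.].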
def get_covaried_params(mus, evecsCV):
"""
Function to rotate from position(s) in the mu_i coordinate system into the
position(s) in the xi_i coordinate system
Parameters
-----------
mus : list of floats or numpy.arrays
Position of the system(s) in the mu coordinate system
evecsCV : numpy.matrix
This matrix is used to perform the rotation to the xi_i
coordinate system.
Returns
--------
xis : list of floats or numpy.arrays
Position of the system(s) in the xi coordinate system
"""
mus = numpy.array(mus, copy=False)
# If original inputs were floats we need to make this a 2D array
if len(mus.shape) == 1:
resize_needed = True
mus = mus[:,None]
else:
resize_needed = False
xis = ((mus.T).dot(evecsCV)).T
if resize_needed:
xis = numpy.ndarray.flatten(xis)
return xis
def rotate_vector(evecs, old_vector, rescale_factor, index):
"""
Function to find the position of the system(s) in one of the xi_i or mu_i
directions.
Parameters
-----------
evecs : numpy.matrix
Matrix of the eigenvectors of the metric in lambda_i coordinates. Used
to rotate to a Cartesian coordinate system.
old_vector : list of floats or numpy.arrays
The position of the system(s) in the original coordinates
rescale_factor : float
Scaling factor to apply to resulting position(s)
index : int
The index of the final coordinate system that is being computed. Ie.
if we are going from mu_i -> xi_j, this will give j.
Returns
--------
positions : float or numpy.array
Position of the point(s) in the resulting coordinate.
"""
temp = 0
for i in range(len(evecs)):
temp += (evecs[i,index] * rescale_factor) * old_vector[i]
return temp
def get_point_distance(point1, point2, metricParams, fUpper):
"""
Function to calculate the mismatch between two points, supplied in terms
of the masses and spins, using the xi_i parameter space metric to
approximate the mismatch of the two points. Can also take one of the points
as an array of points and return an array of mismatches (but only one can
be an array!)
Parameters
-----------
point1 : List of floats or numpy.arrays
point1[0] contains the mass(es) of the heaviest body(ies).
point1[1] contains the mass(es) of the smallest body(ies).
point1[2] contains the spin(es) of the heaviest body(ies).
point1[3] contains the spin(es) of the smallest body(ies).
point2 : List of floats
point2[0] contains the mass of the heaviest body.
point2[1] contains the mass of the smallest body.
point2[2] contains the spin of the heaviest body.
point2[3] contains the spin of the smallest body.
metricParams : metricParameters instance
Structure holding all the options for construction of the metric
and the eigenvalues, eigenvectors and covariance matrix
needed to manipulate the space.
fUpper : float
The value of fUpper to use when getting the mu coordinates from the
lambda coordinates. This must be a key in metricParams.evals,
metricParams.evecs and metricParams.evecsCV
(ie. we must know how to do the transformation for
the given value of fUpper)
Returns
--------
dist : float or numpy.array
Distance between the point2 and all points in point1
xis1 : List of floats or numpy.arrays
Position of the input point1(s) in the xi_i parameter space
xis2 : List of floats
Position of the input point2 in the xi_i parameter space
"""
aMass1 = point1[0]
aMass2 = point1[1]
aSpin1 = point1[2]
aSpin2 = point1[3]
bMass1 = point2[0]
bMass2 = point2[1]
bSpin1 = point2[2]
bSpin2 = point2[3]
aXis = get_cov_params(aMass1, aMass2, aSpin1, aSpin2, metricParams, fUpper)
bXis = get_cov_params(bMass1, bMass2, bSpin1, bSpin2, metricParams, fUpper)
dist = (aXis[0] - bXis[0])**2
for i in range(1,len(aXis)):
dist += (aXis[i] - bXis[i])**2
return dist, aXis, bXis
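# Illustrative call (metricParams and the 1000 Hz cutoff are assumed to be
# available and precomputed); the returned dist approximates the mismatch
# between the two systems:
# >>> dist, xis1, xis2 = get_point_distance([1.40, 1.35, 0., 0.],
# ...                                       [1.45, 1.30, 0., 0.],
# ...                                       metricParams, 1000.)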
def calc_point_dist(vsA, entryA):
"""
This function is used to determine the distance between two points.
Parameters
----------
vsA : list or numpy.array or similar
An array of point 1's position in the \chi_i coordinate system
entryA : list or numpy.array or similar
An array of point 2's position in the \chi_i coordinate system
Returns
--------
val : float
The metric distance between the two points.
"""
chi_diffs = vsA - entryA
val = ((chi_diffs)*(chi_diffs)).sum()
return val
def test_point_dist(point_1_chis, point_2_chis, distance_threshold):
"""
This function tests if the difference between two points in the chi
parameter space is less than a distance threshold. Returns True if it is
and False if it is not.
Parameters
----------
point_1_chis : numpy.array
An array of point 1's position in the \chi_i coordinate system
point_2_chis : numpy.array
An array of point 2's position in the \chi_i coordinate system
distance_threshold : float
The distance threshold to use.
"""
return calc_point_dist(point_1_chis, point_2_chis) < distance_threshold
def calc_point_dist_vary(mus1, fUpper1, mus2, fUpper2, fMap, norm_map, MMdistA):
"""
Function to determine if two points, with differing upper frequency cutoffs
have a mismatch < MMdistA for *both* upper frequency cutoffs.
Parameters
----------
mus1 : List of numpy arrays
mus1[i] will give the array of point 1's position in the \chi_j
coordinate system. The i element corresponds to varying values of the
upper frequency cutoff. fMap is used to map between i and actual
frequencies
fUpper1 : float
The upper frequency cutoff of point 1.
mus2 : List of numpy arrays
mus2[i] will give the array of point 2's position in the \chi_j
coordinate system. The i element corresponds to varying values of the
upper frequency cutoff. fMap is used to map between i and actual
frequencies
fUpper2 : float
The upper frequency cutoff of point 2.
fMap : dictionary
fMap[fUpper] will give the index needed to get the \chi_j coordinates
in the two sets of mus
norm_map : dictionary
norm_map[fUpper] will give the relative frequency domain template
amplitude (sigma) at the given value of fUpper.
MMdistA : float
The minimal mismatch allowed between the points
Returns
--------
Boolean
True if the points have a mismatch < MMdistA
False if the points have a mismatch > MMdistA
"""
f_upper = min(fUpper1, fUpper2)
f_other = max(fUpper1, fUpper2)
idx = fMap[f_upper]
vecs1 = mus1[idx]
vecs2 = mus2[idx]
val = ((vecs1 - vecs2)*(vecs1 - vecs2)).sum()
if (val > MMdistA):
return False
# Reduce match to account for normalization.
norm_fac = norm_map[f_upper] / norm_map[f_other]
val = 1 - (1 - val)*norm_fac
return (val < MMdistA)
def find_max_and_min_frequencies(name, mass_range_params, freqs):
"""
Determine the maximum and minimum upper frequency cutoffs that can occur
over the given mass range for the named cutoff formula, and return the
frequencies at which the metric was computed that are closest to them.
Parameters
-----------
name : string
Name of the cutoff formula (must be a key of
pnutils.named_frequency_cutoffs).
mass_range_params : massRangeParameters instance
Instance holding all the details of mass ranges and spin ranges.
freqs : numpy.array
The frequencies for which the metric has been computed.
Returns
--------
numpy.array
The metric frequencies closest to the lowest and highest cutoff.
"""
cutoff_fns = pnutils.named_frequency_cutoffs
if name not in cutoff_fns.keys():
err_msg = "%s not recognized as a valid cutoff frequency choice." %name
err_msg += "Recognized choices: " + " ".join(cutoff_fns.keys())
raise ValueError(err_msg)
# Can I do this quickly?
total_mass_approxs = {
"SchwarzISCO": pnutils.f_SchwarzISCO,
"LightRing" : pnutils.f_LightRing,
"ERD" : pnutils.f_ERD
}
if name in total_mass_approxs.keys():
# This can be done quickly if the cutoff only depends on total mass
# Assumes that lower total mass = higher cutoff frequency
upper_f_cutoff = total_mass_approxs[name](mass_range_params.minTotMass)
lower_f_cutoff = total_mass_approxs[name](mass_range_params.maxTotMass)
else:
# Do this numerically
# FIXME: Is 1000000 the right choice? I think so, but just highlighting
mass1, mass2, spin1z, spin2z = \
get_random_mass(1000000, mass_range_params)
mass_dict = {}
mass_dict['mass1'] = mass1
mass_dict['mass2'] = mass2
mass_dict['spin1z'] = spin1z
mass_dict['spin2z'] = spin2z
tmp_freqs = cutoff_fns[name](mass_dict)
upper_f_cutoff = tmp_freqs.max()
lower_f_cutoff = tmp_freqs.min()
cutoffs = numpy.array([lower_f_cutoff,upper_f_cutoff])
if lower_f_cutoff < freqs.min():
warn_msg = "WARNING: "
warn_msg += "Lowest frequency cutoff is %s Hz " %(lower_f_cutoff,)
warn_msg += "which is lower than the lowest frequency calculated "
warn_msg += "for the metric: %s Hz. " %(freqs.min())
warn_msg += "Distances for these waveforms will be calculated at "
warn_msg += "the lowest available metric frequency."
logging.warn(warn_msg)
if upper_f_cutoff > freqs.max():
warn_msg = "WARNING: "
warn_msg += "Highest frequency cutoff is %s Hz " %(upper_f_cutoff,)
warn_msg += "which is larger than the highest frequency calculated "
warn_msg += "for the metric: %s Hz. " %(freqs.max())
warn_msg += "Distances for these waveforms will be calculated at "
warn_msg += "the largest available metric frequency."
        logging.warning(warn_msg)
return find_closest_calculated_frequencies(cutoffs, freqs)
def return_nearest_cutoff(name, mass_dict, freqs):
"""
Given an array of total mass values and an (ascending) list of
frequencies, this will calculate the specified cutoff formula for each
mtotal and return the nearest frequency to each cutoff from the input
list.
Currently only supports cutoffs that are functions of the total mass
and no other parameters (SchwarzISCO, LightRing, ERD)
Parameters
----------
name : string
Name of the cutoff formula to be approximated
    mass_dict : dictionary
        Dictionary whose keys are used to call the functions returned by
        tmpltbank.named_frequency_cutoffs. The values can be numpy arrays or
        single values.
freqs : list of floats
A list of frequencies (must be sorted ascending)
Returns
-------
numpy.array
        The frequencies closest to the cutoff for each set of masses in
        mass_dict.
"""
# A bypass for the redundant case
if len(freqs) == 1:
return numpy.zeros(len(mass_dict['m1']), dtype=float) + freqs[0]
cutoff_fns = pnutils.named_frequency_cutoffs
if name not in cutoff_fns.keys():
err_msg = "%s not recognized as a valid cutoff frequency choice." %name
err_msg += "Recognized choices: " + " ".join(cutoff_fns.keys())
raise ValueError(err_msg)
f_cutoff = cutoff_fns[name](mass_dict)
return find_closest_calculated_frequencies(f_cutoff, freqs)
def find_closest_calculated_frequencies(input_freqs, metric_freqs):
"""
Given a value (or array) of input frequencies find the closest values in
the list of frequencies calculated in the metric.
Parameters
-----------
input_freqs : numpy.array or float
The frequency(ies) that you want to find the closest value in
metric_freqs
metric_freqs : numpy.array
The list of frequencies calculated by the metric
Returns
--------
output_freqs : numpy.array or float
The list of closest values to input_freqs for which the metric was
computed
"""
try:
refEv = numpy.zeros(len(input_freqs),dtype=float)
except TypeError:
refEv = numpy.zeros(1, dtype=float)
input_freqs = numpy.array([input_freqs])
if len(metric_freqs) == 1:
refEv[:] = metric_freqs[0]
return refEv
# FIXME: This seems complicated for what is a simple operation. Is there
# a simpler *and* faster way of doing this?
# NOTE: This function assumes a sorted list of frequencies
    # NOTE: input_freqs is a numpy array because this function is designed
    #       so that the closest frequencies can be found for many systems
    #       simultaneously
for i in range(len(metric_freqs)):
if i == 0:
# If frequency is lower than halfway between the first two entries
# use the first (lowest) value
logicArr = input_freqs < ((metric_freqs[0] + metric_freqs[1])/2.)
elif i == (len(metric_freqs)-1):
# If frequency is larger than halfway between the last two entries
# use the last (highest) value
logicArr = input_freqs > ((metric_freqs[-2] + metric_freqs[-1])/2.)
else:
# For frequencies within the range in freqs, check which points
# should use the frequency corresponding to index i.
logicArrA = input_freqs > ((metric_freqs[i-1] + metric_freqs[i])/2.)
logicArrB = input_freqs < ((metric_freqs[i] + metric_freqs[i+1])/2.)
logicArr = numpy.logical_and(logicArrA,logicArrB)
if logicArr.any():
refEv[logicArr] = metric_freqs[i]
return refEv
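# An illustrative example of the mapping performed above (values chosen by the
# editor, not taken from the original source):
#
#     >>> find_closest_calculated_frequencies(numpy.array([12., 31.]),
#     ...                                     numpy.array([10., 20., 40.]))
#     array([10., 40.])
#
# 12 Hz lies below the 10/20 midpoint so maps to 10 Hz, while 31 Hz lies above
# the 20/40 midpoint so maps to 40 Hz.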
def outspiral_loop(N):
"""
Return a list of points that will loop outwards in a 2D lattice in terms
of distance from a central point. So if N=2 this will be [0,0], [0,1],
[0,-1],[1,0],[-1,0],[1,1] .... This is useful when you want to loop over
a number of bins, but want to start in the center and work outwards.
"""
# Create a 2D lattice of all points
X,Y = numpy.meshgrid(numpy.arange(-N,N+1), numpy.arange(-N,N+1))
# Flatten it
X = numpy.ndarray.flatten(X)
Y = numpy.ndarray.flatten(Y)
# Force to an integer
X = numpy.array(X, dtype=int)
Y = numpy.array(Y, dtype=int)
# Calculate distances
G = numpy.sqrt(X**2+Y**2)
# Combine back into an array
out_arr = numpy.array([X,Y,G])
# And order correctly
sorted_out_arr = out_arr[:,out_arr[2].argsort()]
return sorted_out_arr[:2,:].T
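# A small sketch of the ordering produced above (illustrative only): for N=1
# the returned array has 9 rows, starting with [0, 0], followed by the four
# points at distance 1 ([0, 1], [0, -1], [1, 0], [-1, 0]) and then the four
# diagonal points at distance sqrt(2). The relative order of equidistant
# points is not guaranteed, since argsort makes no stability promise here.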
| 33,431
| 38.611374
| 87
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/pegasus_workflow.py
|
# Copyright (C) 2014 Alex Nitz
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" This module provides thin wrappers around Pegasus.DAX3 functionality that
provides additional abstraction and argument handling.
"""
import os
import logging
import shutil
import tempfile
import subprocess
from packaging import version
from urllib.request import pathname2url
from urllib.parse import urljoin, urlsplit
import Pegasus.api as dax
PEGASUS_FILE_DIRECTORY = os.path.join(os.path.dirname(__file__),
'pegasus_files')
class ProfileShortcuts(object):
""" Container of common methods for setting pegasus profile information
    on Executables and nodes. This class expects to be inherited from and
    for an add_profile method to be implemented.
"""
def set_memory(self, size):
""" Set the amount of memory that is required in megabytes
"""
self.add_profile('condor', 'request_memory', '%sM' % size)
def set_storage(self, size):
""" Set the amount of storage required in megabytes
"""
self.add_profile('condor', 'request_disk', '%sM' % size)
def set_num_cpus(self, number):
self.add_profile('condor', 'request_cpus', number)
def set_universe(self, universe):
if universe == 'standard':
self.add_profile("pegasus", "gridstart", "none")
self.add_profile("condor", "universe", universe)
def set_category(self, category):
self.add_profile('dagman', 'category', category)
def set_priority(self, priority):
self.add_profile('dagman', 'priority', priority)
def set_num_retries(self, number):
self.add_profile("dagman", "retry", number)
def set_execution_site(self, site):
self.add_profile("selector", "execution_site", site)
class Executable(ProfileShortcuts):
""" The workflow representation of an Executable
"""
id = 0
def __init__(self, name, os='linux',
arch='x86_64', installed=False,
container=None):
self.logical_name = name + "_ID%s" % str(Executable.id)
self.pegasus_name = name
Executable.id += 1
self.os = dax.OS(os)
self.arch = dax.Arch(arch)
self.installed = installed
self.container = container
self.in_workflow = False
self.profiles = {}
self.transformations = {}
def create_transformation(self, site, url):
transform = Transformation(
self.logical_name,
site=site,
pfn=url,
is_stageable=self.installed,
arch=self.arch,
os_type=self.os,
container=self.container
)
transform.pycbc_name = self.pegasus_name
for (namespace, key), value in self.profiles.items():
transform.add_profiles(
dax.Namespace(namespace),
key=key,
value=value
)
self.transformations[site] = transform
def add_profile(self, namespace, key, value):
""" Add profile information to this executable
"""
if self.transformations:
err_msg = "Need code changes to be able to add profiles "
err_msg += "after transformations are created."
raise ValueError(err_msg)
self.profiles[(namespace, key)] = value
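# A hypothetical sketch of declaring an executable and its transformation (the
# executable name and URL are illustrative, not from the original source):
#
#     exe = Executable('pycbc_inspiral', installed=True)
#     exe.add_profile('condor', 'request_memory', '4000M')
#     exe.create_transformation('local', 'file:///usr/bin/pycbc_inspiral')
#     transform = exe.transformations['local']
#
# Note that add_profile must be called before create_transformation, since the
# profiles are copied onto the Transformation at creation time.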
class Transformation(dax.Transformation):
def is_same_as(self, other):
test_vals = ['namespace', 'version']
test_site_vals = ['arch', 'os_type', 'os_release',
'os_version', 'bypass', 'container']
# Check for logical name first
if not self.pycbc_name == other.pycbc_name:
return False
# Check the properties of the executable
for val in test_vals:
sattr = getattr(self, val)
oattr = getattr(other, val)
if not sattr == oattr:
return False
# Some properties are stored in the TransformationSite
self_site = list(self.sites.values())
assert len(self_site) == 1
self_site = self_site[0]
other_site = list(other.sites.values())
assert len(other_site) == 1
other_site = other_site[0]
for val in test_site_vals:
sattr = getattr(self_site, val)
oattr = getattr(other_site, val)
if not sattr == oattr:
return False
# Also check the "profile". This is things like Universe, RAM/disk/CPU
# requests, execution site, getenv=True, etc.
for profile in self.profiles:
if profile not in other.profiles:
return False
for profile in other.profiles:
if profile not in self.profiles:
return False
return True
class Node(ProfileShortcuts):
def __init__(self, transformation):
self.in_workflow = False
self.transformation=transformation
self._inputs = []
self._outputs = []
self._dax_node = dax.Job(transformation)
# NOTE: We are enforcing one site per transformation. Therefore the
# transformation used indicates the site to be used.
self.set_execution_site(list(transformation.sites.keys())[0])
self._args = []
# Each value in _options is added separated with whitespace
# so ['--option','value'] --> "--option value"
self._options = []
# For _raw_options *NO* whitespace is added.
# so ['--option','value'] --> "--optionvalue"
# and ['--option',' ','value'] --> "--option value"
self._raw_options = []
def add_arg(self, arg):
""" Add an argument
"""
if not isinstance(arg, File):
arg = str(arg)
self._args += [arg]
def add_raw_arg(self, arg):
""" Add an argument to the command line of this job, but do *NOT* add
white space between arguments. This can be added manually by adding
' ' if needed
"""
if not isinstance(arg, File):
arg = str(arg)
self._raw_options += [arg]
def add_opt(self, opt, value=None):
""" Add a option
"""
if value is not None:
if not isinstance(value, File):
value = str(value)
self._options += [opt, value]
else:
self._options += [opt]
#private functions to add input and output data sources/sinks
def _add_input(self, inp):
""" Add as source of input data
"""
self._inputs += [inp]
self._dax_node.add_inputs(inp)
def _add_output(self, out):
""" Add as destination of output data
"""
self._outputs += [out]
out.node = self
stage_out = out.storage_path is not None
self._dax_node.add_outputs(out, stage_out=stage_out)
# public functions to add options, arguments with or without data sources
def add_input(self, inp):
"""Declares an input file without adding it as a command-line option.
"""
self._add_input(inp)
def add_output(self, inp):
"""Declares an output file without adding it as a command-line option.
"""
self._add_output(inp)
def add_input_opt(self, opt, inp):
""" Add an option that determines an input
"""
self.add_opt(opt, inp._dax_repr())
self._add_input(inp)
def add_output_opt(self, opt, out):
""" Add an option that determines an output
"""
self.add_opt(opt, out._dax_repr())
self._add_output(out)
def add_output_list_opt(self, opt, outputs):
""" Add an option that determines a list of outputs
"""
self.add_opt(opt)
for out in outputs:
self.add_opt(out)
self._add_output(out)
def add_input_list_opt(self, opt, inputs):
""" Add an option that determines a list of inputs
"""
self.add_opt(opt)
for inp in inputs:
self.add_opt(inp)
self._add_input(inp)
def add_list_opt(self, opt, values):
""" Add an option with a list of non-file parameters.
"""
self.add_opt(opt)
for val in values:
self.add_opt(val)
def add_input_arg(self, inp):
""" Add an input as an argument
"""
self.add_arg(inp._dax_repr())
self._add_input(inp)
def add_output_arg(self, out):
""" Add an output as an argument
"""
self.add_arg(out._dax_repr())
self._add_output(out)
def new_output_file_opt(self, opt, name):
""" Add an option and return a new file handle
"""
fil = File(name)
self.add_output_opt(opt, fil)
return fil
# functions to describe properties of this node
def add_profile(self, namespace, key, value):
""" Add profile information to this node at the DAX level
"""
self._dax_node.add_profiles(
dax.Namespace(namespace),
key=key,
value=value
)
def _finalize(self):
if len(self._raw_options):
raw_args = [''.join([str(a) for a in self._raw_options])]
else:
raw_args = []
args = self._args + raw_args + self._options
self._dax_node.add_args(*args)
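# A hypothetical sketch of how the option helpers above assemble a job's
# command line (all names are illustrative):
#
#     node = Node(transform)                      # transform: a Transformation
#     node.add_opt('--segment-length', 256)       # "--segment-length 256"
#     node.add_input_opt('--frame-file', frame)   # option plus input declaration
#     out = node.new_output_file_opt('--output-file', 'out.hdf')
#
# When the node is added to a Workflow, _finalize() concatenates _args, the
# joined _raw_options and _options into the argument list passed to Pegasus.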
class Workflow(object):
"""
"""
def __init__(self, name='my_workflow', directory=None, cache_file=None,
dax_file_name=None):
self.name = name
self._rc = dax.ReplicaCatalog()
self._tc = dax.TransformationCatalog()
if directory is None:
self.out_dir = os.getcwd()
else:
self.out_dir = os.path.abspath(directory)
if cache_file is not None:
cache_file = os.path.abspath(cache_file)
self.cache_file = cache_file
self._inputs = []
self._outputs = []
self._transformations = []
self._containers = []
self.in_workflow = False
self.sub_workflows = []
if dax_file_name is None:
self.filename = self.name + '.dax'
else:
self.filename = dax_file_name
self._adag = dax.Workflow(self.filename)
        # A pegasus job version of this workflow for use if it is included
        # within a larger workflow
self._as_job = SubWorkflow(self.filename, is_planned=False,
_id=self.name)
self._swinputs = []
def add_workflow(self, workflow):
""" Add a sub-workflow to this workflow
This function adds a sub-workflow of Workflow class to this workflow.
Parent child relationships are determined by data dependencies
Parameters
----------
workflow : Workflow instance
The sub-workflow to add to this one
"""
workflow.in_workflow = self
self.sub_workflows += [workflow]
self._adag.add_jobs(workflow._as_job)
return self
def add_explicit_dependancy(self, parent, child):
"""
        Add an explicit dependency between two Nodes in this workflow.
        Most dependencies (in PyCBC and Pegasus thinking) are added by
        declaring file linkages. However, there are some cases where you might
        want to override that and add an explicit dependency.
Parameters
----------
parent : Node instance
The parent Node.
child : Node instance
The child Node
"""
self._adag.add_dependency(parent._dax_node, children=[child._dax_node])
def add_subworkflow_dependancy(self, parent_workflow, child_workflow):
"""
Add a dependency between two sub-workflows in this workflow
This is done if those subworkflows are themselves declared as Workflows
which are sub-workflows and not explicit SubWorkflows. (These Workflows
contain SubWorkflows inside them .... Yes, the relationship between
PyCBC and Pegasus becomes confusing here). If you are working with
explicit SubWorkflows these can be added normally using File relations.
Parameters
----------
parent_workflow : Workflow instance
The sub-workflow to use as the parent dependence.
Must be a sub-workflow of this workflow.
child_workflow : Workflow instance
The sub-workflow to add as the child dependence.
Must be a sub-workflow of this workflow.
"""
self._adag.add_dependency(parent_workflow._as_job,
children=[child_workflow._as_job])
    def add_transformation(self, transformation):
""" Add a transformation to this workflow
Adds the input transformation to this workflow.
Parameters
----------
transformation : Pegasus.api.Transformation
The transformation to be added.
"""
        self._tc.add_transformations(transformation)
def add_container(self, container):
""" Add a container to this workflow
Adds the input container to this workflow.
Parameters
----------
container : Pegasus.api.Container
The container to be added.
"""
self._tc.add_containers(container)
def add_node(self, node):
""" Add a node to this workflow
This function adds nodes to the workflow. It also determines
parent/child relations from the inputs to this job.
Parameters
----------
node : pycbc.workflow.pegasus_workflow.Node
A node that should be executed as part of this workflow.
"""
node._finalize()
node.in_workflow = self
# Record the executable that this node uses
if node.transformation not in self._transformations:
for tform in self._transformations:
# Check if transform is already in workflow
if node.transformation.is_same_as(tform):
node.transformation.in_workflow = True
node._dax_node.transformation = tform.name
node.transformation.name = tform.name
break
else:
self._transformations += [node.transformation]
lgc = (hasattr(node, 'executable')
and node.executable.container is not None
and node.executable.container not in self._containers)
if lgc:
self._containers.append(node.executable.container)
# Add the node itself
self._adag.add_jobs(node._dax_node)
# Determine the parent child relationships based on the inputs that
# this node requires.
# In Pegasus5 this is mostly handled by pegasus, we just need to
# connect files correctly if dealing with file management between
# workflows/subworkflows
for inp in node._inputs:
if inp.node is not None and inp.node.in_workflow == self:
# Standard case: File produced within the same workflow.
# Don't need to do anything here.
continue
elif inp.node is not None and not inp.node.in_workflow:
# This error should be rare, but can happen. If a Node hasn't
# yet been added to a workflow, this logic breaks. Always add
# nodes in order that files will be produced.
raise ValueError('Parents of this node must be added to the '
'workflow first.')
elif inp.node is None:
# File is external to the workflow (e.g. a pregenerated
# template bank). (if inp.node is None)
if inp not in self._inputs:
self._inputs += [inp]
elif inp.node.in_workflow != self:
# File is coming from a parent workflow, or other workflow
# These needs a few extra hooks later, use _swinputs for this.
if inp not in self._inputs:
self._inputs += [inp]
self._swinputs += [inp]
else:
err_msg = ("I don't understand how to deal with an input file "
"here. Ian doesn't think this message should be "
"possible, but if you get here something has gone "
"wrong and will need debugging!")
raise ValueError(err_msg)
# Record the outputs that this node generates
self._outputs += node._outputs
return self
def __add__(self, other):
if isinstance(other, Node):
return self.add_node(other)
elif isinstance(other, Workflow):
return self.add_workflow(other)
else:
raise TypeError('Cannot add type %s to this workflow' % type(other))
def traverse_workflow_io(self):
""" If input is needed from another workflow within a larger
hierarchical workflow, determine the path for the file to reach
the destination and add the file to workflows input / output as
needed.
"""
def root_path(v):
path = [v]
while v.in_workflow:
path += [v.in_workflow]
v = v.in_workflow
return path
for inp in self._swinputs:
workflow_root = root_path(self)
input_root = root_path(inp.node.in_workflow)
for step in workflow_root:
if step in input_root:
common = step
break
# Set our needed file as output so that it gets staged upwards
# to a workflow that contains the job which needs it.
for idx in range(input_root.index(common)):
child_wflow = input_root[idx]
parent_wflow = input_root[idx+1]
if inp not in child_wflow._as_job.get_outputs():
child_wflow._as_job.add_outputs(inp, stage_out=True)
parent_wflow._outputs += [inp]
# Set out needed file so it gets staged downwards towards the
# job that needs it.
for wf in workflow_root[:workflow_root.index(common)]:
if inp not in wf._as_job.get_inputs():
wf._as_job.add_inputs(inp)
for wf in self.sub_workflows:
wf.traverse_workflow_io()
def save(self, filename=None, submit_now=False, plan_now=False,
output_map_path=None, root=True):
""" Write this workflow to DAX file
"""
if filename is None:
filename = self.filename
if output_map_path is None:
output_map_path = 'output.map'
# Handle setting up io for inter-workflow file use ahead of time
# so that when daxes are saved the metadata is complete
if root:
self.traverse_workflow_io()
for sub in self.sub_workflows:
sub.save(root=False)
# FIXME: If I'm now putting output_map here, all output_map stuff
# should move here.
sub.output_map_file.insert_into_dax(self._rc, self.sites)
sub_workflow_file = File(sub.filename)
pfn = os.path.join(os.getcwd(), sub.filename)
sub_workflow_file.add_pfn(pfn, site='local')
sub_workflow_file.insert_into_dax(self._rc, self.sites)
# add workflow input files pfns for local site to dax
for fil in self._inputs:
fil.insert_into_dax(self._rc, self.sites)
self._adag.add_replica_catalog(self._rc)
# Add TC into workflow
self._adag.add_transformation_catalog(self._tc)
with open(output_map_path, 'w') as f:
for out in self._outputs:
try:
f.write(out.output_map_str() + '\n')
except ValueError:
# There was no storage path
pass
# Pegasus requires that we write the DAX file into the local directory
olddir = os.getcwd()
os.chdir(self.out_dir)
self._adag.write(filename)
if not self.in_workflow:
if submit_now or plan_now:
self.plan_and_submit(submit_now=submit_now)
else:
with open('additional_planner_args.dat', 'w') as f:
stage_site_str = self.staging_site_str
exec_sites = self.exec_sites_str
# For now we don't include --config as this can be added to
# in submit_dax. We should add an option to add additional
# pegasus properties (through the config files?) here.
#prop_file = os.path.join(PEGASUS_FILE_DIRECTORY,
# 'pegasus-properties.conf')
#f.write('--conf {} '.format(prop_file))
if self.cache_file is not None:
f.write('--cache {} '.format(self.cache_file))
f.write('--output-sites local ')
f.write('--sites {} '.format(exec_sites))
f.write('--staging-site {} '.format(stage_site_str))
f.write('--cluster label,horizontal ')
f.write('--cleanup inplace ')
f.write('--relative-dir work ')
# --dir is not being set here because it might be easier to
# set this in submit_dax still?
f.write('-q ')
f.write('--dax {}'.format(filename))
os.chdir(olddir)
def plan_and_submit(self, submit_now=True):
""" Plan and submit the workflow now.
"""
# New functionality, this might still need some work. Here's things
# that this might want to do, that submit_dax does:
# * Checks proxy (ignore this, user should already have this done)
# * Pulls properties file in (DONE)
# * Send necessary options to the planner (DONE)
# * Some logging about hostnames (NOT DONE, needed?)
# * Setup the helper scripts (start/debug/stop/status) .. (DONE)
# * Copy some of the interesting files into workflow/ (DONE)
# * Checks for dashboard URL (NOT DONE)
# * Does something with condor_reschedule (NOT DONE, needed?)
planner_args = {}
planner_args['submit'] = submit_now
# Get properties file - would be nice to add extra properties here.
prop_file = os.path.join(PEGASUS_FILE_DIRECTORY,
'pegasus-properties.conf')
planner_args['conf'] = prop_file
# Cache file, if there is one
if self.cache_file is not None:
planner_args['cache'] = [self.cache_file]
# Not really sure what this does, but Karan said to use it. Seems to
# matter for subworkflows
planner_args['output_sites'] = ['local']
# Site options
planner_args['sites'] = self.sites
planner_args['staging_sites'] = self.staging_site
# Make tmpdir for submitfiles
# default directory is the system default, but is overrideable
# This should probably be moved to core.py?
submit_opts = 'pegasus_profile', 'pycbc|submit-directory'
submit_dir = None
if self.cp.has_option(*submit_opts):
submit_dir = self.cp.get(*submit_opts)
submitdir = tempfile.mkdtemp(prefix='pycbc-tmp_', dir=submit_dir)
os.chmod(submitdir, 0o755)
try:
os.remove('submitdir')
except FileNotFoundError:
pass
os.symlink(submitdir, 'submitdir')
planner_args['dir'] = submitdir
# Other options
planner_args['cluster'] = ['label,horizontal']
planner_args['relative_dir'] = 'work'
planner_args['cleanup'] = 'inplace'
# This quietens the planner a bit. We cannot set the verbosity
# directly, which would be better. So be careful, if changing the
# pegasus.mode property, it will change the verbosity (a lot).
planner_args['quiet'] = 1
# FIXME: The location of output.map is hardcoded in the properties
# file. This is overridden for subworkflows, but is not for
# main workflows with submit_dax. If we ever remove submit_dax
# we should include the location explicitly here.
self._adag.plan(**planner_args)
# Set up convenience scripts
with open('status', 'w') as fp:
fp.write('pegasus-status --verbose ')
fp.write('--long {}/work $@'.format(submitdir))
with open('debug', 'w') as fp:
fp.write('pegasus-analyzer -r ')
fp.write('-v {}/work $@'.format(submitdir))
with open('stop', 'w') as fp:
fp.write('pegasus-remove {}/work $@'.format(submitdir))
with open('start', 'w') as fp:
fp.write('pegasus-run {}/work $@'.format(submitdir))
os.chmod('status', 0o755)
os.chmod('debug', 0o755)
os.chmod('stop', 0o755)
os.chmod('start', 0o755)
os.makedirs('workflow/planning', exist_ok=True)
shutil.copy2(prop_file, 'workflow/planning')
shutil.copy2(os.path.join(submitdir, 'work', 'braindump.yml'),
'workflow/planning')
if self.cache_file is not None:
shutil.copy2(self.cache_file, 'workflow/planning')
class SubWorkflow(dax.SubWorkflow):
"""Workflow job representation of a SubWorkflow.
This follows the Pegasus nomenclature where there are Workflows, Jobs and
SubWorkflows. Be careful though! A SubWorkflow is actually a Job, not a
Workflow. If creating a sub-workflow you would create a Workflow as normal
and write out the necessary dax files. Then you would create a SubWorkflow
object, which acts as the Job in the top-level workflow. Most of the
special linkages that are needed for sub-workflows are then handled at that
stage. We do add a little bit of functionality here.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pycbc_planner_args = {}
def add_into_workflow(self, container_wflow):
"""Add this Job into a container Workflow
"""
self.add_planner_args(**self.pycbc_planner_args)
# Set this to None so code will fail if more planner args are added
self.pycbc_planner_args = None
container_wflow._adag.add_jobs(self)
    def add_planner_arg(self, name, value):
        if self.pycbc_planner_args is None:
            err_msg = ("We cannot add arguments to the SubWorkflow planning "
                       "stage after this is added to the parent workflow.")
            raise ValueError(err_msg)
        self.pycbc_planner_args[name] = value
def set_subworkflow_properties(self, output_map_file,
staging_site,
cache_file):
self.add_planner_arg('pegasus.dir.storage.mapper.replica.file',
os.path.basename(output_map_file.name))
# Ensure output_map_file has the for_planning flag set. There's no
# API way to set this after the File is initialized, so we have to
# change the attribute here.
# WORSE, we only want to set this if the pegasus *planner* is version
# 5.0.4 or larger
sproc_out = subprocess.check_output(['pegasus-version']).strip()
sproc_out = sproc_out.decode()
if version.parse(sproc_out) >= version.parse('5.0.4'):
output_map_file.for_planning=True
self.add_inputs(output_map_file)
# I think this is needed to deal with cases where the subworkflow file
# does not exist at submission time.
bname = os.path.splitext(os.path.basename(self.file))[0]
self.add_planner_arg('basename', bname)
self.add_planner_arg('output_sites', ['local'])
self.add_planner_arg('cleanup', 'inplace')
self.add_planner_arg('cluster', ['label', 'horizontal'])
self.add_planner_arg('verbose', 3)
if cache_file:
self.add_planner_arg('cache', [cache_file])
if staging_site:
self.add_planner_arg('staging_sites', staging_site)
class File(dax.File):
""" The workflow representation of a physical file
An object that represents a file from the perspective of setting up a
workflow. The file may or may not exist at the time of workflow generation.
If it does, this is represented by containing a physical file name (PFN).
A storage path is also available to indicate the desired final
destination of this file.
"""
def __init__(self, name):
self.name = name
self.node = None
dax.File.__init__(self, name)
# Storage_path is where the file would be *output* to
self.storage_path = None
# Input_pfns is *input* locations of the file. This needs a site.
self.input_pfns = []
# Adding to a dax finalizes the File. Ensure that changes cannot be
# made after doing this.
self.added_to_dax = False
def _dax_repr(self):
return self
@property
def dax_repr(self):
"""Return the dax representation of a File."""
return self._dax_repr()
def output_map_str(self):
if self.storage_path:
return '%s %s pool="%s"' % (self.name, self.storage_path, 'local')
else:
raise ValueError('This file does not have a storage path')
def add_pfn(self, url, site):
"""
Associate a PFN with this file. Takes a URL and associated site.
"""
self.input_pfns.append((url, site))
def has_pfn(self, url, site='local'):
"""
Check if the url, site is already associated to this File. If site is
not provided, we will assume it is 'local'.
"""
return (((url, site) in self.input_pfns)
or ((url, 'all') in self.input_pfns))
def insert_into_dax(self, rep_cat, sites):
for (url, site) in self.input_pfns:
if site == 'all':
for curr_site in sites:
rep_cat.add_replica(curr_site, self, url)
else:
rep_cat.add_replica(site, self, url)
@classmethod
def from_path(cls, path):
"""Takes a path and returns a File object with the path as the PFN."""
logging.warn("The from_path method in pegasus_workflow is deprecated. "
"Please use File.from_path (for output files) in core.py "
"or resolve_url_to_file in core.py (for input files) "
"instead.")
urlparts = urlsplit(path)
site = 'nonlocal'
if (urlparts.scheme == '' or urlparts.scheme == 'file'):
if os.path.isfile(urlparts.path):
path = os.path.abspath(urlparts.path)
path = urljoin('file:', pathname2url(path))
site = 'local'
fil = cls(os.path.basename(path))
fil.add_pfn(path, site=site)
return fil
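# A hypothetical sketch of registering a pre-existing input file (the path and
# site are illustrative):
#
#     bank = File('H1-TMPLTBANK.xml.gz')
#     bank.add_pfn('file:///data/banks/H1-TMPLTBANK.xml.gz', site='local')
#
# When the workflow is saved, Workflow.save() calls insert_into_dax() on each
# of its input Files, which registers these PFNs in the Pegasus replica
# catalog.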
| 32,296
| 36.511034
| 80
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/psd.py
|
# Copyright (C) 2013 Ian Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module is responsible for setting up PSD-related jobs in workflows.
"""
from pycbc.workflow.core import FileList, make_analysis_dir, Executable
from pycbc.workflow.core import SegFile
from ligo.segments import segmentlist
class CalcPSDExecutable(Executable):
current_retention_level = Executable.ALL_TRIGGERS
class MergePSDFiles(Executable):
current_retention_level = Executable.MERGED_TRIGGERS
def chunks(l, n):
""" Yield n successive chunks from l.
"""
newn = int(len(l) / n)
for i in range(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
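# A small illustration of the chunking behaviour (editor's example): splitting
# a 10-element list into 3 chunks gives chunks of len(l) // n elements, with
# the final chunk absorbing the remainder:
#
#     >>> list(chunks(list(range(10)), 3))
#     [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]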
def merge_psds(workflow, files, ifo, out_dir, tags=None):
make_analysis_dir(out_dir)
tags = [] if not tags else tags
node = MergePSDFiles(workflow.cp, 'merge_psds',
ifos=ifo, out_dir=out_dir,
tags=tags).create_node()
node.add_input_list_opt('--psd-files', files)
node.new_output_file_opt(workflow.analysis_time, '.hdf', '--output-file')
workflow += node
return node.output_files[0]
def setup_psd_calculate(workflow, frame_files, ifo, segments,
segment_name, out_dir, tags=None):
make_analysis_dir(out_dir)
tags = [] if not tags else tags
if workflow.cp.has_option_tags('workflow-psd', 'parallelization-factor', tags=tags):
num_parts = int(workflow.cp.get_opt_tags('workflow-psd',
'parallelization-factor',
tags=tags))
else:
num_parts = 1
# get rid of duplicate segments which happen when splitting the bank
segments = segmentlist(frozenset(segments))
segment_lists = list(chunks(segments, num_parts))
psd_files = FileList([])
for i, segs in enumerate(segment_lists):
seg_file = SegFile.from_segment_list('%s_%s' %(segment_name, i),
segmentlist(segs), segment_name, ifo,
valid_segment=workflow.analysis_time,
extension='xml', directory=out_dir)
psd_files += [make_psd_file(workflow, frame_files, seg_file,
segment_name, out_dir,
tags=tags + ['PART%s' % i])]
if num_parts > 1:
return merge_psds(workflow, psd_files, ifo, out_dir, tags=tags)
else:
return psd_files[0]
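# A hypothetical configuration snippet that would activate the parallel branch
# above (section and option names as read from workflow.cp in this function):
#
#     [workflow-psd]
#     parallelization-factor = 10
#
# With this setting the analysis segments are split into 10 parts, one
# calculate_psd job is created per part, and the per-part PSD files are then
# combined by merge_psds.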
def make_psd_file(workflow, frame_files, segment_file, segment_name, out_dir,
tags=None):
make_analysis_dir(out_dir)
tags = [] if not tags else tags
exe = CalcPSDExecutable(workflow.cp, 'calculate_psd',
ifos=segment_file.ifo, out_dir=out_dir,
tags=tags)
node = exe.create_node()
node.add_input_opt('--analysis-segment-file', segment_file)
node.add_opt('--segment-name', segment_name)
if frame_files and not exe.has_opt('frame-type'):
node.add_input_list_opt('--frame-files', frame_files)
node.new_output_file_opt(workflow.analysis_time, '.hdf', '--output-file')
workflow += node
return node.output_files[0]
class AvgPSDExecutable(Executable):
current_retention_level = Executable.FINAL_RESULT
def make_average_psd(workflow, psd_files, out_dir, tags=None,
output_fmt='.txt'):
make_analysis_dir(out_dir)
tags = [] if tags is None else tags
node = AvgPSDExecutable(workflow.cp, 'average_psd', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_list_opt('--input-files', psd_files)
if len(workflow.ifos) > 1:
node.new_output_file_opt(workflow.analysis_time, output_fmt,
'--detector-avg-file')
node.new_multiifo_output_list_opt('--time-avg-file', workflow.ifos,
workflow.analysis_time, output_fmt, tags=tags)
workflow += node
return node.output_files
# keep namespace clean
__all__ = ['make_psd_file', 'make_average_psd', 'setup_psd_calculate', 'merge_psds']
| 4,824
| 38.54918
| 88
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/grb_utils.py
|
# Copyright (C) 2015 Andrew Williamson
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This library code contains functions and classes that are used in the
generation of pygrb workflows. For details about pycbc.workflow see here:
http://pycbc.org/pycbc/latest/html/workflow.html
"""
import glob
import os
import numpy as np
from scipy.stats import rayleigh
from gwdatafind.utils import filename_metadata
from pycbc import makedir
from pycbc.workflow.core import \
File, FileList, configparser_value_to_file, resolve_url_to_file,\
Executable, Node
from pycbc.workflow.jobsetup import select_generic_executable
from pycbc.workflow.pegasus_workflow import SubWorkflow
from pycbc.workflow.plotting import PlotExecutable
def _select_grb_pp_class(wflow, curr_exe):
"""
This function returns the class for PyGRB post-processing scripts.
    Parameters
    ----------
    wflow : pycbc.workflow.core.Workflow
        The workflow object from which the executable path is read.
    curr_exe : string
        The name of the executable
Returns
-------
exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility
functions appropriate for the given executable. Instances of the class
('jobs') **must** have methods
* job.create_node()
and
* job.get_valid_times(ifo, )
"""
exe_path = wflow.cp.get('executables', curr_exe)
exe_name = os.path.basename(exe_path)
exe_to_class_map = {
'pycbc_grb_trig_combiner': PycbcGrbTrigCombinerExecutable,
'pycbc_grb_trig_cluster': PycbcGrbTrigClusterExecutable,
'pycbc_grb_inj_finder': PycbcGrbInjFinderExecutable,
'pycbc_grb_inj_combiner': PycbcGrbInjCombinerExecutable
}
if exe_name not in exe_to_class_map:
raise ValueError(f"No job class exists for executable {curr_exe}")
return exe_to_class_map[exe_name]
def set_grb_start_end(cp, start, end):
"""
Function to update analysis boundaries as workflow is generated
Parameters
----------
cp : pycbc.workflow.configuration.WorkflowConfigParser object
The parsed configuration options of a pycbc.workflow.core.Workflow.
start : int
The start of the workflow analysis time.
end : int
The end of the workflow analysis time.
Returns
--------
cp : pycbc.workflow.configuration.WorkflowConfigParser object
The modified WorkflowConfigParser object.
"""
cp.set("workflow", "start-time", str(start))
cp.set("workflow", "end-time", str(end))
return cp
def make_gating_node(workflow, datafind_files, outdir=None, tags=None):
'''
Generate jobs for autogating the data for PyGRB runs.
Parameters
----------
workflow: pycbc.workflow.core.Workflow
An instanced class that manages the constructed workflow.
datafind_files : pycbc.workflow.core.FileList
A FileList containing the frame files to be gated.
outdir : string
Path of the output directory
tags : list of strings
If given these tags are used to uniquely name and identify output files
that would be produced in multiple calls to this function.
Returns
--------
condition_strain_nodes : list
List containing the pycbc.workflow.core.Node objects representing the
autogating jobs.
condition_strain_outs : pycbc.workflow.core.FileList
FileList containing the pycbc.workflow.core.File objects representing
the gated frame files.
'''
cp = workflow.cp
if tags is None:
tags = []
condition_strain_class = select_generic_executable(workflow,
"condition_strain")
condition_strain_nodes = []
condition_strain_outs = FileList([])
for ifo in workflow.ifos:
input_files = FileList([datafind_file for datafind_file in \
datafind_files if datafind_file.ifo == ifo])
condition_strain_jobs = condition_strain_class(cp, "condition_strain",
ifos=ifo, out_dir=outdir, tags=tags)
condition_strain_node, condition_strain_out = \
condition_strain_jobs.create_node(input_files, tags=tags)
condition_strain_nodes.append(condition_strain_node)
condition_strain_outs.extend(FileList([condition_strain_out]))
return condition_strain_nodes, condition_strain_outs
def fermi_core_tail_model(
sky_err, rad, core_frac=0.98, core_sigma=3.6, tail_sigma=29.6):
"""Fermi systematic error model following
https://arxiv.org/abs/1909.03006, with default values valid
before 11 September 2019.
Parameters
----------
    sky_err : float
        The reported statistical 1-sigma sky error of the trigger.
    rad : float or numpy.ndarray
        The angular radius (or radii) at which to evaluate the model, in the
        same units as sky_err.
    core_frac : float
Fraction of the systematic uncertainty contained within the core
component.
core_sigma : float
Size of the GBM systematic core component.
tail_sigma : float
Size of the GBM systematic tail component.
Returns
    -------
tuple
Tuple containing the core and tail probability distributions
as a function of radius.
"""
scaledsq = sky_err**2 / -2 / np.log(0.32)
return (
frac * (1 - np.exp(-0.5 * (rad / np.sqrt(scaledsq + sigma**2))**2))
for frac, sigma
in zip([core_frac, 1 - core_frac], [core_sigma, tail_sigma]))
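# Illustrative use (the numbers are hypothetical): evaluate the cumulative
# core and tail containment probabilities for a 5 degree statistical error
# over a grid of radii, then sum them to obtain the total containment at each
# radius, as done in get_sky_grid_scale below.
#
#     radii = np.linspace(0.5 * 5.0, 4.0 * 5.0, 100)
#     core, tail = fermi_core_tail_model(5.0, radii)
#     total = core + tail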
def get_sky_grid_scale(
sky_error=0.0, containment=0.9, upscale=False, fermi_sys=False,
precision=1e-3, **kwargs):
"""
Calculate the angular radius corresponding to a desired
localization uncertainty level. This is used to generate the search
grid and involves scaling up the standard 1-sigma value provided to
the workflow, assuming a normal probability profile. Fermi
systematic errors can be included, following
https://arxiv.org/abs/1909.03006, with default values valid before
11 September 2019. The default probability coverage is 90%.
Parameters
----------
sky_error : float
The reported statistical 1-sigma sky error of the trigger.
containment : float
The desired localization probability to be covered by the sky
grid.
upscale : bool, optional
Whether to apply rescale to convert from 1 sigma -> containment
for non-Fermi triggers. Default = True as Swift reports 90%
radius directly.
fermi_sys : bool, optional
Whether to apply Fermi-GBM systematics via
``fermi_core_tail_model``. Default = False.
precision : float, optional
Precision (in degrees) for calculating the error radius via
Fermi-GBM model.
**kwargs
Additional keyword arguments passed to `fermi_core_tail_model`.
Returns
    -------
float
Sky error radius in degrees.
"""
if fermi_sys:
lims = (0.5, 4)
radii = np.linspace(
lims[0] * sky_error, lims[1] * sky_error,
int((lims[1] - lims[0]) * sky_error / precision) + 1)
core, tail = fermi_core_tail_model(sky_error, radii, **kwargs)
out = radii[(abs(core + tail - containment)).argmin()]
else:
# Use Rayleigh distribution to go from 1 sigma containment to
# containment given by function variable. Interval method returns
# bounds of equal probability about the median, but we want 1-sided
# bound, hence use (2 * containment - 1)
out = sky_error
if upscale:
out *= rayleigh.interval(2 * containment - 1)[-1]
return out
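# A worked sketch of the non-Fermi branch above (numbers are approximate and
# illustrative): with containment = 0.9, rayleigh.interval(0.8)[-1] is the 90%
# quantile of a unit Rayleigh distribution, sqrt(-2 ln 0.1) ~ 2.15, so
# get_sky_grid_scale(sky_error=5.0, upscale=True) returns roughly 10.7 deg.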
def setup_pygrb_pp_workflow(wf, pp_dir, seg_dir, segment, insp_files,
inj_files, inj_insp_files, inj_tags):
"""
Generate post-processing section of PyGRB offline workflow
"""
pp_outs = FileList([])
# pp_outs is returned by this function. It is structured as follows:
# pp_outs[0]: [ALL_TIMES, ONSOURCE, OFFSOURCE, OFFTRIAL_1, ..., OFFTRIAL_N]
# FileList (N can be set by the user and is 6 by default)
# pp_outs[1]: ALL_TIMES_CLUSTERED File
# pp_outs[2]: OFFSOURCE_CLUSTERED File
# pp_outs[3]: ONSOURCE_CLUSTERED File
# pp_outs[4]: OFFTRIAL_1_CLUSTERED File
# ...
# pp_outs[4+N]: OFFTRIAL_N_CLUSTERED File
# pp_outs[-2]: FOUNDMISSED FileList covering all injection sets
# pp_outs[-1]: FOUNDMISSED-FILTERED FileList covering all injection sets
# in the same order as pp_outs[-2]
# Begin setting up trig combiner job(s)
# Select executable class and initialize
exe_class = _select_grb_pp_class(wf, "trig_combiner")
job_instance = exe_class(wf.cp, "trig_combiner")
# Create node for coherent no injections jobs
node, trig_files = job_instance.create_node(wf.ifos, seg_dir, segment,
insp_files, pp_dir)
wf.add_node(node)
pp_outs.append(trig_files)
# Trig clustering for each trig file
exe_class = _select_grb_pp_class(wf, "trig_cluster")
job_instance = exe_class(wf.cp, "trig_cluster")
for trig_file in trig_files:
# Create and add nodes
node, out_file = job_instance.create_node(trig_file, pp_dir)
wf.add_node(node)
pp_outs.append(out_file)
# Find injections from triggers
exe_class = _select_grb_pp_class(wf, "inj_finder")
job_instance = exe_class(wf.cp, "inj_finder")
inj_find_files = FileList([])
for inj_tag in inj_tags:
tag_inj_files = FileList([f for f in inj_files
if inj_tag in f.tags])
        # The indexing of tags[1] here stems from the injection group
        # information being stored in the second tag. This could be improved
        # depending on the final implementation of injections
tag_insp_files = FileList([f for f in inj_insp_files
if inj_tag in f.tags[1]])
node, inj_find_file = job_instance.create_node(
tag_inj_files, tag_insp_files,
pp_dir)
wf.add_node(node)
inj_find_files.append(inj_find_file)
pp_outs.append(inj_find_files)
# Combine injections
exe_class = _select_grb_pp_class(wf, "inj_combiner")
job_instance = exe_class(wf.cp, "inj_combiner")
inj_comb_files = FileList([])
for in_file in inj_find_files:
if 'DETECTION' not in in_file.tags:
node, inj_comb_file = job_instance.create_node(in_file,
pp_dir,
in_file.tags,
segment)
wf.add_node(node)
inj_comb_files.append(inj_comb_file)
pp_outs.append(inj_comb_files)
return pp_outs
class PycbcGrbTrigCombinerExecutable(Executable):
""" The class responsible for creating jobs
for ''pycbc_grb_trig_combiner''.
"""
current_retention_level = Executable.ALL_TRIGGERS
def __init__(self, cp, name):
super().__init__(cp=cp, name=name)
self.trigger_name = cp.get('workflow', 'trigger-name')
self.trig_start_time = cp.get('workflow', 'start-time')
self.num_trials = int(cp.get('trig_combiner', 'num-trials'))
def create_node(self, ifo_tag, seg_dir, segment, insp_files,
out_dir, tags=None):
node = Node(self)
node.add_opt('--verbose')
node.add_opt("--ifo-tag", ifo_tag)
node.add_opt("--grb-name", self.trigger_name)
node.add_opt("--trig-start-time", self.trig_start_time)
node.add_opt("--segment-dir", seg_dir)
node.add_input_list_opt("--input-files", insp_files)
node.add_opt("--user-tag", "PYGRB")
node.add_opt("--num-trials", self.num_trials)
# Prepare output file tag
user_tag = f"PYGRB_GRB{self.trigger_name}"
if tags:
user_tag += "_{}".format(tags)
# Add on/off source and off trial outputs
output_files = FileList([])
outfile_types = ['ALL_TIMES', 'OFFSOURCE', 'ONSOURCE']
for i in range(self.num_trials):
outfile_types.append("OFFTRIAL_{}".format(i+1))
for out_type in outfile_types:
out_name = "{}-{}_{}-{}-{}.h5".format(
ifo_tag, user_tag, out_type,
segment[0], segment[1]-segment[0])
out_file = File(ifo_tag, 'trig_combiner', segment,
file_url=os.path.join(out_dir, out_name))
node.add_output(out_file)
output_files.append(out_file)
return node, output_files
class PycbcGrbTrigClusterExecutable(Executable):
""" The class responsible for creating jobs
for ''pycbc_grb_trig_cluster''.
"""
current_retention_level = Executable.ALL_TRIGGERS
def __init__(self, cp, name):
super().__init__(cp=cp, name=name)
def create_node(self, in_file, out_dir):
node = Node(self)
node.add_input_opt("--trig-file", in_file)
# Determine output file name
ifotag, filetag, segment = filename_metadata(in_file.name)
start, end = segment
out_name = "{}-{}_CLUSTERED-{}-{}.h5".format(ifotag, filetag,
start, end-start)
out_file = File(ifotag, 'trig_cluster', segment,
file_url=os.path.join(out_dir, out_name))
node.add_output(out_file)
return node, out_file
class PycbcGrbInjFinderExecutable(Executable):
"""The class responsible for creating jobs for ``pycbc_grb_inj_finder``
"""
current_retention_level = Executable.ALL_TRIGGERS
def __init__(self, cp, exe_name):
super().__init__(cp=cp, name=exe_name)
def create_node(self, inj_files, inj_insp_files,
out_dir, tags=None):
if tags is None:
tags = []
node = Node(self)
node.add_input_list_opt('--input-files', inj_insp_files)
node.add_input_list_opt('--inj-files', inj_files)
ifo_tag, desc, segment = filename_metadata(inj_files[0].name)
desc = '_'.join(desc.split('_')[:-1])
out_name = "{}-{}_FOUNDMISSED-{}-{}.h5".format(
ifo_tag, desc, segment[0], abs(segment))
out_file = File(ifo_tag, 'inj_finder', segment,
os.path.join(out_dir, out_name), tags=tags)
node.add_output(out_file)
return node, out_file
class PycbcGrbInjCombinerExecutable(Executable):
"""The class responsible for creating jobs ``pycbc_grb_inj_combiner``
"""
current_retention_level = Executable.ALL_TRIGGERS
def __init__(self, cp, exe_name):
super().__init__(cp=cp, name=exe_name)
def create_node(self, input_file, out_dir, ifo_tag, segment, tags=None):
if tags is None:
tags = []
node = Node(self)
node.add_input_opt('--input-files', input_file)
out_name = input_file.name.replace('.h5', '-FILTERED.h5')
out_file = File(ifo_tag, 'inj_combiner', segment,
os.path.join(out_dir, out_name), tags=tags)
node.add_output_opt('--output-file', out_file)
return node, out_file
def build_veto_filelist(workflow):
"""Construct a FileList instance containing all veto xml files"""
veto_dir = workflow.cp.get('workflow', 'veto-directory')
veto_files = glob.glob(veto_dir + '/*CAT*.xml')
veto_files = [resolve_url_to_file(vf) for vf in veto_files]
veto_files = FileList(veto_files)
return veto_files
def build_segment_filelist(workflow):
"""Construct a FileList instance containing all segments txt files"""
seg_dir = workflow.cp.get('workflow', 'segment-dir')
file_names = ["bufferSeg.txt", "offSourceSeg.txt", "onSourceSeg.txt"]
seg_files = [os.path.join(seg_dir, fn) for fn in file_names]
seg_files = [resolve_url_to_file(sf) for sf in seg_files]
seg_files = FileList(seg_files)
return seg_files
def make_pygrb_plot(workflow, exec_name, out_dir,
ifo=None, inj_file=None, trig_file=None, tags=None):
"""Adds a node for a plot of PyGRB results to the workflow"""
tags = [] if tags is None else tags
# Initialize job node with its tags
grb_name = workflow.cp.get('workflow', 'trigger-name')
extra_tags = ['GRB'+grb_name]
# TODO: why is inj_set repeated twice in output files?
# if inj_set is not None:
# extra_tags.append(inj_set)
if ifo:
extra_tags.append(ifo)
node = PlotExecutable(workflow.cp, exec_name, ifos=workflow.ifos,
out_dir=out_dir,
tags=tags+extra_tags).create_node()
# Pass the trigger file as an input File instance
# if exec_name in ['pygrb_plot_chisq_veto', 'pygrb_plot_coh_ifosnr',
# 'pygrb_plot_null_stats', 'pygrb_plot_skygrid',
# 'pygrb_plot_snr_timeseries']:
if trig_file is not None:
node.add_input_opt('--trig-file', resolve_url_to_file(trig_file))
# Pass the veto and segment files and options
if workflow.cp.has_option('workflow', 'veto-category'):
node.add_opt('--veto-category',
workflow.cp.get('workflow', 'veto-category'))
# FIXME: move to next if within previous one and else Raise error?
if workflow.cp.has_option('workflow', 'veto-files'):
veto_files = build_veto_filelist(workflow)
node.add_input_list_opt('--veto-files', veto_files)
# TODO: check this for pygrb_efficiency and pygrb_plot_stats_distribution
# They originally wanted seg_files
if exec_name in ['pygrb_plot_injs_results', 'pygrb_efficiency',
'pygrb_plot_snr_timeseries',
'pygrb_plot_stats_distribution']:
trig_time = workflow.cp.get('workflow', 'trigger-time')
node.add_opt('--trigger-time', trig_time)
# Other shared tuning values
if exec_name not in ['pygrb_plot_skygrid', 'pygrb_plot_coh_ifosnr']:
if not (exec_name == 'pygrb_plot_snr_timeseries' and
tags[0] != 'reweighted'):
for opt in ['chisq-index', 'chisq-nhigh', 'null-snr-threshold',
'snr-threshold', 'newsnr-threshold',
'sngl-snr-threshold', 'null-grad-thresh',
'null-grad-val']:
if workflow.cp.has_option('workflow', opt):
node.add_opt('--'+opt, workflow.cp.get('workflow', opt))
# Pass the injection file as an input File instance
if inj_file is not None and exec_name not in \
['pygrb_plot_skygrid', 'pygrb_plot_stats_distribution']:
fm_file = resolve_url_to_file(inj_file)
node.add_input_opt('--found-missed-file', fm_file)
# IFO option
if ifo:
node.add_opt('--ifo', ifo)
# Additional input files (passed as File instances)
# if exec_name in ['pygrb_plot_injs_results', 'pygrb_efficiency']:
# missed_file = inj_file
# node.add_input_opt('--missed-file', missed_file)
# FIXME: need found-missed-file option
# Output files and final input file (passed as a File instance)
if exec_name == 'pygrb_efficiency':
# In this case tags[0] is the offtrial number
onsource_file = configparser_value_to_file(workflow.cp,
'workflow', 'onsource-file')
node.add_input_opt('--onsource-file', onsource_file)
node.new_output_file_opt(workflow.analysis_time, '.png',
'--background-output-file',
tags=extra_tags+['max_background'])
node.new_output_file_opt(workflow.analysis_time, '.png',
'--onsource-output-file',
tags=extra_tags+['onsource'])
else:
node.new_output_file_opt(workflow.analysis_time, '.png',
'--output-file', tags=extra_tags)
if exec_name in ['pygrb_plot_coh_ifosnr', 'pygrb_plot_null_stats'] \
and 'zoomin' in tags:
node.add_opt('--zoom-in')
# Quantity to be displayed on the y-axis of the plot
if exec_name in ['pygrb_plot_chisq_veto', 'pygrb_plot_null_stats',
'pygrb_plot_snr_timeseries']:
node.add_opt('--y-variable', tags[0])
# Quantity to be displayed on the x-axis of the plot
elif exec_name == 'pygrb_plot_stats_distribution':
node.add_opt('--x-variable', tags[0])
elif exec_name == 'pygrb_plot_injs_results':
# Variables to plot on x and y axes
node.add_opt('--y-variable', tags[0])
node.add_opt('--x-variable', tags[1])
# Flag to plot found over missed or missed over found
if tags[2] == 'missed-on-top':
node.add_opt('--'+tags[2])
# Enable log axes
subsection = '_'.join(tags[0:2])
for log_flag in ['x-log', 'y-log']:
if workflow.cp.has_option_tags(exec_name, log_flag,
tags=[subsection]):
node.add_opt('--'+log_flag)
# Add job node to workflow
workflow += node
return node, node.output_files
def make_info_table(workflow, out_dir, tags=None):
"""Setup a job to create an html snippet with the GRB trigger information.
"""
tags = [] if tags is None else tags
    # Executable
exec_name = 'pygrb_grb_info_table'
# Initialize job node
grb_name = workflow.cp.get('workflow', 'trigger-name')
extra_tags = ['GRB'+grb_name, 'INFO_TABLE']
node = PlotExecutable(workflow.cp, exec_name,
ifos=workflow.ifos, out_dir=out_dir,
tags=tags+extra_tags).create_node()
# Options
node.add_opt('--trigger-time', workflow.cp.get('workflow', 'trigger-time'))
node.add_opt('--ra', workflow.cp.get('workflow', 'ra'))
node.add_opt('--dec', workflow.cp.get('workflow', 'dec'))
node.add_opt('--sky-error', workflow.cp.get('workflow', 'sky-error'))
node.add_opt('--ifos', ' '.join(workflow.ifos))
node.new_output_file_opt(workflow.analysis_time, '.html',
'--output-file', tags=extra_tags)
# Add job node to workflow
workflow += node
return node, node.output_files
def make_pygrb_injs_tables(workflow, out_dir, # exclude=None, require=None,
inj_set=None, tags=None):
"""Adds a PyGRB job to make quiet-found and missed-found injection tables.
"""
tags = [] if tags is None else tags
    # Executable
exec_name = 'pygrb_page_tables'
# Initialize job node
grb_name = workflow.cp.get('workflow', 'trigger-name')
extra_tags = ['GRB'+grb_name]
# TODO: why is inj_set repeated twice in output files?
if inj_set is not None:
extra_tags.append(inj_set)
node = PlotExecutable(workflow.cp, exec_name,
ifos=workflow.ifos, out_dir=out_dir,
tags=tags+extra_tags).create_node()
# Pass the veto and segment files and options
if workflow.cp.has_option('workflow', 'veto-files'):
veto_files = build_veto_filelist(workflow)
node.add_input_list_opt('--veto-files', veto_files)
trig_time = workflow.cp.get('workflow', 'trigger-time')
node.add_opt('--trigger-time', trig_time)
# Other shared tuning values
for opt in ['chisq-index', 'chisq-nhigh', 'null-snr-threshold',
'veto-category', 'snr-threshold', 'newsnr-threshold',
'sngl-snr-threshold', 'null-grad-thresh', 'null-grad-val']:
if workflow.cp.has_option('workflow', opt):
node.add_opt('--'+opt, workflow.cp.get('workflow', opt))
# Handle input/output for injections
if inj_set:
# Found-missed injection file (passed as File instance)
fm_file = configparser_value_to_file(workflow.cp,
'injections-'+inj_set,
'found-missed-file')
node.add_input_opt('--found-missed-file', fm_file)
# Missed-found and quiet-found injections html output files
for mf_or_qf in ['missed-found', 'quiet-found']:
mf_or_qf_tags = [mf_or_qf.upper().replace('-', '_')]
node.new_output_file_opt(workflow.analysis_time, '.html',
'--'+mf_or_qf+'-injs-output-file',
tags=extra_tags+mf_or_qf_tags)
# Quiet-found injections h5 output file
node.new_output_file_opt(workflow.analysis_time, '.h5',
'--quiet-found-injs-h5-output-file',
tags=extra_tags+['QUIET_FOUND'])
# Handle input/output for onsource/offsource
else:
# Onsource input file (passed as File instance)
onsource_file = configparser_value_to_file(workflow.cp,
'workflow', 'onsource-file')
node.add_input_opt('--onsource-file', onsource_file)
# Loudest offsource triggers and onsource trigger html and h5 output
for src_type in ['onsource-trig', 'offsource-trigs']:
src_type_tags = [src_type.upper().replace('-', '_')]
node.new_output_file_opt(workflow.analysis_time, '.html',
'--loudest-'+src_type+'-output-file',
tags=extra_tags+src_type_tags)
node.new_output_file_opt(workflow.analysis_time, '.h5',
'--loudest-'+src_type+'-h5-output-file',
tags=extra_tags+src_type_tags)
# Add job node to the workflow
workflow += node
return node, node.output_files
# Based on setup_single_det_minifollowups
def setup_pygrb_minifollowups(workflow, followups_file,
dax_output, out_dir,
trig_file=None, tags=None):
"""Create plots that followup the the loudest PyGRB triggers or
missed injections from an HDF file.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
followups_file: pycbc.workflow.File
The File class holding the triggers/injections.
dax_output: The directory that will contain the dax file.
out_dir: path
The directory to store minifollowups result plots and files
tags: {None, optional}
Tags to add to the minifollowups executables
"""
tags = [] if tags is None else tags
# _workflow.makedir(dax_output)
makedir(dax_output)
# Turn the config file into a File instance
# curr_ifo = single_trig_file.ifo
# config_path = os.path.abspath(dax_output + '/' + curr_ifo + \
config_path = os.path.abspath(dax_output + '/' + \
'_'.join(tags) + '_minifollowup.ini')
workflow.cp.write(open(config_path, 'w'))
config_file = resolve_url_to_file(config_path)
# wikifile = curr_ifo + '_'.join(tags) + 'loudest_table.txt'
wikifile = '_'.join(tags) + 'loudest_table.txt'
# Create the node
exe = Executable(workflow.cp, 'pygrb_minifollowups',
ifos=workflow.ifos, out_dir=dax_output,
tags=tags)
node = exe.create_node()
# Grab and pass all necessary files
if trig_file is not None:
node.add_input_opt('--trig-file', trig_file)
if workflow.cp.has_option('workflow', 'veto-files'):
veto_files = build_veto_filelist(workflow)
node.add_input_list_opt('--veto-files', veto_files)
trig_time = workflow.cp.get('workflow', 'trigger-time')
node.add_opt('--trigger-time', trig_time)
node.add_input_opt('--config-files', config_file)
node.add_input_opt('--followups-file', followups_file)
node.add_opt('--wiki-file', wikifile)
if tags:
node.add_list_opt('--tags', tags)
node.new_output_file_opt(workflow.analysis_time, '.dax', '--dax-file')
node.new_output_file_opt(workflow.analysis_time, '.dax.map',
'--output-map')
name = node.output_files[0].name
assert name.endswith('.dax')
map_file = node.output_files[1]
assert map_file.name.endswith('.map')
node.add_opt('--workflow-name', name)
node.add_opt('--output-dir', out_dir)
workflow += node
# Execute this in a sub-workflow
fil = node.output_files[0]
job = SubWorkflow(fil.name, is_planned=False)
job.set_subworkflow_properties(map_file,
staging_site=workflow.staging_site,
cache_file=workflow.cache_file)
job.add_into_workflow(workflow)
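    # A minimal usage sketch for this helper. The variable names (wflow,
    # loudest_file, offsource_trigs) and the directory/tag values are
    # hypothetical and stand in for objects built earlier in a PyGRB
    # workflow script:
    #   setup_pygrb_minifollowups(wflow, loudest_file, 'daxes',
    #                             'minifollowups',
    #                             trig_file=offsource_trigs,
    #                             tags=['OFFSOURCE'])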
def setup_pygrb_results_workflow(workflow, res_dir, trig_file,
inj_files, tags=None,
explicit_dependencies=None):
"""Create subworkflow to produce plots, tables,
and results webpage for a PyGRB analysis.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
res_dir: The post-processing directory where
results (plots, etc.) will be stored
trig_file: The triggers File object
inj_files: FileList of injection results
tags: {None, optional}
Tags to add to the executables
    explicit_dependencies: nodes that must precede this node
"""
tags = [] if tags is None else tags
dax_output = res_dir+'/webpage_daxes'
# _workflow.makedir(dax_output)
makedir(dax_output)
# Turn the config file into a File instance
# config_path = os.path.abspath(dax_output + '/' + \
# '_'.join(tags) + 'webpage.ini')
# workflow.cp.write(open(config_path, 'w'))
# config_file = resolve_url_to_file(config_path)
# Create the node
exe = Executable(workflow.cp, 'pygrb_pp_workflow',
ifos=workflow.ifos, out_dir=dax_output,
tags=tags)
node = exe.create_node()
# Grab and pass all necessary files
node.add_input_opt('--trig-file', trig_file)
if workflow.cp.has_option('workflow', 'veto-files'):
veto_files = build_veto_filelist(workflow)
node.add_input_list_opt('--veto-files', veto_files)
# node.add_input_opt('--config-files', config_file)
node.add_input_list_opt('--inj-files', inj_files)
if tags:
node.add_list_opt('--tags', tags)
node.new_output_file_opt(workflow.analysis_time, '.dax',
'--dax-file', tags=tags)
node.new_output_file_opt(workflow.analysis_time, '.map',
'--output-map', tags=tags)
# + ['MAP'], use_tmp_subdirs=True)
name = node.output_files[0].name
assert name.endswith('.dax')
map_file = node.output_files[1]
assert map_file.name.endswith('.map')
node.add_opt('--workflow-name', name)
# This is the output dir for the products of this node, namely dax and map
node.add_opt('--output-dir', res_dir)
node.add_opt('--dax-file-directory', '.')
# Turn the config file into a File instance
config_path = os.path.abspath(dax_output + '/' + \
'_'.join(tags) + 'webpage.ini')
workflow.cp.write(open(config_path, 'w'))
config_file = resolve_url_to_file(config_path)
node.add_input_opt('--config-files', config_file)
# Track additional ini file produced by pycbc_pygrb_pp_workflow
out_file = File(workflow.ifos, 'pygrb_pp_workflow', workflow.analysis_time,
file_url=os.path.join(dax_output, name+'.ini'))
node.add_output(out_file)
    # Add the node to the workflow
workflow += node
if explicit_dependencies is not None:
for dep in explicit_dependencies:
workflow.add_explicit_dependancy(dep, node)
# Execute this in a sub-workflow
job = SubWorkflow(name, is_planned=False) # , _id='results')
job.set_subworkflow_properties(map_file,
staging_site=workflow.staging_site,
cache_file=workflow.cache_file)
job.add_into_workflow(workflow)
return node.output_files
| 33,027
| 39.47549
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/core.py
|
# Copyright (C) 2013, 2017 Ian Harry, Alex Nitz, Duncan Brown
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides the worker functions and classes that are used when
creating a workflow. For details about the workflow module see here:
https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope.html
"""
import os, stat, subprocess, logging, math, string, urllib, pickle, copy
import configparser as ConfigParser
from urllib.request import pathname2url
from urllib.parse import urljoin
import numpy, random
from itertools import combinations, groupby, permutations
from operator import attrgetter
import lal
import lal.utils
import Pegasus.api # Try and move this into pegasus_workflow
from glue import lal as gluelal
from ligo import segments
from ligo.lw import lsctables, ligolw
from ligo.lw import utils as ligolw_utils
from ligo.lw.utils import segments as ligolw_segments
from pycbc import makedir
from pycbc.io.ligolw import LIGOLWContentHandler, create_process_table
from . import pegasus_workflow
from .configuration import WorkflowConfigParser, resolve_url
from .pegasus_sites import make_catalog
def make_analysis_dir(path):
"""
Make the analysis directory path, any parent directories that don't already
exist, and the 'logs' subdirectory of path.
"""
if path is not None:
makedir(os.path.join(path, 'logs'))
file_input_from_config_dict = {}
class Executable(pegasus_workflow.Executable):
# These are the file retention levels
INTERMEDIATE_PRODUCT = 1
ALL_TRIGGERS = 2
MERGED_TRIGGERS = 3
FINAL_RESULT = 4
# Set this parameter to indicate that this option is used to specify a
# file and is *not* handled explicitly in the create_node or __init__
# methods of the sub-class. Usually that is to say that this option is a
    # file and is normally specified in an ini file, e.g. a PSD file. As files
# need to be identified as such to pegasus, this attempts to catch this
# case.
# These are standard file input arguments used in PyCBC, so we declare
# these as files if given to any PyCBC job.
file_input_options = ['--gating-file', '--frame-files', '--injection-file',
'--statistic-files', '--bank-file', '--config-files',
'--psd-file', '--asd-file',
'--fake-strain-from-file',
'--sgburst-injection-file']
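    # For instance, a (hypothetical) configuration entry such as
    #   [inspiral]
    #   psd-file = /path/to/H1-PSD.txt
    # would be picked up as '--psd-file' and registered as a Pegasus input
    # file rather than passed through as a plain string.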
# Set this parameter to indicate that this option should take different
# values based on the time. E.g. something like
# --option1 value1[0:1000],value2[1000:2000]
# would be replaced with --option1 value1 if the time is within 0,1000 and
# value2 if in 1000,2000. A failure will be replaced if the job time is
# not fully contained in one of these windows, or if fully contained in
# multiple of these windows. This is resolved when creating the Job from
# the Executable
time_dependent_options = []
# This is the default value. It will give a warning if a class is
# used where the retention level is not set. The file will still be stored
KEEP_BUT_RAISE_WARNING = 5
_warned_classes_list = ['Executable']
    # Sub classes, or instances, should override this. If not overridden the
# file will be retained, but a warning given
current_retention_level = KEEP_BUT_RAISE_WARNING
def __init__(self, cp, name, ifos=None, out_dir=None, tags=None,
reuse_executable=True, set_submit_subdir=True):
"""
Initialize the Executable class.
Parameters
-----------
cp : ConfigParser object
The ConfigParser object holding the workflow configuration settings
        name : string
            The executable name, as given in the [executables] section of
            the configuration file.
ifos : string or list, optional
The ifo(s) that the Job is valid for. If the job is
independently valid for multiple ifos it can be provided as a list.
            I.e. ['H1', 'L1', 'V1'], if the job is only valid for the combination
of ifos (for e.g. ligolw_thinca) then this can be supplied
as, for e.g. "H1L1V1".
out_dir: path, optional
The folder to store output files of this job.
tags : list of strings
A list of strings that is used to identify this job.
"""
if isinstance(ifos, str):
self.ifo_list = [ifos]
else:
self.ifo_list = ifos
if self.ifo_list is not None:
self.ifo_list = sorted(self.ifo_list)
self.ifo_string = ''.join(self.ifo_list)
else:
self.ifo_string = None
self.cp = cp
self.name = name
self.container_cls = None
self.container_type = None
try:
self.installed = cp.getboolean('pegasus_profile-%s' % name,
'pycbc|installed')
except:
self.installed = False
self.update_current_tags(tags)
self.update_output_directory(out_dir=out_dir)
# Determine the level at which output files should be kept
self.update_current_retention_level(self.current_retention_level)
# Should I reuse this executable?
if reuse_executable:
self.pegasus_name = self.name
else:
self.pegasus_name = self.tagged_name
# Check that the executable actually exists locally or
# looks like a URL, in which case trust Pegasus to be
# able to fetch it.
exe_path = cp.get('executables', name)
self.needs_fetching = False
exe_url = urllib.parse.urlparse(exe_path)
# See if the user specified a list of sites for the executable
# Ordering is:
# 1) Check if a specific site for this Executable is set.
# 2) Check is primary_site is set globally.
# 3) Use condorpool_symlink as a fallback.
self.exe_pfns = {}
if cp.has_option_tags('pegasus_profile-%s' % name, 'pycbc|site', tags):
exe_site = cp.get_opt_tags('pegasus_profile-%s' % name,
'pycbc|site', tags)
elif cp.has_option('pegasus_profile', 'pycbc|primary_site'):
exe_site = cp.get('pegasus_profile', 'pycbc|primary_site')
else:
exe_site = 'condorpool_symlink'
exe_site = exe_site.strip()
if exe_url.scheme in ['', 'file']:
# NOTE: There could be a case where the exe is available at a
# remote site, but not on the submit host. Currently allowed
# for the OSG site, versioning will not work as planned if
# we can't see the executable (can we perhaps run versioning
# including singularity??)
# Check that executables at file urls
# on the local site exist
if os.path.isfile(exe_url.path) is False:
raise TypeError("Failed to find %s executable "
"at %s on site %s" % (name, exe_path,
exe_site))
elif exe_url.scheme == 'singularity':
# Will use an executable within a singularity container. Don't
# need to do anything here, as I cannot easily check it exists.
exe_path = exe_url.path
else:
# Could be http, https, etc. so it needs fetching if run now
self.needs_fetching = True
if self.needs_fetching and not self.installed:
err_msg = "Non-file path URLs cannot be used unless the "
err_msg += "executable is a bundled standalone executable. "
err_msg += "If this is the case, then add the "
err_msg += "pycbc.installed=True property."
raise ValueError(err_msg)
if self.installed:
# Is installed, so copy from local site, like other inputs
self.exe_pfns['local'] = exe_path
else:
# We must rely on the executables, and accompanying libraries,
# being directly accessible on the execution site.
# CVMFS is perfect for this! As is singularity.
self.exe_pfns[exe_site] = exe_path
logging.info("Using %s executable "
"at %s on site %s" % (name, exe_url.path, exe_site))
# FIXME: This hasn't yet been ported to pegasus5 and won't work.
# Pegasus describes two ways to work with containers, and I need
# to figure out which is most appropriate and use that.
# Determine if this executables should be run in a container
try:
self.container_type = cp.get('pegasus_profile-%s' % name,
'container|type')
except:
pass
if self.container_type is not None:
# FIXME: Move the actual container setup into pegasus_workflow
self.container_img = cp.get('pegasus_profile-%s' % name,
'container|image')
try:
self.container_site = cp.get('pegasus_profile-%s' % name,
'container|image_site')
except:
self.container_site = 'local'
try:
self.container_mount = cp.get('pegasus_profile-%s' % name,
'container|mount').split(',')
except:
self.container_mount = None
self.container_cls = Pegasus.api.Container("{}-container".format(
name),
self.container_type,
self.container_img,
imagesite=self.container_site,
mount=self.container_mount)
super(Executable, self).__init__(self.pegasus_name,
installed=self.installed,
container=self.container_cls)
else:
super(Executable, self).__init__(self.pegasus_name,
installed=self.installed)
if hasattr(self, "group_jobs"):
self.add_profile('pegasus', 'clusters.size', self.group_jobs)
# This sets up the sub-directory to use in the submit directory
if set_submit_subdir:
self.add_profile('pegasus', 'relative.submit.dir',
self.pegasus_name)
# Set configurations from the config file, these should override all
# other settings
self._set_pegasus_profile_options()
self.execution_site = exe_site
self.executable_url = exe_path
@property
def ifo(self):
"""Return the ifo.
If only one ifo in the ifo list this will be that ifo. Otherwise an
error is raised.
"""
if self.ifo_list and len(self.ifo_list) == 1:
return self.ifo_list[0]
else:
errMsg = "self.ifoList must contain only one ifo to access the "
errMsg += "ifo property. %s." %(str(self.ifo_list),)
raise TypeError(errMsg)
def get_transformation(self):
if self.execution_site in self.transformations:
return self.transformations[self.execution_site]
else:
self.create_transformation(self.execution_site,
self.executable_url)
return self.get_transformation()
def add_ini_profile(self, cp, sec):
"""Add profile from configuration file.
Parameters
-----------
cp : ConfigParser object
The ConfigParser object holding the workflow configuration settings
sec : string
The section containing options for this job.
"""
for opt in cp.options(sec):
namespace = opt.split('|')[0]
if namespace == 'pycbc' or namespace == 'container':
continue
value = cp.get(sec, opt).strip()
key = opt.split('|')[1]
self.add_profile(namespace, key, value)
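        # A sketch of a configuration section consumed by this method; the
        # section name and values are examples only:
        #   [pegasus_profile-inspiral]
        #   condor|request_memory = 2000
        #   condor|request_cpus = 1
        # Each entry becomes add_profile('condor', 'request_memory', '2000'),
        # and so on.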
def _add_ini_opts(self, cp, sec, ignore_existing=False):
"""Add job-specific options from configuration file.
Parameters
-----------
cp : ConfigParser object
The ConfigParser object holding the workflow configuration
settings
sec : string
The section containing options for this job.
"""
for opt in cp.options(sec):
if opt in self.all_added_options:
if ignore_existing:
continue
else:
raise ValueError("Option %s has already been added" % opt)
self.all_added_options.add(opt)
value = cp.get(sec, opt).strip()
opt = f'--{opt}'
if opt in self.file_input_options:
# This now expects the option to be a file
# Check if we have a list of files
values = [path for path in value.split(' ') if path]
self.common_raw_options.append(opt)
self.common_raw_options.append(' ')
# Get LFN and PFN
for path in values:
# Here I decide if the path is URL or
# IFO:/path/to/file or IFO:url://path/to/file
# That's somewhat tricksy as we used : as delimiter
split_path = path.split(':', 1)
if len(split_path) == 1:
ifo = None
path = path
else:
# Have I split a URL or not?
if split_path[1].startswith('//'):
# URL
ifo = None
path = path
else:
#IFO:path or IFO:URL
ifo = split_path[0]
path = split_path[1]
# If the file exists make sure to use the
                    # full path as a file:// URL
if os.path.isfile(path):
curr_pfn = urljoin('file:',
pathname2url(os.path.abspath(path)))
else:
curr_pfn = path
curr_file = resolve_url_to_file(curr_pfn)
self.common_input_files.append(curr_file)
if ifo:
self.common_raw_options.append(ifo + ':')
self.common_raw_options.append(curr_file.dax_repr)
else:
self.common_raw_options.append(curr_file.dax_repr)
self.common_raw_options.append(' ')
elif opt in self.time_dependent_options:
# There is a possibility of time-dependent, file options.
# For now we will avoid supporting that complication unless
# it is needed. This would require resolving the file first
# in this function, and then dealing with the time-dependent
# stuff later.
self.unresolved_td_options[opt] = value
else:
# This option comes from the config file(s)
self.common_options += [opt, value]
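        # The file-option handling above accepts several value forms for an
        # option listed in file_input_options (paths and URLs below are
        # purely illustrative):
        #   psd-file = /data/H1-PSD.txt             -> local path
        #   psd-file = https://example.org/psd.txt  -> URL, resolved later
        #   psd-file = H1:/data/H1-PSD.txt          -> IFO-prefixed path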
def add_opt(self, opt, value=None):
"""Add option to job.
Parameters
-----------
opt : string
Name of option (e.g. --output-file-format)
value : string, (default=None)
The value for the option (no value if set to None).
"""
if value is None:
self.common_options += [opt]
else:
self.common_options += [opt, value]
def get_opt(self, opt):
"""Get value of option from configuration file
Parameters
-----------
opt : string
Name of option (e.g. output-file-format)
Returns
--------
value : string
The value for the option. Returns None if option not present.
"""
for sec in self.sections:
try:
key = self.cp.get(sec, opt)
if key:
return key
except ConfigParser.NoOptionError:
pass
return None
def has_opt(self, opt):
"""Check if option is present in configuration file
Parameters
-----------
opt : string
Name of option (e.g. output-file-format)
"""
for sec in self.sections:
val = self.cp.has_option(sec, opt)
if val:
return val
return False
def create_node(self, **kwargs):
"""Default node constructor.
This is usually overridden by subclasses of Executable.
"""
return Node(self, **kwargs)
def update_current_retention_level(self, value):
"""Set a new value for the current retention level.
This updates the value of self.retain_files for an updated value of the
retention level.
Parameters
-----------
value : int
The new value to use for the retention level.
"""
# Determine the level at which output files should be kept
self.current_retention_level = value
try:
global_retention_level = \
self.cp.get_opt_tags("workflow", "file-retention-level",
self.tags+[self.name])
except ConfigParser.Error:
msg="Cannot find file-retention-level in [workflow] section "
msg+="of the configuration file. Setting a default value of "
msg+="retain all files."
logging.warn(msg)
self.retain_files = True
self.global_retention_threshold = 1
self.cp.set("workflow", "file-retention-level", "all_files")
else:
# FIXME: Are these names suitably descriptive?
retention_choices = {
'all_files' : 1,
'all_triggers' : 2,
'merged_triggers' : 3,
'results' : 4
}
try:
self.global_retention_threshold = \
retention_choices[global_retention_level]
except KeyError:
err_msg = "Cannot recognize the file-retention-level in the "
err_msg += "[workflow] section of the ini file. "
err_msg += "Got : {0}.".format(global_retention_level)
err_msg += "Valid options are: 'all_files', 'all_triggers',"
err_msg += "'merged_triggers' or 'results' "
raise ValueError(err_msg)
if self.current_retention_level == 5:
self.retain_files = True
if type(self).__name__ in Executable._warned_classes_list:
pass
else:
warn_msg = "Attribute current_retention_level has not "
warn_msg += "been set in class {0}. ".format(type(self))
warn_msg += "This value should be set explicitly. "
warn_msg += "All output from this class will be stored."
logging.warn(warn_msg)
Executable._warned_classes_list.append(type(self).__name__)
elif self.global_retention_threshold > self.current_retention_level:
self.retain_files = False
else:
self.retain_files = True
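        # As an example illustration of the threshold logic above, the
        # configuration setting
        #   [workflow]
        #   file-retention-level = merged_triggers
        # sets the global threshold to 3, so only files from classes with
        # MERGED_TRIGGERS or FINAL_RESULT retention levels are kept.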
def update_current_tags(self, tags):
"""Set a new set of tags for this executable.
        Update the set of tags that this job will use. This updates the
        default file naming and shared options. It will *not* update the
        pegasus profile, which belongs to the executable and cannot be
        different for different nodes.
Parameters
-----------
tags : list
The new list of tags to consider.
"""
if tags is None:
tags = []
if '' in tags:
logging.warn('DO NOT GIVE ME EMPTY TAGS (in %s)', self.name)
tags.remove('')
tags = [tag.upper() for tag in tags]
self.tags = tags
if len(tags) > 6:
warn_msg = "This job has way too many tags. "
warn_msg += "Current tags are {}. ".format(' '.join(tags))
warn_msg += "Current executable {}.".format(self.name)
logging.info(warn_msg)
if len(tags) != 0:
self.tagged_name = "{0}-{1}".format(self.name, '_'.join(tags))
else:
self.tagged_name = self.name
if self.ifo_string is not None:
self.tagged_name = "{0}-{1}".format(self.tagged_name,
self.ifo_string)
# Determine the sections from the ini file that will configure
# this executable
sections = [self.name]
if self.ifo_list is not None:
if len(self.ifo_list) > 1:
sec_tags = tags + self.ifo_list + [self.ifo_string]
else:
sec_tags = tags + self.ifo_list
else:
sec_tags = tags
for sec_len in range(1, len(sec_tags)+1):
for tag_permutation in permutations(sec_tags, sec_len):
joined_name = '-'.join(tag_permutation)
section = '{0}-{1}'.format(self.name, joined_name.lower())
if self.cp.has_section(section):
sections.append(section)
self.sections = sections
# Do some basic sanity checking on the options
for sec1, sec2 in combinations(sections, 2):
self.cp.check_duplicate_options(sec1, sec2, raise_error=True)
# collect the options and profile information
# from the ini file section(s)
self.all_added_options = set()
self.common_options = []
self.common_raw_options = []
self.unresolved_td_options = {}
self.common_input_files = []
for sec in sections:
if self.cp.has_section(sec):
self._add_ini_opts(self.cp, sec)
else:
warn_string = "warning: config file is missing section "
warn_string += "[{0}]".format(sec)
logging.warn(warn_string)
# get uppermost section
if self.cp.has_section(f'{self.name}-defaultvalues'):
self._add_ini_opts(self.cp, f'{self.name}-defaultvalues',
ignore_existing=True)
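        # As a concrete (hypothetical) illustration of the section lookup
        # above: an executable named 'inspiral' with ifos=['H1'] and
        # tags=['FULL_DATA'] is configured from whichever of the sections
        #   [inspiral], [inspiral-full_data], [inspiral-h1],
        #   [inspiral-full_data-h1], [inspiral-h1-full_data]
        # exist in the configuration file, plus [inspiral-defaultvalues]
        # for options not already set.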
def update_output_directory(self, out_dir=None):
"""Update the default output directory for output files.
Parameters
-----------
out_dir : string (optional, default=None)
If provided use this as the output directory. Else choose this
automatically from the tags.
"""
# Determine the output directory
if out_dir is not None:
self.out_dir = out_dir
elif len(self.tags) == 0:
self.out_dir = self.name
else:
self.out_dir = self.tagged_name
if not os.path.isabs(self.out_dir):
self.out_dir = os.path.join(os.getcwd(), self.out_dir)
def _set_pegasus_profile_options(self):
"""Set the pegasus-profile settings for this Executable.
These are a property of the Executable and not of nodes that it will
spawn. Therefore it *cannot* be updated without also changing values
for nodes that might already have been created. Therefore this is
only called once in __init__. Second calls to this will fail.
"""
# Executable- and tag-specific profile information
for sec in self.sections:
if self.cp.has_section('pegasus_profile-{0}'.format(sec)):
self.add_ini_profile(self.cp,
'pegasus_profile-{0}'.format(sec))
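    # A minimal sketch of constructing an Executable from a parsed workflow
    # configuration. The executable name, output directory and tags below
    # are hypothetical; the only requirement is that cp lists the name in
    # its [executables] section:
    #   exe = Executable(wflow.cp, 'pygrb_efficiency', ifos=wflow.ifos,
    #                    out_dir='results', tags=['ONSOURCE'])
    #   node = exe.create_node()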
class Workflow(pegasus_workflow.Workflow):
"""
This class manages a pycbc workflow. It provides convenience
functions for finding input files using time and keywords. It can also
generate cache files from the inputs.
"""
def __init__(self, args, name=None):
"""
Create a pycbc workflow
Parameters
----------
args : argparse.ArgumentParser
The command line options to initialize a CBC workflow.
"""
# Parse ini file
self.cp = WorkflowConfigParser.from_cli(args)
self.args = args
if hasattr(args, 'dax_file'):
dax_file = args.dax_file or None
else:
dax_file = None
if hasattr(args, 'dax_file_directory'):
output_dir = args.dax_file_directory or args.output_dir or None
else:
output_dir = args.output_dir or None
super(Workflow, self).__init__(
name=name if name is not None else args.workflow_name,
directory=output_dir,
cache_file=args.cache_file,
dax_file_name=dax_file,
)
# Set global values
start_time = end_time = 0
if self.cp.has_option('workflow', 'start-time'):
start_time = int(self.cp.get("workflow", "start-time"))
if self.cp.has_option('workflow', 'end-time'):
end_time = int(self.cp.get("workflow", "end-time"))
self.analysis_time = segments.segment([start_time, end_time])
# Set the ifos to analyse
ifos = []
if self.cp.has_section('workflow-ifos'):
for ifo in self.cp.options('workflow-ifos'):
ifos.append(ifo.upper())
self.ifos = ifos
self.ifos.sort(key=str.lower)
self.get_ifo_combinations()
self.ifo_string = ''.join(self.ifos)
# Set up input and output file lists for workflow
self._inputs = FileList([])
self._outputs = FileList([])
# FIXME: Should this be in pegasus_workflow?
@property
def output_map(self):
args = self.args
if hasattr(args, 'output_map') and args.output_map is not None:
return args.output_map
if self.in_workflow is not False:
name = self.name + '.map'
else:
name = 'output.map'
path = os.path.join(self.out_dir, name)
return path
@property
def sites(self):
"""List of all possible exucution sites for jobs in this workflow"""
sites = set()
sites.add('local')
if self.cp.has_option('pegasus_profile', 'pycbc|primary_site'):
site = self.cp.get('pegasus_profile', 'pycbc|primary_site')
else:
# The default if not chosen
site = 'condorpool_symlink'
sites.add(site)
subsections = [sec for sec in self.cp.sections()
if sec.startswith('pegasus_profile-')]
for subsec in subsections:
if self.cp.has_option(subsec, 'pycbc|site'):
site = self.cp.get(subsec, 'pycbc|site')
sites.add(site)
return list(sites)
@property
def staging_site(self):
"""Site to use for staging to/from each site"""
staging_site = {}
for site in self.sites:
if site in ['condorpool_shared']:
staging_site[site] = site
else:
staging_site[site] = 'local'
return staging_site
@property
def staging_site_str(self):
return ','.join(['='.join(x) for x in self.staging_site.items()])
@property
def exec_sites_str(self):
return ','.join(self.sites)
def execute_node(self, node, verbatim_exe = False):
""" Execute this node immediately on the local machine
"""
node.executed = True
# Check that the PFN is for a file or path
if node.executable.needs_fetching:
try:
# The pfn may have been marked local...
pfn = node.executable.get_pfn()
except:
# or it may have been marked nonlocal. That's
# fine, we'll resolve the URL and make a local
# entry.
pfn = node.executable.get_pfn('nonlocal')
resolved = resolve_url(
pfn,
permissions=stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
)
node.executable.clear_pfns()
node.executable.add_pfn(urljoin('file:', pathname2url(resolved)),
site='local')
cmd_list = node.get_command_line()
# Must execute in output directory.
curr_dir = os.getcwd()
out_dir = node.executable.out_dir
os.chdir(out_dir)
# Make call
make_external_call(cmd_list, out_dir=os.path.join(out_dir, 'logs'),
out_basename=node.executable.name)
# Change back
os.chdir(curr_dir)
for fil in node._outputs:
fil.node = None
fil.add_pfn(urljoin('file:', pathname2url(fil.storage_path)),
site='local')
def save(self, filename=None, output_map_path=None, root=True):
# FIXME: Too close to pegasus to live here and not in pegasus_workflow
if output_map_path is None:
output_map_path = self.output_map
output_map_file = pegasus_workflow.File(os.path.basename(output_map_path))
output_map_file.add_pfn(output_map_path, site='local')
self.output_map_file = output_map_file
if self.in_workflow:
self._as_job.set_subworkflow_properties(
output_map_file,
staging_site=self.staging_site,
cache_file=self.cache_file
)
self._as_job.add_planner_args(**self._as_job.pycbc_planner_args)
# add transformations to dax
for transform in self._transformations:
self.add_transformation(transform)
for container in self._containers:
self.add_container(container)
# save the configuration file
ini_file = os.path.join(self.out_dir, self.name + '.ini')
# This shouldn't already exist, but just in case
if os.path.isfile(ini_file):
err_msg = "Refusing to overwrite configuration file that "
err_msg += "shouldn't be there: "
err_msg += ini_file
raise ValueError(err_msg)
with open(ini_file, 'w') as fp:
self.cp.write(fp)
# save the sites file
#FIXME change to check also for submit_now if we drop pycbc_submit_dax
# this would prevent sub-workflows from making extra unused sites.yml
if not self.in_workflow:
catalog_path = os.path.join(self.out_dir, 'sites.yml')
make_catalog(self.cp, self.out_dir).write(catalog_path)
# save the dax file
super(Workflow, self).save(filename=filename,
output_map_path=output_map_path,
submit_now=self.args.submit_now,
plan_now=self.args.plan_now,
root=root)
def save_config(self, fname, output_dir, cp=None):
""" Writes configuration file to disk and returns a pycbc.workflow.File
instance for the configuration file.
Parameters
-----------
fname : string
The filename of the configuration file written to disk.
output_dir : string
The directory where the file is written to disk.
cp : ConfigParser object
The ConfigParser object to write. If None then uses self.cp.
Returns
-------
FileList
The FileList object with the configuration file.
"""
cp = self.cp if cp is None else cp
ini_file_path = os.path.abspath(os.path.join(output_dir, fname))
with open(ini_file_path, "w") as fp:
cp.write(fp)
ini_file = File(self.ifos, "", self.analysis_time,
file_url="file://" + ini_file_path)
# set the physical file name
ini_file.add_pfn(ini_file_path, "local")
# set the storage path to be the same
ini_file.storage_path = ini_file_path
return FileList([ini_file])
def get_ifo_combinations(self):
"""
Get a list of strings for all possible combinations of IFOs
in the workflow
"""
self.ifo_combinations = []
for n in range(len(self.ifos)):
self.ifo_combinations += [''.join(ifos).lower() for ifos in
combinations(self.ifos, n + 1)]
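        # For example, with self.ifos = ['H1', 'L1', 'V1'] this yields
        # ['h1', 'l1', 'v1', 'h1l1', 'h1v1', 'l1v1', 'h1l1v1'].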
class Node(pegasus_workflow.Node):
def __init__(self, executable, valid_seg=None):
super(Node, self).__init__(executable.get_transformation())
self.executable = executable
self.executed = False
self.set_category(executable.name)
self.valid_seg = valid_seg
self._options += self.executable.common_options
self._raw_options += self.executable.common_raw_options
for inp in self.executable.common_input_files:
self.add_input(inp)
if len(self.executable.time_dependent_options):
# Resolving these options requires the concept of a valid time.
# To keep backwards compatibility we will allow this to work if
# valid_seg is not supplied and no option actually needs resolving.
# It would be good to get this from the workflow's valid_seg if
# not overriden. But the Node is not connected to the Workflow
# until the dax starts to be written.
self.resolve_td_options(self.executable.unresolved_td_options)
def get_command_line(self):
# FIXME: Put in pegasus_workflow??
self._finalize()
arglist = self._dax_node.arguments
tmpargs = []
for a in arglist:
if not isinstance(a, File):
tmpargs += a.split(' ')
else:
tmpargs.append(a)
arglist = tmpargs
arglist = [a for a in arglist if a != '']
arglist = [a.storage_path if isinstance(a, File) else a for a in arglist]
# This allows the pfn to be an http(s) URL, which will be
# downloaded by resolve_url
exe_path = urllib.parse.urlsplit(self.executable.get_pfn()).path
return [exe_path] + arglist
def new_output_file_opt(self, valid_seg, extension, option_name, tags=None,
store_file=None, use_tmp_subdirs=False):
"""
This function will create a workflow.File object corresponding to the given
information and then add that file as output of this node.
Parameters
-----------
valid_seg : ligo.segments.segment
The time span over which the job is valid for.
extension : string
The extension to be used at the end of the filename.
E.g. '.xml' or '.sqlite'.
option_name : string
The option that is used when setting this job as output. For e.g.
'output-name' or 'output-file', whatever is appropriate for the
current executable.
tags : list of strings, (optional, default=[])
These tags will be added to the list of tags already associated with
the job. They can be used to uniquely identify this output file.
store_file : Boolean, (optional, default=True)
This file is to be added to the output mapper and will be stored
in the specified output location if True. If false file will be
removed when no longer needed in the workflow.
"""
if tags is None:
tags = []
# Changing this from set(tags) to enforce order. It might make sense
# for all jobs to have file names with tags in the same order.
all_tags = copy.deepcopy(self.executable.tags)
for tag in tags:
if tag not in all_tags:
all_tags.append(tag)
store_file = store_file if store_file is not None else self.executable.retain_files
fil = File(self.executable.ifo_list, self.executable.name,
valid_seg, extension=extension, store_file=store_file,
directory=self.executable.out_dir, tags=all_tags,
use_tmp_subdirs=use_tmp_subdirs)
self.add_output_opt(option_name, fil)
return fil
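        # Illustrative call from node-creation code; the option name,
        # extension and tag are hypothetical:
        #   out = node.new_output_file_opt(workflow.analysis_time, '.hdf',
        #                                  '--output-file',
        #                                  tags=['CLUSTERED'])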
def add_multiifo_input_list_opt(self, opt, inputs):
""" Add an option that determines a list of inputs from multiple
detectors. Files will be supplied as --opt ifo1:input1 ifo2:input2
.....
"""
# NOTE: Here we have to use the raw arguments functionality as the
# file and ifo are not space separated.
self.add_raw_arg(opt)
self.add_raw_arg(' ')
for infile in inputs:
self.add_raw_arg(infile.ifo)
self.add_raw_arg(':')
self.add_raw_arg(infile.name)
self.add_raw_arg(' ')
self.add_input(infile)
def add_multiifo_output_list_opt(self, opt, outputs):
""" Add an option that determines a list of outputs from multiple
detectors. Files will be supplied as --opt ifo1:input1 ifo2:input2
.....
"""
# NOTE: Here we have to use the raw arguments functionality as the
# file and ifo are not space separated.
self.add_raw_arg(opt)
self.add_raw_arg(' ')
for outfile in outputs:
self.add_raw_arg(outfile.ifo)
self.add_raw_arg(':')
self.add_raw_arg(outfile.name)
self.add_raw_arg(' ')
self.add_output(outfile)
def new_multiifo_output_list_opt(self, opt, ifos, analysis_time, extension,
tags=None, store_file=None,
use_tmp_subdirs=False):
""" Add an option that determines a list of outputs from multiple
detectors. Files will be supplied as --opt ifo1:input1 ifo2:input2
.....
File names are created internally from the provided extension and
analysis time.
"""
if tags is None:
tags = []
all_tags = copy.deepcopy(self.executable.tags)
for tag in tags:
if tag not in all_tags:
all_tags.append(tag)
output_files = FileList([])
store_file = store_file if store_file is not None \
else self.executable.retain_files
for ifo in ifos:
curr_file = File(ifo, self.executable.name, analysis_time,
extension=extension, store_file=store_file,
directory=self.executable.out_dir, tags=all_tags,
use_tmp_subdirs=use_tmp_subdirs)
output_files.append(curr_file)
self.add_multiifo_output_list_opt(opt, output_files)
def resolve_td_options(self, td_options):
for opt in td_options:
new_opt = resolve_td_option(td_options[opt], self.valid_seg)
self._options += [opt, new_opt]
@property
def output_files(self):
return FileList(self._outputs)
@property
def output_file(self):
"""
If only one output file return it. Otherwise raise an exception.
"""
out_files = self.output_files
if len(out_files) != 1:
err_msg = "output_file property is only valid if there is a single"
err_msg += " output file. Here there are "
err_msg += "%d output files." %(len(out_files))
raise ValueError(err_msg)
return out_files[0]
class File(pegasus_workflow.File):
'''
This class holds the details of an individual output file
This file(s) may be pre-supplied, generated from within the workflow
command line script, or generated within the workflow. The important stuff
is:
* The ifo that the File is valid for
* The time span that the OutFile is valid for
* A short description of what the file is
* The extension that the file should have
* The url where the file should be located
An example of initiating this class:
>> c = File("H1", "INSPIRAL_S6LOWMASS", segments.segment(815901601, 815902001), file_url="file://localhost/home/spxiwh/H1-INSPIRAL_S6LOWMASS-815901601-400.xml.gz" )
another where the file url is generated from the inputs:
>> c = File("H1", "INSPIRAL_S6LOWMASS", segments.segment(815901601, 815902001), directory="/home/spxiwh", extension="xml.gz" )
'''
def __init__(self, ifos, exe_name, segs, file_url=None,
extension=None, directory=None, tags=None,
store_file=True, use_tmp_subdirs=False):
"""
Create a File instance
Parameters
----------
ifos : string or list
The ifo(s) that the File is valid for. If the file is
independently valid for multiple ifos it can be provided as a list.
            I.e. ['H1', 'L1', 'V1'], if the file is only valid for the combination
of ifos (for e.g. ligolw_thinca output) then this can be supplied
as, for e.g. "H1L1V1".
exe_name: string
A short description of the executable description, tagging
only the program that ran this job.
segs : ligo.segments.segment or ligo.segments.segmentlist
The time span that the OutFile is valid for. Note that this is
*not* the same as the data that the job that made the file reads in.
            Lalapps_inspiral jobs do not analyse the first and last 72s of the
data that is read, and are therefore not valid at those times. If
the time is not continuous a segmentlist can be supplied.
file_url : url (optional, default=None)
If this is *not* supplied, extension and directory must be given.
If specified this explicitly points to the url of the file, or the
url where the file will be generated when made in the workflow.
extension : string (optional, default=None)
Either supply this *and* directory *or* supply only file_url.
If given this gives the extension at the end of the file name. The
full file name will be inferred from the other arguments
following the workflow standard.
directory : string (optional, default=None)
Either supply this *and* extension *or* supply only file_url.
If given this gives the directory in which the file exists, or will
exists. The file name will be inferred from the other arguments
following the workflow standard.
tags : list of strings (optional, default=None)
This is a list of descriptors describing what this file is. For
e.g. this might be ["BNSINJECTIONS" ,"LOWMASS","CAT_2_VETO"].
These are used in file naming.
"""
self.metadata = {}
# Set the science metadata on the file
if isinstance(ifos, str):
self.ifo_list = [ifos]
else:
self.ifo_list = ifos
self.ifo_string = ''.join(self.ifo_list)
self.description = exe_name
if isinstance(segs, segments.segment):
self.segment_list = segments.segmentlist([segs])
elif isinstance(segs, (segments.segmentlist)):
self.segment_list = segs
else:
err = "segs input must be either ligo.segments.segment or "
err += "segments.segmentlist. Got %s." %(str(type(segs)),)
raise ValueError(err)
if tags is None:
tags = []
if '' in tags:
logging.warn('DO NOT GIVE EMPTY TAGS (from %s)', exe_name)
tags.remove('')
self.tags = tags
if len(self.tags):
self.tag_str = '_'.join(tags)
tagged_description = '_'.join([self.description] + tags)
else:
tagged_description = self.description
# Follow the capitals-for-naming convention
self.ifo_string = self.ifo_string.upper()
self.tagged_description = tagged_description.upper()
if not file_url:
if not extension:
raise TypeError("a file extension required if a file_url "
"is not provided")
if not directory:
raise TypeError("a directory is required if a file_url is "
"not provided")
filename = self._filename(self.ifo_string, self.tagged_description,
extension, self.segment_list.extent())
path = os.path.join(directory, filename)
if not os.path.isabs(path):
path = os.path.join(os.getcwd(), path)
file_url = urllib.parse.urlunparse(['file', 'localhost', path,
None, None, None])
if use_tmp_subdirs and len(self.segment_list):
pegasus_lfn = str(int(self.segment_list.extent()[0]))[:-4]
pegasus_lfn = pegasus_lfn + '/' + os.path.basename(file_url)
else:
pegasus_lfn = os.path.basename(file_url)
super(File, self).__init__(pegasus_lfn)
if store_file:
self.storage_path = urllib.parse.urlsplit(file_url).path
else:
self.storage_path = None
def __getstate__(self):
""" Allow the workflow.File to be picklable. This disables the usage of
the internal cache entry.
"""
for i, seg in enumerate(self.segment_list):
self.segment_list[i] = segments.segment(float(seg[0]), float(seg[1]))
self.cache_entry = None
safe_dict = copy.copy(self.__dict__)
safe_dict['cache_entry'] = None
return safe_dict
# FIXME: This is a pegasus_workflow thing (don't think it's needed at all!)
# use the pegasus function directly (maybe not).
def add_metadata(self, key, value):
""" Add arbitrary metadata to this file """
self.metadata[key] = value
@property
def ifo(self):
"""
If only one ifo in the ifo_list this will be that ifo. Otherwise an
error is raised.
"""
if len(self.ifo_list) == 1:
return self.ifo_list[0]
else:
err = "self.ifo_list must contain only one ifo to access the "
err += "ifo property. %s." %(str(self.ifo_list),)
raise TypeError(err)
@property
def segment(self):
"""
If only one segment in the segmentlist this will be that segment.
Otherwise an error is raised.
"""
if len(self.segment_list) == 1:
return self.segment_list[0]
else:
err = "self.segment_list must only contain one segment to access"
err += " the segment property. %s." %(str(self.segment_list),)
raise TypeError(err)
@property
def cache_entry(self):
"""
Returns a CacheEntry instance for File.
"""
if self.storage_path is None:
raise ValueError('This file is temporary and so a lal '
'cache entry cannot be made')
file_url = urllib.parse.urlunparse(['file', 'localhost',
self.storage_path, None,
None, None])
cache_entry = lal.utils.CacheEntry(self.ifo_string,
self.tagged_description, self.segment_list.extent(), file_url)
cache_entry.workflow_file = self
return cache_entry
def _filename(self, ifo, description, extension, segment):
"""
Construct the standard output filename. Should only be used internally
of the File class.
"""
if extension.startswith('.'):
extension = extension[1:]
# Follow the frame convention of using integer filenames,
# but stretching to cover partially covered seconds.
start = int(segment[0])
end = int(math.ceil(segment[1]))
duration = str(end-start)
start = str(start)
return "%s-%s-%s-%s.%s" % (ifo, description.upper(), start,
duration, extension)
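        # For instance (values illustrative only),
        #   self._filename('H1', 'INSPIRAL', '.xml.gz',
        #                  segments.segment(815901601, 815902001))
        # returns 'H1-INSPIRAL-815901601-400.xml.gz'.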
@classmethod
def from_path(cls, path, attrs=None, **kwargs):
"""
Create an output File object from path, with optional attributes.
"""
if attrs is None:
attrs = {}
if attrs and 'ifos' in attrs:
ifos = attrs['ifos']
else:
ifos = ['H1', 'K1', 'L1', 'V1']
if attrs and 'exe_name' in attrs:
exe_name = attrs['exe_name']
else:
exe_name = 'INPUT'
if attrs and 'segs' in attrs:
segs = attrs['segs']
else:
segs = segments.segment([1, 2000000000])
if attrs and 'tags' in attrs:
tags = attrs['tags']
else:
tags = []
curr_file = cls(ifos, exe_name, segs, path, tags=tags, **kwargs)
return curr_file
class FileList(list):
'''
This class holds a list of File objects. It inherits from the
built-in list class, but also allows a number of features. ONLY
pycbc.workflow.File instances should be within a FileList instance.
'''
entry_class = File
def categorize_by_attr(self, attribute):
'''
Function to categorize a FileList by a File object
attribute (eg. 'segment', 'ifo', 'description').
Parameters
-----------
attribute : string
File object attribute to categorize FileList
Returns
--------
keys : list
A list of values for an attribute
groups : list
A list of FileLists
'''
# need to sort FileList otherwise using groupby without sorting does
# 'AAABBBCCDDAABB' -> ['AAA','BBB','CC','DD','AA','BB']
# and using groupby with sorting does
# 'AAABBBCCDDAABB' -> ['AAAAA','BBBBB','CC','DD']
flist = sorted(self, key=attrgetter(attribute), reverse=True)
# use groupby to create lists
groups = []
keys = []
for k, g in groupby(flist, attrgetter(attribute)):
groups.append(FileList(g))
keys.append(k)
return keys, groups
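        # A short usage sketch (the attribute and variable names are
        # illustrative):
        #   ifo_strings, grouped = file_list.categorize_by_attr('ifo_string')
        #   for ifo_str, files in zip(ifo_strings, grouped):
        #       print(ifo_str, len(files))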
def find_output(self, ifo, time):
'''Returns one File most appropriate at the given time/time range.
Return one File that covers the given time, or is most
appropriate for the supplied time range.
Parameters
-----------
ifo : string
Name of the ifo (or ifos) that the file should be valid for.
time : int/float/LIGOGPStime or tuple containing two values
            If int/float/LIGOGPStime (or similar way of specifying one time) is
given, return the File corresponding to the time. This calls
self.find_output_at_time(ifo,time).
If a tuple of two values is given, return the File that is
**most appropriate** for the time range given. This calls
self.find_output_in_range
Returns
--------
pycbc_file : pycbc.workflow.File instance
The File that corresponds to the time or time range
'''
# Determine whether I have a specific time, or a range of times
try:
lenTime = len(time)
except TypeError:
# This is if I have a single time
outFile = self.find_output_at_time(ifo,time)
else:
# This is if I have a range of times
if lenTime == 2:
outFile = self.find_output_in_range(ifo,time[0],time[1])
# This is if I got a list that had more (or less) than 2 entries
if len(time) != 2:
raise TypeError("I do not understand the input variable time")
return outFile
def find_output_at_time(self, ifo, time):
'''
Return File that covers the given time.
Parameters
-----------
ifo : string
Name of the ifo (or ifos) that the File should correspond to
time : int/float/LIGOGPStime
Return the Files that covers the supplied time. If no
File covers the time this will return None.
Returns
--------
list of File classes
The Files that corresponds to the time.
'''
# Get list of Files that overlap time, for given ifo
outFiles = [i for i in self if ifo in i.ifo_list and time in i.segment_list]
if len(outFiles) == 0:
# No OutFile at this time
return None
elif len(outFiles) == 1:
# 1 OutFile at this time (good!)
return outFiles
else:
# Multiple output files. Currently this is valid, but we may want
# to demand exclusivity later, or in certain cases. Hence the
# separation.
return outFiles
def find_outputs_in_range(self, ifo, current_segment, useSplitLists=False):
"""
Return the list of Files that is most appropriate for the supplied
time range. That is, the Files whose coverage time has the
largest overlap with the supplied time range.
Parameters
-----------
ifo : string
Name of the ifo (or ifos) that the File should correspond to
current_segment : ligo.segments.segment
The segment of time that files must intersect.
Returns
--------
FileList class
The list of Files that are most appropriate for the time range
"""
currsegment_list = segments.segmentlist([current_segment])
# Get all files overlapping the window
overlap_files = self.find_all_output_in_range(ifo, current_segment,
useSplitLists=useSplitLists)
# By how much do they overlap?
overlap_windows = [abs(i.segment_list & currsegment_list) for i in overlap_files]
if not overlap_windows:
return []
# Return the File with the biggest overlap
# Note if two File have identical overlap, the first is used
# to define the valid segment
overlap_windows = numpy.array(overlap_windows, dtype = int)
segmentLst = overlap_files[overlap_windows.argmax()].segment_list
# Get all output files with the exact same segment definition
output_files = [f for f in overlap_files if f.segment_list==segmentLst]
return output_files
def find_output_in_range(self, ifo, start, end):
'''
Return the File that is most appropriate for the supplied
time range. That is, the File whose coverage time has the
largest overlap with the supplied time range. If no Files
overlap the supplied time window, will return None.
Parameters
-----------
ifo : string
Name of the ifo (or ifos) that the File should correspond to
start : int/float/LIGOGPStime
The start of the time range of interest.
end : int/float/LIGOGPStime
The end of the time range of interest
Returns
--------
File class
The File that is most appropriate for the time range
'''
currsegment_list = segments.segmentlist([segments.segment(start, end)])
# First filter Files corresponding to ifo
outFiles = [i for i in self if ifo in i.ifo_list]
if len(outFiles) == 0:
# No OutFiles correspond to that ifo
return None
# Filter OutFiles to those overlapping the given window
currSeg = segments.segment([start,end])
outFiles = [i for i in outFiles \
if i.segment_list.intersects_segment(currSeg)]
if len(outFiles) == 0:
# No OutFile overlap that time period
return None
elif len(outFiles) == 1:
# One OutFile overlaps that period
return outFiles[0]
else:
overlap_windows = [abs(i.segment_list & currsegment_list) \
for i in outFiles]
# Return the File with the biggest overlap
# Note if two File have identical overlap, this will return
# the first File in the list
overlap_windows = numpy.array(overlap_windows, dtype = int)
return outFiles[overlap_windows.argmax()]
def find_all_output_in_range(self, ifo, currSeg, useSplitLists=False):
"""
Return all files that overlap the specified segment.
"""
if not useSplitLists:
# Slower, but simpler method
outFiles = [i for i in self if ifo in i.ifo_list]
outFiles = [i for i in outFiles
if i.segment_list.intersects_segment(currSeg)]
else:
# Faster, but more complicated
# Basically only check if a subset of files intersects_segment by
# using a presorted list. Sorting only happens once.
if not self._check_split_list_validity():
# FIXME: DO NOT hard code this.
self._temporal_split_list(100)
startIdx = int((currSeg[0] - self._splitListsStart) /
self._splitListsStep)
# Add some small rounding here
endIdx = (currSeg[1] - self._splitListsStart) / self._splitListsStep
endIdx = int(endIdx - 0.000001)
outFiles = []
for idx in range(startIdx, endIdx + 1):
if idx < 0 or idx >= self._splitListsNum:
continue
outFilesTemp = [i for i in self._splitLists[idx]
if ifo in i.ifo_list]
outFiles.extend([i for i in outFilesTemp
if i.segment_list.intersects_segment(currSeg)])
# Remove duplicates
outFiles = list(set(outFiles))
return self.__class__(outFiles)
def find_output_with_tag(self, tag):
"""
Find all files who have tag in self.tags
"""
# Enforce upper case
tag = tag.upper()
return FileList([i for i in self if tag in i.tags])
def find_output_without_tag(self, tag):
"""
Find all files who do not have tag in self.tags
"""
# Enforce upper case
tag = tag.upper()
return FileList([i for i in self if tag not in i.tags])
def find_output_with_ifo(self, ifo):
"""
Find all files who have ifo = ifo
"""
# Enforce upper case
ifo = ifo.upper()
return FileList([i for i in self if ifo in i.ifo_list])
def get_times_covered_by_files(self):
"""
Find the coalesced intersection of the segments of all files in the
list.
"""
times = segments.segmentlist([])
for entry in self:
times.extend(entry.segment_list)
times.coalesce()
return times
def convert_to_lal_cache(self):
"""
Return all files in this object as a glue.lal.Cache object
"""
lal_cache = gluelal.Cache([])
for entry in self:
try:
lal_cache.append(entry.cache_entry)
except ValueError:
pass
return lal_cache
def _temporal_split_list(self,numSubLists):
"""
This internal function is used to speed the code up in cases where a
number of operations are being made to determine if files overlap a
specific time. Normally such operations are done on *all* entries with
*every* call. However, if we predetermine which files are at which
times, we can avoid testing *every* file every time.
We therefore create numSubLists distinct and equal length time windows
equally spaced from the first time entry in the list until the last.
A list is made for each window and files are added to lists which they
overlap.
If the list changes it should be captured and these split lists become
invalid. Currently the testing for this is pretty basic
"""
# Assume segment lists are coalesced!
startTime = float( min([i.segment_list[0][0] for i in self]))
endTime = float( max([i.segment_list[-1][-1] for i in self]))
step = (endTime - startTime) / float(numSubLists)
# Set up storage
self._splitLists = []
for idx in range(numSubLists):
self._splitLists.append(FileList([]))
# Sort the files
for currFile in self:
segExtent = currFile.segment_list.extent()
startIdx = (segExtent[0] - startTime) / step
endIdx = (segExtent[1] - startTime) / step
# Add some small rounding here
startIdx = int(startIdx - 0.001)
endIdx = int(endIdx + 0.001)
if startIdx < 0:
startIdx = 0
if endIdx >= numSubLists:
endIdx = numSubLists - 1
for idx in range(startIdx, endIdx + 1):
self._splitLists[idx].append(currFile)
# Set information needed to detect changes and to be used elsewhere
self._splitListsLength = len(self)
self._splitListsNum = numSubLists
self._splitListsStart = startTime
self._splitListsEnd = endTime
self._splitListsStep = step
self._splitListsSet = True
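        # Worked (hypothetical) example of the binning above: with files
        # spanning GPS times 1000--2000 and numSubLists=100, step is 10 s,
        # so a file covering [1234, 1256] is appended to sub-lists 23
        # through 25.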
def _check_split_list_validity(self):
"""
See _temporal_split_list above. This function checks if the current
split lists are still valid.
"""
# FIXME: Currently very primitive, but needs to be fast
if not (hasattr(self,"_splitListsSet") and (self._splitListsSet)):
return False
elif len(self) != self._splitListsLength:
return False
else:
return True
@classmethod
def load(cls, filename):
"""
Load a FileList from a pickle file
"""
        f = open(filename, 'rb')
return pickle.load(f)
def dump(self, filename):
"""
Output this FileList to a pickle file
"""
        f = open(filename, 'wb')
pickle.dump(self, f)
def to_file_object(self, name, out_dir):
"""Dump to a pickle file and return an File object reference
Parameters
----------
name : str
An identifier of this file. Needs to be unique.
out_dir : path
path to place this file
Returns
-------
file : AhopeFile
"""
make_analysis_dir(out_dir)
file_ref = File('ALL', name, self.get_times_covered_by_files(),
extension='.pkl', directory=out_dir)
self.dump(file_ref.storage_path)
return file_ref
class SegFile(File):
'''
This class inherits from the File class, and is designed to store
workflow output files containing a segment dict. This is identical in
usage to File except for an additional kwarg for holding the
segment dictionary, if it is known at workflow run time.
'''
def __init__(self, ifo_list, description, valid_segment,
segment_dict=None, seg_summ_dict=None, **kwargs):
"""
See File.__init__ for a full set of documentation for how to
call this class. The only thing unique and added to this class is
the optional segment_dict. NOTE that while segment_dict is a
ligo.segments.segmentlistdict rather than the usual dict[ifo]
we key by dict[ifo:name].
Parameters
------------
ifo_list : string or list (required)
See File.__init__
description : string (required)
See File.__init__
segment : ligo.segments.segment or ligo.segments.segmentlist
See File.__init__
segment_dict : ligo.segments.segmentlistdict (optional, default=None)
A ligo.segments.segmentlistdict covering the times covered by the
segmentlistdict associated with this file.
Can be added by setting self.segment_dict after initializing an
instance of the class.
"""
super(SegFile, self).__init__(ifo_list, description, valid_segment,
**kwargs)
# To avoid confusion with the segment_list property of the parent class
# we refer to this as valid_segments here
self.valid_segments = self.segment_list
self.segment_dict = segment_dict
self.seg_summ_dict = seg_summ_dict
@classmethod
def from_segment_list(cls, description, segmentlist, name, ifo,
seg_summ_list=None, **kwargs):
""" Initialize a SegFile object from a segmentlist.
Parameters
------------
description : string (required)
See File.__init__
segmentlist : ligo.segments.segmentslist
The segment list that will be stored in this file.
name : str
The name of the segment lists to be stored in the file.
ifo : str
The ifo of the segment lists to be stored in this file.
seg_summ_list : ligo.segments.segmentslist (OPTIONAL)
Specify the segment_summary segmentlist that goes along with the
segmentlist. Default=None, in this case segment_summary is taken
from the valid_segment of the SegFile class.
"""
seglistdict = segments.segmentlistdict()
seglistdict[ifo + ':' + name] = segmentlist
seg_summ_dict = None
if seg_summ_list is not None:
seg_summ_dict = segments.segmentlistdict()
seg_summ_dict[ifo + ':' + name] = seg_summ_list
return cls.from_segment_list_dict(description, seglistdict,
seg_summ_dict=seg_summ_dict, **kwargs)
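        # Hypothetical usage of this constructor; the description, name,
        # ifo, times and output directory are illustrative only:
        #   sci_segs = segments.segmentlist([segments.segment(100, 200)])
        #   seg_file = SegFile.from_segment_list(
        #       'SCIENCE_SEGMENTS', sci_segs, 'DATA_ANALYSED', 'H1',
        #       extension='.xml', directory=out_dir)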
@classmethod
def from_multi_segment_list(cls, description, segmentlists, names, ifos,
seg_summ_lists=None, **kwargs):
""" Initialize a SegFile object from a list of segmentlists.
Parameters
------------
description : string (required)
See File.__init__
segmentlists : List of ligo.segments.segmentslist
List of segment lists that will be stored in this file.
names : List of str
List of names of the segment lists to be stored in the file.
ifos : str
List of ifos of the segment lists to be stored in this file.
seg_summ_lists : ligo.segments.segmentslist (OPTIONAL)
Specify the segment_summary segmentlists that go along with the
segmentlists. Default=None, in this case segment_summary is taken
from the valid_segment of the SegFile class.
"""
seglistdict = segments.segmentlistdict()
for name, ifo, segmentlist in zip(names, ifos, segmentlists):
seglistdict[ifo + ':' + name] = segmentlist
if seg_summ_lists is not None:
seg_summ_dict = segments.segmentlistdict()
for name, ifo, seg_summ_list in zip(names, ifos, seg_summ_lists):
seg_summ_dict[ifo + ':' + name] = seg_summ_list
else:
seg_summ_dict = None
return cls.from_segment_list_dict(description, seglistdict,
seg_summ_dict=seg_summ_dict, **kwargs)
@classmethod
def from_segment_list_dict(cls, description, segmentlistdict,
ifo_list=None, valid_segment=None,
file_exists=False, seg_summ_dict=None,
**kwargs):
""" Initialize a SegFile object from a segmentlistdict.
Parameters
------------
description : string (required)
See File.__init__
        segmentlistdict : ligo.segments.segmentlistdict
See SegFile.__init__
ifo_list : string or list (optional)
See File.__init__, if not given a list of all ifos in the
segmentlistdict object will be used
valid_segment : ligo.segments.segment or ligo.segments.segmentlist
See File.__init__, if not given the extent of all segments in the
segmentlistdict is used.
file_exists : boolean (default = False)
If provided and set to True it is assumed that this file already
exists on disk and so there is no need to write again.
        seg_summ_dict : ligo.segments.segmentlistdict
Optional. See SegFile.__init__.
"""
if ifo_list is None:
ifo_set = set([i.split(':')[0] for i in segmentlistdict.keys()])
ifo_list = list(ifo_set)
ifo_list.sort()
if valid_segment is None:
if seg_summ_dict and \
numpy.any([len(v) for _, v in seg_summ_dict.items()]):
# Only come here if seg_summ_dict is supplied and it is
# not empty.
valid_segment = seg_summ_dict.extent_all()
else:
try:
valid_segment = segmentlistdict.extent_all()
                except Exception:
                    # Numpty probably didn't supply a
                    # ligo.segments.segmentlistdict
                    segmentlistdict = segments.segmentlistdict(segmentlistdict)
try:
valid_segment = segmentlistdict.extent_all()
except ValueError:
# No segment_summary and segment list is empty
# Setting valid segment now is hard!
warn_msg = "No information with which to set valid "
warn_msg += "segment."
                        logging.warning(warn_msg)
valid_segment = segments.segment([0,1])
instnc = cls(ifo_list, description, valid_segment,
segment_dict=segmentlistdict, seg_summ_dict=seg_summ_dict,
**kwargs)
if not file_exists:
instnc.to_segment_xml()
else:
instnc.add_pfn(urljoin('file:', pathname2url(instnc.storage_path)),
site='local')
return instnc
@classmethod
def from_segment_xml(cls, xml_file, **kwargs):
"""
        Read segment information from an XML file containing segment tables
        and return a SegFile instance.
        Parameters
        -----------
        xml_file : str
            Path to the segment XML file.
"""
# load xmldocument and SegmentDefTable and SegmentTables
fp = open(xml_file, 'rb')
xmldoc = ligolw_utils.load_fileobj(
fp, compress='auto', contenthandler=LIGOLWContentHandler)
seg_def_table = lsctables.SegmentDefTable.get_table(xmldoc)
seg_table = lsctables.SegmentTable.get_table(xmldoc)
seg_sum_table = lsctables.SegmentSumTable.get_table(xmldoc)
segs = segments.segmentlistdict()
seg_summ = segments.segmentlistdict()
seg_id = {}
for seg_def in seg_def_table:
# Here we want to encode ifo and segment name
full_channel_name = ':'.join([str(seg_def.ifos),
str(seg_def.name)])
seg_id[int(seg_def.segment_def_id)] = full_channel_name
segs[full_channel_name] = segments.segmentlist()
seg_summ[full_channel_name] = segments.segmentlist()
for seg in seg_table:
seg_obj = segments.segment(
lal.LIGOTimeGPS(seg.start_time, seg.start_time_ns),
lal.LIGOTimeGPS(seg.end_time, seg.end_time_ns))
segs[seg_id[int(seg.segment_def_id)]].append(seg_obj)
for seg in seg_sum_table:
seg_obj = segments.segment(
lal.LIGOTimeGPS(seg.start_time, seg.start_time_ns),
lal.LIGOTimeGPS(seg.end_time, seg.end_time_ns))
seg_summ[seg_id[int(seg.segment_def_id)]].append(seg_obj)
for seg_name in seg_id.values():
segs[seg_name] = segs[seg_name].coalesce()
xmldoc.unlink()
fp.close()
curr_url = urllib.parse.urlunparse(['file', 'localhost', xml_file,
None, None, None])
return cls.from_segment_list_dict('SEGMENTS', segs, file_url=curr_url,
file_exists=True,
seg_summ_dict=seg_summ, **kwargs)
def remove_short_sci_segs(self, minSegLength):
"""
Function to remove all science segments
shorter than a specific length. Also updates the file on disk to remove
these segments.
Parameters
-----------
minSegLength : int
            Minimum length of science segments. Segments shorter than this will
be removed.
"""
newsegment_list = segments.segmentlist()
for key, seglist in self.segment_dict.items():
newsegment_list = segments.segmentlist()
for seg in seglist:
if abs(seg) > minSegLength:
newsegment_list.append(seg)
newsegment_list.coalesce()
self.segment_dict[key] = newsegment_list
self.to_segment_xml(override_file_if_exists=True)
def return_union_seglist(self):
return self.segment_dict.union(self.segment_dict.keys())
def parse_segdict_key(self, key):
"""
Return ifo and name from the segdict key.
"""
splt = key.split(':')
if len(splt) == 2:
return splt[0], splt[1]
else:
err_msg = "Key should be of the format 'ifo:name', got %s." %(key,)
raise ValueError(err_msg)
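    # Illustrative example (key made up): parse_segdict_key('H1:SCIENCE_OK')
    # returns ('H1', 'SCIENCE_OK'), matching the dict[ifo:name] keying
    # convention used for segment_dict.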
def to_segment_xml(self, override_file_if_exists=False):
"""
        Write the segment lists in self.segment_dict to self.storage_path.
"""
# create XML doc and add process table
outdoc = ligolw.Document()
outdoc.appendChild(ligolw.LIGO_LW())
process = create_process_table(outdoc)
for key, seglist in self.segment_dict.items():
ifo, name = self.parse_segdict_key(key)
# Ensure we have LIGOTimeGPS
fsegs = [(lal.LIGOTimeGPS(seg[0]),
lal.LIGOTimeGPS(seg[1])) for seg in seglist]
if self.seg_summ_dict is None:
vsegs = [(lal.LIGOTimeGPS(seg[0]),
lal.LIGOTimeGPS(seg[1])) \
for seg in self.valid_segments]
else:
vsegs = [(lal.LIGOTimeGPS(seg[0]),
lal.LIGOTimeGPS(seg[1])) \
for seg in self.seg_summ_dict[key]]
# Add using glue library to set all segment tables
with ligolw_segments.LigolwSegments(outdoc, process) as x:
x.add(ligolw_segments.LigolwSegmentList(active=fsegs,
instruments=set([ifo]), name=name,
version=1, valid=vsegs))
# write file
url = urljoin('file:', pathname2url(self.storage_path))
if not override_file_if_exists or not self.has_pfn(url, site='local'):
self.add_pfn(url, site='local')
ligolw_utils.write_filename(outdoc, self.storage_path)
def make_external_call(cmdList, out_dir=None, out_basename='external_call',
shell=False, fail_on_error=True):
"""
Use this to make an external call using the python subprocess module.
See the subprocess documentation for more details of how this works.
http://docs.python.org/2/library/subprocess.html
Parameters
-----------
cmdList : list of strings
This list of strings contains the command to be run. See the subprocess
documentation for more details.
out_dir : string
        If given, stdout and stderr will be redirected to
        os.path.join(out_dir, out_basename) + '.out' and '.err' respectively.
        If not given, stdout and stderr will not be recorded.
out_basename : string
The value of out_basename used to construct the file names used to
store stderr and stdout. See out_dir for more information.
shell : boolean, default=False
This value will be given as the shell kwarg to the subprocess call.
**WARNING** See the subprocess documentation for details on this
        kwarg, including a warning about a serious security risk. Do not
use this unless you are sure it is necessary **and** safe.
fail_on_error : boolean, default=True
If set to true an exception will be raised if the external command does
not return a code of 0. If set to false such failures will be ignored.
Stderr and Stdout can be stored in either case using the out_dir
and out_basename options.
Returns
--------
exitCode : int
The code returned by the process.
"""
if out_dir:
outBase = os.path.join(out_dir,out_basename)
errFile = outBase + '.err'
errFP = open(errFile, 'w')
outFile = outBase + '.out'
outFP = open(outFile, 'w')
cmdFile = outBase + '.sh'
cmdFP = open(cmdFile, 'w')
cmdFP.write(' '.join(cmdList))
cmdFP.close()
else:
errFile = None
outFile = None
cmdFile = None
errFP = None
outFP = None
msg = "Making external call %s" %(' '.join(cmdList))
logging.info(msg)
errCode = subprocess.call(cmdList, stderr=errFP, stdout=outFP,\
shell=shell)
if errFP:
errFP.close()
if outFP:
outFP.close()
if errCode and fail_on_error:
raise CalledProcessErrorMod(errCode, ' '.join(cmdList),
errFile=errFile, outFile=outFile, cmdFile=cmdFile)
logging.info("Call successful, or error checking disabled.")
class CalledProcessErrorMod(Exception):
"""
This exception is raised when subprocess.call returns a non-zero exit code
and checking has been requested. This should not be accessed by the user
it is used only within make_external_call.
"""
def __init__(self, returncode, cmd, errFile=None, outFile=None,
cmdFile=None):
self.returncode = returncode
self.cmd = cmd
self.errFile = errFile
self.outFile = outFile
self.cmdFile = cmdFile
def __str__(self):
msg = "Command '%s' returned non-zero exit status %d.\n" \
%(self.cmd, self.returncode)
if self.errFile:
msg += "Stderr can be found in %s .\n" %(self.errFile)
if self.outFile:
msg += "Stdout can be found in %s .\n" %(self.outFile)
if self.cmdFile:
msg += "The failed command has been printed in %s ." %(self.cmdFile)
return msg
def resolve_url_to_file(curr_pfn, attrs=None):
"""
Resolves a PFN into a workflow.File object.
This function will resolve a PFN to a workflow.File object. If a File
object already exists for that PFN that will be returned, otherwise a new
object is returned. We will implement default site schemes here as needed,
    for example cvmfs paths will be added to the osg and nonfsio sites in
addition to local. If the LFN is a duplicate of an existing one, but with a
different PFN an AssertionError is raised. The attrs keyword-argument can
be used to specify attributes of a file. All files have 4 possible
attributes. A list of ifos, an identifying string - usually used to give
the name of the executable that created the file, a segmentlist over which
the file is valid and tags specifying particular details about those files.
If attrs['ifos'] is set it will be used as the ifos, otherwise this will
default to ['H1', 'K1', 'L1', 'V1']. If attrs['exe_name'] is given this
will replace the "exe_name" sent to File.__init__ otherwise 'INPUT' will
be given. segs will default to [[1,2000000000]] unless overridden with
    attrs['segs']. tags will default to an empty list unless overridden
with attrs['tag']. If attrs is None it will be ignored and all defaults
will be used. It is emphasized that these attributes are for the most part
not important with input files. Exceptions include things like input
template banks, where ifos and valid times will be checked in the workflow
and used in the naming of child job output files.
"""
cvmfsstr1 = 'file:///cvmfs/'
cvmfsstr2 = 'file://localhost/cvmfs/'
cvmfsstrs = (cvmfsstr1, cvmfsstr2)
# Get LFN
urlp = urllib.parse.urlparse(curr_pfn)
curr_lfn = os.path.basename(urlp.path)
# Does this already exist as a File?
if curr_lfn in file_input_from_config_dict.keys():
file_pfn = file_input_from_config_dict[curr_lfn][2]
# If the PFNs are different, but LFNs are the same then fail.
assert(file_pfn == curr_pfn)
curr_file = file_input_from_config_dict[curr_lfn][1]
else:
# Use resolve_url to download file/symlink as appropriate
local_file_path = resolve_url(curr_pfn)
# Create File object with default local path
curr_file = File.from_path(local_file_path, attrs=attrs)
if curr_pfn.startswith(cvmfsstrs):
# Add PFNs for nonlocal sites for special cases (e.g. CVMFS).
# This block could be extended as needed
curr_file.add_pfn(curr_pfn, site='all')
else:
pfn_local = urljoin('file:', pathname2url(local_file_path))
curr_file.add_pfn(pfn_local, 'local')
# Store the file to avoid later duplication
tuple_val = (local_file_path, curr_file, curr_pfn)
file_input_from_config_dict[curr_lfn] = tuple_val
return curr_file
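# Illustrative sketch (path and attribute values made up, and the file is
# assumed to exist on disk): resolve a local template bank path into a File
# object, overriding two of the default attributes described above.
def _example_resolve_url_to_file():
    attrs = {'ifos': ['H1', 'L1'], 'exe_name': 'TMPLTBANK'}
    return resolve_url_to_file('file:///path/to/H1L1-TMPLTBANK.hdf',
                               attrs=attrs)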
def configparser_value_to_file(cp, sec, opt, attrs=None):
"""
Fetch a file given its url location via the section
and option in the workflow configuration parser.
Parameters
-----------
cp : ConfigParser object
The ConfigParser object holding the workflow configuration settings
sec : string
The section containing options for this job.
opt : string
Name of option (e.g. --output-file)
    attrs : dict, optional
        Dictionary specifying the 4 attributes of the file
        (see resolve_url_to_file).
Returns
--------
fileobj_from_path : workflow.File object obtained from the path
specified by opt, within sec, in cp.
"""
path = cp.get(sec, opt)
fileobj_from_path = resolve_url_to_file(path, attrs=attrs)
return fileobj_from_path
def get_full_analysis_chunk(science_segs):
"""
Function to find the first and last time point contained in the science segments
and return a single segment spanning that full time.
Parameters
-----------
science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances
The list of times that are being analysed in this workflow.
Returns
--------
fullSegment : ligo.segments.segment
The segment spanning the first and last time point contained in science_segs.
"""
    extents = [science_segs[ifo].extent() for ifo in science_segs.keys()]
    # Avoid shadowing the min/max builtins
    seg_min, seg_max = extents[0]
    for lo, hi in extents:
        if seg_min > lo:
            seg_min = lo
        if seg_max < hi:
            seg_max = hi
    fullSegment = segments.segment(seg_min, seg_max)
return fullSegment
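# Illustrative example (times made up): with
#     science_segs = {'H1': segmentlist([segment(100, 200)]),
#                     'L1': segmentlist([segment(150, 300)])}
# get_full_analysis_chunk returns segment(100, 300).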
def get_random_label():
"""
Get a random label string to use when clustering jobs.
"""
return ''.join(random.choice(string.ascii_uppercase + string.digits) \
for _ in range(15))
def resolve_td_option(val_str, valid_seg):
"""
Take an option which might be time-dependent and resolve it
Some options might take different values depending on the GPS time. For
example if you want opt_1 to take value_a if the time is between 10 and
100, value_b if between 100 and 250, and value_c if between 250 and 500 you
can supply:
value_a[10:100],value_b[100:250],value_c[250:500].
    This function will parse that string (supplied as val_str) and return the
    value whose time range fully contains valid_seg. If valid_seg is not fully
    contained in exactly one of these ranges, the code will fail. If given a
    simple option like:
value_a
The function will just return value_a.
"""
# Track if we've already found a matching option
output = ''
# Strip any whitespace, and split on comma
curr_vals = val_str.replace(' ', '').strip().split(',')
# Resolving the simple case is trivial and can be done immediately.
if len(curr_vals) == 1 and '[' not in curr_vals[0]:
return curr_vals[0]
# Loop over all possible values
for cval in curr_vals:
start = int(valid_seg[0])
end = int(valid_seg[1])
# Extract limits for each case, and check overlap with valid_seg
if '[' in cval:
bopt = cval.split('[')[1].split(']')[0]
start, end = bopt.split(':')
cval = cval.replace('[' + bopt + ']', '')
curr_seg = segments.segment(int(start), int(end))
# The segments module is a bit weird so we need to check if the two
# overlap using the following code. If valid_seg is fully within
# curr_seg this will be true.
if curr_seg.intersects(valid_seg) and \
(curr_seg & valid_seg == valid_seg):
if output:
err_msg = "Time-dependent options must be disjoint."
raise ValueError(err_msg)
output = cval
if not output:
err_msg = "Could not resolve option {}".format(val_str)
        raise ValueError(err_msg)
return output
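# Illustrative sketch (option string and GPS times made up): valid_seg lies
# entirely within [100, 250), so the value associated with that range is
# returned.
def _example_resolve_td_option():
    valid_seg = segments.segment(120, 200)
    opt = 'value_a[10:100],value_b[100:250],value_c[250:500]'
    return resolve_td_option(opt, valid_seg)   # -> 'value_b'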
def add_workflow_settings_cli(parser, include_subdax_opts=False):
"""Adds workflow options to an argument parser.
Parameters
----------
parser : argparse.ArgumentParser
Argument parser to add the options to.
include_subdax_opts : bool, optional
If True, will add output-map and dax-file-directory options
to the parser. These can be used for workflows that are
generated as a subdax of another workflow. Default is False.
"""
wfgrp = parser.add_argument_group("Options for setting workflow files")
wfgrp.add_argument("--workflow-name", required=True,
help="Name of the workflow.")
wfgrp.add_argument("--tags", nargs="+", default=[],
help="Append the given tags to file names.")
wfgrp.add_argument("--output-dir", default=None,
help="Path to directory where the workflow will be "
"written. Default is to use "
"{workflow-name}_output.")
wfgrp.add_argument("--cache-file", default=None,
help="Path to input file containing list of files to "
"be reused (the 'input_map' file)")
wfgrp.add_argument("--plan-now", default=False, action='store_true',
help="If given, workflow will immediately be planned "
"on completion of workflow generation but not "
"submitted to the condor pool. A start script "
"will be created to submit to condor.")
wfgrp.add_argument("--submit-now", default=False, action='store_true',
help="If given, workflow will immediately be submitted "
"on completion of workflow generation")
wfgrp.add_argument("--dax-file", default=None,
help="Path to DAX file. Default is to write to the "
"output directory with name "
"{workflow-name}.dax.")
if include_subdax_opts:
wfgrp.add_argument("--output-map", default=None,
help="Path to an output map file.")
wfgrp.add_argument("--dax-file-directory", default=None,
help="Put dax files (including output map, "
"sites.yml etc. in this directory. The use "
"case for this is when running a sub-workflow "
"under pegasus the outputs need to be copied "
"back to the appropriate directory, and "
"using this as --dax-file-directory . allows "
"that to be done.")
| 90,638
| 39.088014
| 168
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/plotting.py
|
# Copyright (C) 2015 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module is responsible for setting up plotting jobs.
https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/NOTYETCREATED.html
"""
from urllib.request import pathname2url
from urllib.parse import urljoin
from pycbc.workflow.core import File, FileList, makedir, Executable
def excludestr(tags, substr):
if substr is None:
return tags
if isinstance(substr, list):
if len(substr) > 1:
tags = excludestr(tags, substr[1:])
substr = substr[0]
return [tag for tag in tags if substr not in tag]
def requirestr(tags, substr):
if substr is None:
return tags
return [tag for tag in tags if substr in tag]
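# Illustrative sketch (tag names made up): keep only the subsections
# containing 'plot_range' and then drop the summary one.
def _example_tag_filtering():
    tags = ['plot_range-H1', 'plot_range-L1', 'plot_range-summ']
    kept = requirestr(tags, 'plot_range')
    return excludestr(kept, 'summ')   # -> ['plot_range-H1', 'plot_range-L1']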
class PlotExecutable(Executable):
""" plot executable
"""
current_retention_level = Executable.FINAL_RESULT
# plots and final results should get the highest priority
# on the job queue
def create_node(self, **kwargs):
node = Executable.create_node(self, **kwargs)
node.set_priority(1000)
return node
def make_template_plot(workflow, bank_file, out_dir, bins=None,
tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
node = PlotExecutable(workflow.cp, 'plot_bank', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--bank-file', bank_file)
if workflow.cp.has_option_tags('workflow-coincidence', 'background-bins', tags=tags):
bins = workflow.cp.get_opt_tags('workflow-coincidence', 'background-bins', tags=tags)
if bins:
node.add_opt('--background-bins', bins)
node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
workflow += node
return node.output_files[0]
def make_range_plot(workflow, psd_files, out_dir, exclude=None, require=None,
tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
secs = requirestr(workflow.cp.get_subsections('plot_range'), require)
secs = excludestr(secs, exclude)
secs = excludestr(secs, workflow.ifo_combinations)
files = FileList([])
for tag in secs:
node = PlotExecutable(workflow.cp, 'plot_range', ifos=workflow.ifos,
out_dir=out_dir, tags=[tag] + tags).create_node()
node.add_input_list_opt('--psd-files', psd_files)
node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
workflow += node
files += node.output_files
return files
def make_spectrum_plot(workflow, psd_files, out_dir, tags=None,
hdf_group=None, precalc_psd_files=None):
tags = [] if tags is None else tags
makedir(out_dir)
node = PlotExecutable(workflow.cp, 'plot_spectrum', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_list_opt('--psd-files', psd_files)
node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
if hdf_group is not None:
node.add_opt('--hdf-group', hdf_group)
if precalc_psd_files is not None and len(precalc_psd_files) == 1:
node.add_input_list_opt('--psd-file', precalc_psd_files)
workflow += node
return node.output_files[0]
def make_segments_plot(workflow, seg_files, out_dir, tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
node = PlotExecutable(workflow.cp, 'plot_segments', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_list_opt('--segment-files', seg_files)
node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
workflow += node
def make_gating_plot(workflow, insp_files, out_dir, tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
node = PlotExecutable(workflow.cp, 'plot_gating', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_list_opt('--input-file', insp_files)
node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
workflow += node
def make_throughput_plot(workflow, insp_files, out_dir, tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
node = PlotExecutable(workflow.cp, 'plot_throughput', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_list_opt('--input-file', insp_files)
node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
workflow += node
def make_foreground_table(workflow, trig_file, bank_file, out_dir,
singles=None, extension='.html', tags=None,
hierarchical_level=None):
if hierarchical_level is not None and tags:
tags = [("HIERARCHICAL_LEVEL_{:02d}".format(
hierarchical_level))] + tags
elif hierarchical_level is not None and not tags:
tags = ["HIERARCHICAL_LEVEL_{:02d}".format(hierarchical_level)]
elif hierarchical_level is None and not tags:
tags = []
makedir(out_dir)
exe = PlotExecutable(workflow.cp, 'page_foreground',
ifos=trig_file.ifo_list,
out_dir=out_dir, tags=tags)
node = exe.create_node()
node.add_input_opt('--bank-file', bank_file)
node.add_input_opt('--trigger-file', trig_file)
if hierarchical_level is not None:
node.add_opt('--use-hierarchical-level', hierarchical_level)
if singles is not None:
node.add_input_list_opt('--single-detector-triggers', singles)
node.new_output_file_opt(bank_file.segment, extension, '--output-file')
workflow += node
return node.output_files[0]
def make_sensitivity_plot(workflow, inj_file, out_dir, exclude=None,
require=None, tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
secs = requirestr(workflow.cp.get_subsections('plot_sensitivity'), require)
secs = excludestr(secs, exclude)
secs = excludestr(secs, workflow.ifo_combinations)
files = FileList([])
for tag in secs:
node = PlotExecutable(workflow.cp, 'plot_sensitivity', ifos=workflow.ifos,
out_dir=out_dir, tags=[tag] + tags).create_node()
node.add_input_opt('--injection-file', inj_file)
node.new_output_file_opt(inj_file.segment, '.png', '--output-file')
workflow += node
files += node.output_files
return files
def make_coinc_snrchi_plot(workflow, inj_file, inj_trig, stat_file, trig_file,
out_dir, exclude=None, require=None, tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
secs = requirestr(workflow.cp.get_subsections('plot_coinc_snrchi'), require)
secs = excludestr(secs, exclude)
secs = excludestr(secs, workflow.ifo_combinations)
files = FileList([])
for tag in secs:
exe = PlotExecutable(workflow.cp, 'plot_coinc_snrchi',
ifos=inj_trig.ifo_list,
out_dir=out_dir, tags=[tag] + tags)
node = exe.create_node()
node.add_input_opt('--found-injection-file', inj_file)
node.add_input_opt('--single-injection-file', inj_trig)
node.add_input_opt('--coinc-statistic-file', stat_file)
node.add_input_opt('--single-trigger-file', trig_file)
node.new_output_file_opt(inj_file.segment, '.png', '--output-file')
workflow += node
files += node.output_files
return files
def make_inj_table(workflow, inj_file, out_dir, missed=False, singles=None,
tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
node = PlotExecutable(workflow.cp, 'page_injections', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--injection-file', inj_file)
if missed:
node.add_opt('--show-missed')
if singles is not None:
node.add_multiifo_input_list_opt('--single-trigger-files', singles)
node.new_output_file_opt(inj_file.segment, '.html', '--output-file')
workflow += node
return node.output_files[0]
def make_seg_table(workflow, seg_files, seg_names, out_dir, tags=None,
title_text=None, description=None):
""" Creates a node in the workflow for writing the segment summary
table. Returns a File instances for the output file.
"""
seg_files = list(seg_files)
seg_names = list(seg_names)
if tags is None: tags = []
makedir(out_dir)
node = PlotExecutable(workflow.cp, 'page_segtable', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_list_opt('--segment-files', seg_files)
quoted_seg_names = []
for s in seg_names:
quoted_seg_names.append("'" + s + "'")
node.add_opt('--segment-names', ' '.join(quoted_seg_names))
node.add_opt('--ifos', ' '.join(workflow.ifos))
if description:
node.add_opt('--description', "'" + description + "'")
if title_text:
node.add_opt('--title-text', "'" + title_text + "'")
node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
workflow += node
return node.output_files[0]
def make_veto_table(workflow, out_dir, vetodef_file=None, tags=None):
""" Creates a node in the workflow for writing the veto_definer
table. Returns a File instances for the output file.
"""
if vetodef_file is None:
if not workflow.cp.has_option_tags("workflow-segments",
"segments-veto-definer-file", []):
return None
vetodef_file = workflow.cp.get_opt_tags("workflow-segments",
"segments-veto-definer-file", [])
file_url = urljoin('file:', pathname2url(vetodef_file))
vdf_file = File(workflow.ifos, 'VETO_DEFINER',
workflow.analysis_time, file_url=file_url)
vdf_file.add_pfn(file_url, site='local')
else:
vdf_file = vetodef_file
if tags is None: tags = []
makedir(out_dir)
node = PlotExecutable(workflow.cp, 'page_vetotable', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--veto-definer-file', vdf_file)
node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
workflow += node
return node.output_files[0]
def make_seg_plot(workflow, seg_files, out_dir, seg_names=None, tags=None):
""" Creates a node in the workflow for plotting science, and veto segments.
"""
seg_files = list(seg_files)
if tags is None: tags = []
makedir(out_dir)
node = PlotExecutable(workflow.cp, 'page_segplot', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_list_opt('--segment-files', seg_files)
quoted_seg_names = []
for s in seg_names:
quoted_seg_names.append("'" + s + "'")
node.add_opt('--segment-names', ' '.join(quoted_seg_names))
node.add_opt('--ifos', ' '.join(workflow.ifos))
node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
workflow += node
return node.output_files[0]
def make_ifar_plot(workflow, trigger_file, out_dir, tags=None,
hierarchical_level=None, executable='page_ifar'):
""" Creates a node in the workflow for plotting cumulative histogram
of IFAR values.
"""
if hierarchical_level is not None and tags:
tags = [("HIERARCHICAL_LEVEL_{:02d}".format(
hierarchical_level))] + tags
elif hierarchical_level is not None and not tags:
tags = ["HIERARCHICAL_LEVEL_{:02d}".format(hierarchical_level)]
elif hierarchical_level is None and not tags:
tags = []
makedir(out_dir)
exe = PlotExecutable(workflow.cp, executable, ifos=trigger_file.ifo_list,
out_dir=out_dir, tags=tags)
node = exe.create_node()
node.add_input_opt('--trigger-file', trigger_file)
if hierarchical_level is not None:
node.add_opt('--use-hierarchical-level', hierarchical_level)
node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
workflow += node
return node.output_files[0]
def make_snrchi_plot(workflow, trig_files, veto_file, veto_name,
out_dir, exclude=None, require=None, tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
secs = requirestr(workflow.cp.get_subsections('plot_snrchi'), require)
secs = excludestr(secs, exclude)
secs = excludestr(secs, workflow.ifo_combinations)
files = FileList([])
for tag in secs:
for trig_file in trig_files:
exe = PlotExecutable(workflow.cp, 'plot_snrchi',
ifos=trig_file.ifo_list,
out_dir=out_dir,
tags=[tag] + tags)
node = exe.create_node()
node.set_memory(15000)
node.add_input_opt('--trigger-file', trig_file)
if veto_file is not None:
node.add_input_opt('--veto-file', veto_file)
node.add_opt('--segment-name', veto_name)
node.new_output_file_opt(trig_file.segment, '.png', '--output-file')
workflow += node
files += node.output_files
return files
def make_foundmissed_plot(workflow, inj_file, out_dir, exclude=None,
require=None, tags=None):
if tags is None:
tags = []
makedir(out_dir)
secs = requirestr(workflow.cp.get_subsections('plot_foundmissed'), require)
secs = excludestr(secs, exclude)
secs = excludestr(secs, workflow.ifo_combinations)
files = FileList([])
for tag in secs:
exe = PlotExecutable(workflow.cp, 'plot_foundmissed', ifos=workflow.ifos,
out_dir=out_dir, tags=[tag] + tags)
node = exe.create_node()
ext = '.html' if exe.has_opt('dynamic') else '.png'
node.add_input_opt('--injection-file', inj_file)
node.new_output_file_opt(inj_file.segment, ext, '--output-file')
workflow += node
files += node.output_files
return files
def make_snrratehist_plot(workflow, bg_file, out_dir, closed_box=False,
tags=None, hierarchical_level=None):
if hierarchical_level is not None and tags:
tags = [("HIERARCHICAL_LEVEL_{:02d}".format(
hierarchical_level))] + tags
elif hierarchical_level is not None and not tags:
tags = ["HIERARCHICAL_LEVEL_{:02d}".format(hierarchical_level)]
elif hierarchical_level is None and not tags:
tags = []
makedir(out_dir)
exe = PlotExecutable(workflow.cp, 'plot_snrratehist',
ifos=bg_file.ifo_list,
out_dir=out_dir, tags=tags)
node = exe.create_node()
node.add_input_opt('--trigger-file', bg_file)
if hierarchical_level is not None:
node.add_opt('--use-hierarchical-level', hierarchical_level)
if closed_box:
node.add_opt('--closed-box')
node.new_output_file_opt(bg_file.segment, '.png', '--output-file')
workflow += node
return node.output_files[0]
def make_snrifar_plot(workflow, bg_file, out_dir, closed_box=False,
cumulative=True, tags=None, hierarchical_level=None):
if hierarchical_level is not None and tags:
tags = [("HIERARCHICAL_LEVEL_{:02d}".format(
hierarchical_level))] + tags
elif hierarchical_level is not None and not tags:
tags = ["HIERARCHICAL_LEVEL_{:02d}".format(hierarchical_level)]
elif hierarchical_level is None and not tags:
tags = []
makedir(out_dir)
exe = PlotExecutable(workflow.cp, 'plot_snrifar', ifos=bg_file.ifo_list,
out_dir=out_dir, tags=tags)
node = exe.create_node()
node.add_input_opt('--trigger-file', bg_file)
if hierarchical_level is not None:
node.add_opt('--use-hierarchical-level', hierarchical_level)
if closed_box:
node.add_opt('--closed-box')
if not cumulative:
node.add_opt('--not-cumulative')
node.new_output_file_opt(bg_file.segment, '.png', '--output-file')
workflow += node
return node.output_files[0]
def make_results_web_page(workflow, results_dir, template='orange',
explicit_dependencies=None):
template_path = 'templates/'+template+'.html'
out_dir = workflow.cp.get('results_page', 'output-path')
makedir(out_dir)
node = PlotExecutable(workflow.cp, 'results_page', ifos=workflow.ifos,
out_dir=out_dir).create_node()
node.add_opt('--plots-dir', results_dir)
node.add_opt('--template-file', template_path)
workflow += node
if explicit_dependencies is not None:
for dep in explicit_dependencies:
workflow.add_explicit_dependancy(dep, node)
def make_single_hist(workflow, trig_file, veto_file, veto_name,
out_dir, bank_file=None, exclude=None,
require=None, tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
secs = requirestr(workflow.cp.get_subsections('plot_hist'), require)
secs = excludestr(secs, exclude)
secs = excludestr(secs, workflow.ifo_combinations)
files = FileList([])
for tag in secs:
node = PlotExecutable(workflow.cp, 'plot_hist',
ifos=trig_file.ifo,
out_dir=out_dir,
tags=[tag] + tags).create_node()
if veto_file is not None:
node.add_opt('--segment-name', veto_name)
node.add_input_opt('--veto-file', veto_file)
node.add_input_opt('--trigger-file', trig_file)
if bank_file:
node.add_input_opt('--bank-file', bank_file)
node.new_output_file_opt(trig_file.segment, '.png', '--output-file')
workflow += node
files += node.output_files
return files
def make_binned_hist(workflow, trig_file, veto_file, veto_name,
out_dir, bank_file, exclude=None,
require=None, tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
secs = requirestr(workflow.cp.get_subsections('plot_binnedhist'), require)
secs = excludestr(secs, exclude)
secs = excludestr(secs, workflow.ifo_combinations)
files = FileList([])
for tag in secs:
node = PlotExecutable(workflow.cp, 'plot_binnedhist',
ifos=trig_file.ifo,
out_dir=out_dir,
tags=[tag] + tags).create_node()
node.add_opt('--ifo', trig_file.ifo)
if veto_file is not None:
node.add_opt('--veto-segment-name', veto_name)
node.add_input_opt('--veto-file', veto_file)
node.add_input_opt('--trigger-file', trig_file)
node.add_input_opt('--bank-file', bank_file)
node.new_output_file_opt(trig_file.segment, '.png', '--output-file')
workflow += node
files += node.output_files
return files
def make_singles_plot(workflow, trig_files, bank_file, veto_file, veto_name,
out_dir, exclude=None, require=None, tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
secs = requirestr(workflow.cp.get_subsections('plot_singles'), require)
secs = excludestr(secs, exclude)
secs = excludestr(secs, workflow.ifo_combinations)
files = FileList([])
for tag in secs:
for trig_file in trig_files:
node = PlotExecutable(workflow.cp, 'plot_singles',
ifos=trig_file.ifo,
out_dir=out_dir,
tags=[tag] + tags).create_node()
node.set_memory(15000)
node.add_input_opt('--bank-file', bank_file)
if veto_file is not None:
node.add_input_opt('--veto-file', veto_file)
node.add_opt('--segment-name', veto_name)
node.add_opt('--detector', trig_file.ifo)
node.add_input_opt('--single-trig-file', trig_file)
node.new_output_file_opt(trig_file.segment, '.png', '--output-file')
workflow += node
files += node.output_files
return files
def make_dq_trigger_rate_plot(workflow, dq_files, out_dir, tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
files = FileList([])
for dq_file in dq_files:
if workflow.cp.has_option_tags('bin_trigger_rates_dq',
'background-bins', tags=tags):
background_bins = \
workflow.cp.get_opt_tags('bin_trigger_rates_dq',
'background-bins', tags=tags)
bin_names = [tuple(bbin.split(':'))[0] for bbin
in background_bins.split(' ')]
else: bin_names = ['all_bin']
for bbin in bin_names:
plot_tags = [bbin] + tags
node = PlotExecutable(workflow.cp, 'plot_dq_likelihood_vs_time',
ifos=dq_file.ifo,
out_dir=out_dir,
tags=plot_tags).create_node()
node.add_opt('--ifo', dq_file.ifo)
node.add_opt('--background-bin', bbin)
node.add_input_opt('--dq-file', dq_file)
node.new_output_file_opt(dq_file.segment, '.png', '--output-file')
workflow += node
files += node.output_files
return files
def make_dq_percentile_plot(workflow, dq_files, out_dir, tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
files = FileList([])
for dq_file in dq_files:
if workflow.cp.has_option_tags('bin_trigger_rates_dq',
'background-bins', tags=tags):
background_bins = \
workflow.cp.get_opt_tags('bin_trigger_rates_dq',
'background-bins', tags=tags)
bin_names = [tuple(bbin.split(':'))[0] for bbin
in background_bins.split(' ')]
else: bin_names = ['all_bin']
for bbin in bin_names:
plot_tags = [bbin] + tags
node = PlotExecutable(workflow.cp, 'plot_dq_percentiles',
ifos=dq_file.ifo,
out_dir=out_dir,
tags=plot_tags).create_node()
node.add_opt('--ifo', dq_file.ifo)
node.add_opt('--background-bin', bbin)
node.add_input_opt('--dq-file', dq_file)
node.new_output_file_opt(dq_file.segment, '.png', '--output-file')
workflow += node
files += node.output_files
return files
| 23,833
| 41.409253
| 93
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/configparser_test.py
|
import re
import copy
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
def parse_workflow_ini_file(cpFile,parsed_filepath=None):
"""Read a .ini file in, parse it as described in the documentation linked
to above, and return the parsed ini file.
Parameters
----------
cpFile : The path to a .ini file to be read in
    parsed_filepath: string, optional
If provided, the .ini file, after parsing, will be written to this
location
Returns
-------
cp: The parsed ConfigParser class containing the read in .ini file
"""
# First read the .ini file
cp = read_ini_file(cpFile)
print(cp.sections())
# Check for any substitutions that can be made
# FIXME: The python 3 version of ConfigParser can do this automatically
# move over to that if it can be backported to python2.X.
# We use the same formatting as the new configparser module when doing
# ExtendedInterpolation
# This is described at http://docs.python.org/3.4/library/configparser.html
#cp = perform_extended_interpolation(cp)
# Split sections like [inspiral&tmplt] into [inspiral] and [tmplt]
cp = split_multi_sections(cp)
print(cp.sections())
# Check for duplicate options in sub-sections
sanity_check_subsections(cp)
print(cp.sections())
# Dump parsed .ini file if needed
if parsed_filepath:
fp = open(parsed_filepath,'w')
cp.write(fp)
fp.close()
return cp
def read_ini_file(cpFile):
"""Read a .ini file and return it as a ConfigParser class.
This function does none of the parsing/combining of sections. It simply
reads the file and returns it unedited
Parameters
----------
cpFile : The path to a .ini file to be read in
Returns
-------
cp: The ConfigParser class containing the read in .ini file
"""
# Initialise ConfigParser class
cp = ConfigParser.ConfigParser(\
interpolation=ConfigParser.ExtendedInterpolation())
# Read the file
fp = open(cpFile,'r')
cp.read_file(fp)
fp.close()
return cp
def perform_extended_interpolation(cp,preserve_orig_file=False):
"""Filter through an ini file and replace all examples of
ExtendedInterpolation formatting with the exact value. For values like
${example} this is replaced with the value that corresponds to the option
called example ***in the same section***
For values like ${common|example} this is replaced with the value that
corresponds to the option example in the section [common]. Note that
in the python3 config parser this is ${common:example} but python2.7
interprets the : the same as a = and this breaks things
Nested interpolation is not supported here.
Parameters
----------
cp: ConfigParser object
preserve_orig_file: Boolean, optional
By default the input ConfigParser object will be modified in place. If
this is set deepcopy will be used and the input will be preserved.
Default = False
Returns
-------
cp: parsed ConfigParser object
"""
# Deepcopy the cp object if needed
if preserve_orig_file:
cp = copy.deepcopy(cp)
# Do not allow any interpolation of the section names
for section in cp.sections():
for option,value in cp.items(section):
# Check the option name
newStr = interpolate_string(option,cp,section)
if newStr != option:
cp.set(section,newStr,value)
cp.remove_option(section,option)
# Check the value
newStr = interpolate_string(value,cp,section)
if newStr != value:
cp.set(section,option,newStr)
return cp
def interpolate_string(testString,cp,section):
"""Take a string and replace all example of ExtendedInterpolation formatting
within the string with the exact value.
For values like ${example} this is replaced with the value that corresponds
to the option called example ***in the same section***
For values like ${common|example} this is replaced with the value that
corresponds to the option example in the section [common]. Note that
in the python3 config parser this is ${common:example} but python2.7
interprets the : the same as a = and this breaks things
Nested interpolation is not supported here.
Parameters
----------
testString: String
The string to parse and interpolate
cp: ConfigParser
The ConfigParser object to look for the interpolation strings within
section: String
The current section of the ConfigParser object
Returns
----------
testString: String
Interpolated string
"""
# First check if any interpolation is needed and abort if not
reObj = re.search(r"\$\{.*?\}", testString)
while reObj:
        # reObj.group(0) is the first (non-greedy) match of ${....}; strip the
        # leading '${' and trailing '}' to obtain the interpolation target
repString = (reObj).group(0)[2:-1]
# Need to test which of the two formats we have
splitString = repString.split('|')
if len(splitString) == 1:
testString = testString.replace('${'+repString+'}',\
cp.get(section,splitString[0]))
if len(splitString) == 2:
testString = testString.replace('${'+repString+'}',\
cp.get(splitString[0],splitString[1]))
reObj = re.search(r"\$\{.*?\}", testString)
return testString
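# Illustrative sketch (sections, options and values made up): demonstrates
# both interpolation formats described in the docstring above.
def _example_interpolate_string():
    cp = ConfigParser.ConfigParser()
    cp.add_section('common')
    cp.set('common', 'gps-start', '1000000000')
    cp.add_section('inspiral')
    cp.set('inspiral', 'segment-length', '256')
    test = 'start=${common|gps-start} seglen=${segment-length}'
    return interpolate_string(test, cp, 'inspiral')
    # -> 'start=1000000000 seglen=256'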
def split_multi_sections(cp,preserve_orig_file=False):
"""Parse through a supplied ConfigParser object and splits any sections
labelled with an "&" sign (for e.g. [inspiral&tmpltbank]) into [inspiral]
and [tmpltbank] sections. If these individual sections already exist they
will be appended to. If an option exists in both the [inspiral] and
[inspiral&tmpltbank] sections an error will be thrown
Parameters
----------
cp: The ConfigParser class
preserve_orig_file: Boolean, optional
By default the input ConfigParser object will be modified in place. If
this is set deepcopy will be used and the input will be preserved.
Default = False
Returns
----------
cp: The ConfigParser class
"""
# Deepcopy the cp object if needed
if preserve_orig_file:
cp = copy.deepcopy(cp)
# Begin by looping over all sections
for section in cp.sections():
# Only continue if section needs splitting
if '&' not in section:
continue
# Get list of section names to add these options to
splitSections = section.split('&')
for newSec in splitSections:
# Add sections if they don't already exist
if not cp.has_section(newSec):
cp.add_section(newSec)
add_options_to_section(cp,newSec,cp.items(section))
cp.remove_section(section)
return cp
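# Illustrative sketch (section and option names made up): a combined
# [inspiral&tmpltbank] section is split into separate [inspiral] and
# [tmpltbank] sections sharing the same option.
def _example_split_multi_sections():
    cp = ConfigParser.ConfigParser()
    cp.add_section('inspiral&tmpltbank')
    cp.set('inspiral&tmpltbank', 'low-frequency-cutoff', '30')
    cp = split_multi_sections(cp)
    return sorted(cp.sections())   # -> ['inspiral', 'tmpltbank']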
def sanity_check_subsections(cp):
"""This function goes through the ConfigParset and checks that any options
given in the [SECTION_NAME] section are not also given in any
[SECTION_NAME-SUBSECTION] sections.
Parameters
----------
cp: The ConfigParser class
Returns
----------
None
"""
# Loop over the sections in the ini file
for section in cp.sections():
# Loop over the sections again
for section2 in cp.sections():
# Check if any are subsections of section
if section2.startswith(section + '-'):
# Check for duplicate options whenever this exists
check_duplicate_options(cp,section,section2,raise_error=True)
def add_options_to_section(cp,section,items,preserve_orig_file=False,\
overwrite_options=False):
"""Add a set of options and values to a section of a ConfigParser object.
Will throw an error if any of the options being added already exist, this
behaviour can be overridden if desired
Parameters
----------
cp: The ConfigParser class
section: string
The name of the section to add options+values to
items: list of tuples
Each tuple contains (at [0]) the option and (at [1]) the value to add
to the section of the ini file
preserve_orig_file: Boolean, optional
By default the input ConfigParser object will be modified in place. If
this is set deepcopy will be used and the input will be preserved.
Default = False
overwrite_options: Boolean, optional
By default this function will throw a ValueError if an option exists in
both the original section in the ConfigParser *and* in the provided
items. This will override so that the options+values given in items
will replace the original values if the value is set to True.
        Default = False
Returns
----------
cp: The ConfigParser class
"""
# Sanity checking
if not cp.has_section(section):
raise ValueError('Section %s not present in ConfigParser.' %(section,))
# Deepcopy the cp object if needed
if preserve_orig_file:
cp = copy.deepcopy(cp)
# Check for duplicate options first
for option,value in items:
if not overwrite_options:
if option in cp.options(section):
                raise ValueError('Option %s exists in both original '
                                 'ConfigParser and input list' % (option,))
cp.set(section,option,value)
return cp
def check_duplicate_options(cp,section1,section2,raise_error=False):
"""Check for duplicate options in two sections, section1 and section2.
Will return True if there are duplicate options and False if not
Parameters
----------
cp: The ConfigParser class
section1: string
The name of the first section to compare
section2: string
The name of the second section to compare
raise_error: Boolean, optional
If True, raise an error if duplicates are present.
Default = False
Returns
----------
    duplicates: List
List of duplicate options
"""
# Sanity checking
if not cp.has_section(section1):
raise ValueError('Section %s not present in ConfigParser.'%(section1,))
if not cp.has_section(section2):
raise ValueError('Section %s not present in ConfigParser.'%(section2,))
items1 = cp.items(section1)
items2 = cp.items(section2)
# The list comprehension here creates a list of all duplicate items
duplicates = [x for x in items1 if x in items2]
if duplicates and raise_error:
        raise ValueError('The following options appear in both section '
                         '%s and %s: %s'
                         % (section1, section2,
                            ' '.join(str(d) for d in duplicates)))
return duplicates
| 10,939
| 34.176849
| 80
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/versioning.py
|
# Copyright (C) 2023 Gareth Cabourn Davies
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
Module to generate/manage the executable used for version information
in workflows
"""
import os
from pycbc.workflow.core import Executable
class VersioningExecutable(Executable):
"""
Executable for getting version information
"""
current_retention_level = Executable.FINAL_RESULT
def make_versioning_page(workflow, config_parser, out_dir, tags=None):
"""
    Create and add a node which gathers version information for every
    executable used in the workflow and writes it to an HTML page.
"""
vers_exe = VersioningExecutable(
workflow.cp,
'page_versioning',
out_dir=out_dir,
ifos=workflow.ifos,
tags=tags,
)
node = vers_exe.create_node()
config_names = []
exes = []
for name, path in config_parser.items('executables'):
exe_to_test = os.path.basename(path)
if exe_to_test in exes:
# executable is already part of the list,
# find which index and add the name to the
# one already stored
path_idx = exes.index(exe_to_test)
name_orig = config_names[path_idx]
config_names[path_idx] = f"{name_orig},{name}"
else:
config_names.append(name)
exes.append(exe_to_test)
node.add_list_opt('--executables', exes)
node.add_list_opt('--executables-names', config_names)
node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
workflow.add_node(node)
return node
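# Illustrative example (names made up): if the [executables] section maps both
# 'inspiral' and 'inspiral_injections' to .../bin/pycbc_inspiral, the single
# basename 'pycbc_inspiral' is passed to --executables together with the
# combined name 'inspiral,inspiral_injections'.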
| 2,384
| 32.591549
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/coincidence.py
|
# Copyright (C) 2013 Ian Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module is responsible for setting up the coincidence stage of pycbc
workflows. For details about this module and its capabilities see here:
https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/coincidence.html
"""
import os
import logging
from ligo import segments
from pycbc.workflow.core import FileList, make_analysis_dir, Executable, Node, File
class PyCBCBank2HDFExecutable(Executable):
"""Converts xml tmpltbank to hdf format"""
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, bank_file):
node = Node(self)
node.add_input_opt('--bank-file', bank_file)
node.new_output_file_opt(bank_file.segment, '.hdf', '--output-file')
return node
class PyCBCTrig2HDFExecutable(Executable):
"""Converts xml triggers to hdf format, grouped by template hash"""
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, trig_files, bank_file):
node = Node(self)
node.add_input_opt('--bank-file', bank_file)
node.add_input_list_opt('--trigger-files', trig_files)
node.new_output_file_opt(trig_files[0].segment, '.hdf',
'--output-file', use_tmp_subdirs=True)
return node
class PyCBCFitByTemplateExecutable(Executable):
"""Calculates values that describe the background distribution template by template"""
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, trig_file, bank_file, veto_file, veto_name):
node = Node(self)
# Executable objects are initialized with ifo information
node.add_opt('--ifo', self.ifo_string)
node.add_input_opt('--trigger-file', trig_file)
node.add_input_opt('--bank-file', bank_file)
node.add_input_opt('--veto-file', veto_file)
node.add_opt('--veto-segment-name', veto_name)
node.new_output_file_opt(trig_file.segment, '.hdf', '--output')
return node
class PyCBCFitOverParamExecutable(Executable):
"""Smooths the background distribution parameters over a continuous parameter"""
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, raw_fit_file, bank_file):
node = Node(self)
node.add_input_opt('--template-fit-file', raw_fit_file)
node.add_input_opt('--bank-file', bank_file)
node.new_output_file_opt(raw_fit_file.segment, '.hdf', '--output')
return node
class PyCBCFindCoincExecutable(Executable):
"""Find coinc triggers using a folded interval method"""
current_retention_level = Executable.ALL_TRIGGERS
def create_node(self, trig_files, bank_file, stat_files, veto_file,
veto_name, template_str, pivot_ifo, fixed_ifo, tags=None):
if tags is None:
tags = []
segs = trig_files.get_times_covered_by_files()
seg = segments.segment(segs[0][0], segs[-1][1])
node = Node(self)
node.add_input_opt('--template-bank', bank_file)
node.add_input_list_opt('--trigger-files', trig_files)
if len(stat_files) > 0:
node.add_input_list_opt('--statistic-files', stat_files)
if veto_file is not None:
node.add_input_opt('--veto-files', veto_file)
node.add_opt('--segment-name', veto_name)
node.add_opt('--pivot-ifo', pivot_ifo)
node.add_opt('--fixed-ifo', fixed_ifo)
node.add_opt('--template-fraction-range', template_str)
node.new_output_file_opt(seg, '.hdf', '--output-file', tags=tags)
return node
class PyCBCFindSnglsExecutable(Executable):
"""Calculate single-detector ranking statistic for triggers"""
current_retention_level = Executable.ALL_TRIGGERS
file_input_options = ['--statistic-files']
def create_node(self, trig_files, bank_file, stat_files, veto_file,
veto_name, template_str, tags=None):
if tags is None:
tags = []
segs = trig_files.get_times_covered_by_files()
seg = segments.segment(segs[0][0], segs[-1][1])
node = Node(self)
node.add_input_opt('--template-bank', bank_file)
node.add_input_list_opt('--trigger-files', trig_files)
if len(stat_files) > 0:
node.add_input_list_opt('--statistic-files', stat_files)
if veto_file is not None:
node.add_input_opt('--veto-files', veto_file)
node.add_opt('--segment-name', veto_name)
node.add_opt('--template-fraction-range', template_str)
node.new_output_file_opt(seg, '.hdf', '--output-file', tags=tags)
return node
class PyCBCStatMapExecutable(Executable):
"""Calculate FAP, IFAR, etc for coincs"""
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, coinc_files, ifos, tags=None):
if tags is None:
tags = []
segs = coinc_files.get_times_covered_by_files()
seg = segments.segment(segs[0][0], segs[-1][1])
node = Node(self)
node.add_input_list_opt('--coinc-files', coinc_files)
node.add_opt('--ifos', ifos)
node.new_output_file_opt(seg, '.hdf', '--output-file', tags=tags)
return node
class PyCBCSnglsStatMapExecutable(Executable):
"""Calculate FAP, IFAR, etc for singles"""
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, sngls_files, ifo, tags=None):
if tags is None:
tags = []
segs = sngls_files.get_times_covered_by_files()
seg = segments.segment(segs[0][0], segs[-1][1])
node = Node(self)
node.add_input_list_opt('--sngls-files', sngls_files)
node.add_opt('--ifos', ifo)
node.new_output_file_opt(seg, '.hdf', '--output-file', tags=tags)
return node
class PyCBCStatMapInjExecutable(Executable):
"""Calculate FAP, IFAR, etc for coincs for injections"""
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, zerolag, full_data,
injfull, fullinj, ifos, tags=None):
if tags is None:
tags = []
segs = zerolag.get_times_covered_by_files()
seg = segments.segment(segs[0][0], segs[-1][1])
node = Node(self)
node.add_input_list_opt('--zero-lag-coincs', zerolag)
if isinstance(full_data, list):
node.add_input_list_opt('--full-data-background', full_data)
else:
node.add_input_opt('--full-data-background', full_data)
node.add_input_list_opt('--mixed-coincs-inj-full', injfull)
node.add_input_list_opt('--mixed-coincs-full-inj', fullinj)
node.add_opt('--ifos', ifos)
node.new_output_file_opt(seg, '.hdf', '--output-file', tags=tags)
return node
class PyCBCSnglsStatMapInjExecutable(Executable):
"""Calculate FAP, IFAR, etc for singles for injections"""
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, sngls_files, background_file,
ifos, tags=None):
if tags is None:
tags = []
segs = sngls_files.get_times_covered_by_files()
seg = segments.segment(segs[0][0], segs[-1][1])
node = Node(self)
node.add_input_list_opt('--sngls-files', sngls_files)
node.add_input_opt('--full-data-background', background_file)
node.add_opt('--ifos', ifos)
node.new_output_file_opt(seg, '.hdf', '--output-file', tags=tags)
return node
class PyCBCHDFInjFindExecutable(Executable):
"""Find injections in the hdf files output"""
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, inj_coinc_file, inj_xml_file, veto_file, veto_name, tags=None):
if tags is None:
tags = []
node = Node(self)
node.add_input_list_opt('--trigger-file', inj_coinc_file)
node.add_input_list_opt('--injection-file', inj_xml_file)
if veto_name is not None:
node.add_input_opt('--veto-file', veto_file)
node.add_opt('--segment-name', veto_name)
node.new_output_file_opt(inj_xml_file[0].segment, '.hdf',
'--output-file', tags=tags)
return node
class PyCBCDistributeBackgroundBins(Executable):
"""Distribute coinc files among different background bins"""
current_retention_level = Executable.ALL_TRIGGERS
def create_node(self, coinc_files, bank_file, background_bins, tags=None):
if tags is None:
tags = []
node = Node(self)
node.add_input_list_opt('--coinc-files', coinc_files)
node.add_input_opt('--bank-file', bank_file)
node.add_opt('--background-bins', ' '.join(background_bins))
names = [b.split(':')[0] for b in background_bins]
output_files = [File(coinc_files[0].ifo_list,
self.name,
coinc_files[0].segment,
directory=self.out_dir,
                             tags=tags + ['mbin-%s' % i],
extension='.hdf') for i in range(len(background_bins))]
node.add_output_list_opt('--output-files', output_files)
node.names = names
return node
class PyCBCCombineStatmap(Executable):
"""Combine coincs over different bins and apply trials factor"""
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, statmap_files, tags=None):
if tags is None:
tags = []
node = Node(self)
node.add_input_list_opt('--statmap-files', statmap_files)
node.new_output_file_opt(statmap_files[0].segment, '.hdf',
'--output-file', tags=tags)
return node
class PyCBCAddStatmap(PyCBCCombineStatmap):
"""Combine statmap files and add FARs over different coinc types"""
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, statmap_files, background_files, tags=None):
if tags is None:
tags = []
node = super(PyCBCAddStatmap, self).create_node(statmap_files,
tags=tags)
# Enforce upper case
ctags = [t.upper() for t in (tags + self.tags)]
if 'INJECTIONS' in ctags:
node.add_input_list_opt('--background-files', background_files)
return node
class PyCBCExcludeZerolag(Executable):
""" Remove times of zerolag coincidences of all types from exclusive
background """
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, statmap_file, other_statmap_files, tags=None):
if tags is None:
tags = []
node = Node(self)
node.add_input_opt('--statmap-file', statmap_file)
node.add_input_list_opt('--other-statmap-files',
other_statmap_files)
node.new_output_file_opt(statmap_file.segment, '.hdf',
'--output-file', tags=None)
return node
class MergeExecutable(Executable):
current_retention_level = Executable.MERGED_TRIGGERS
class CensorForeground(Executable):
current_retention_level = Executable.MERGED_TRIGGERS
def make_foreground_censored_veto(workflow, bg_file, veto_file, veto_name,
censored_name, out_dir, tags=None):
tags = [] if tags is None else tags
node = CensorForeground(workflow.cp, 'foreground_censor', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--foreground-triggers', bg_file)
node.add_input_opt('--veto-file', veto_file)
node.add_opt('--segment-name', veto_name)
node.add_opt('--output-segment-name', censored_name)
node.new_output_file_opt(workflow.analysis_time, '.xml', '--output-file')
workflow += node
return node.output_files[0]
def merge_single_detector_hdf_files(workflow, bank_file, trigger_files, out_dir, tags=None):
if tags is None:
tags = []
make_analysis_dir(out_dir)
out = FileList()
for ifo in workflow.ifos:
node = MergeExecutable(workflow.cp, 'hdf_trigger_merge',
ifos=ifo, out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--bank-file', bank_file)
node.add_input_list_opt('--trigger-files', trigger_files.find_output_with_ifo(ifo))
node.new_output_file_opt(workflow.analysis_time, '.hdf', '--output-file')
workflow += node
out += node.output_files
return out
def setup_trigger_fitting(workflow, insps, hdfbank, veto_file, veto_name,
output_dir=None, tags=None):
if not workflow.cp.has_option('workflow-coincidence', 'do-trigger-fitting'):
return FileList()
else:
smoothed_fit_files = FileList()
for i in workflow.ifos:
ifo_insp = [insp for insp in insps if (insp.ifo == i)]
assert len(ifo_insp)==1
ifo_insp = ifo_insp[0]
raw_exe = PyCBCFitByTemplateExecutable(workflow.cp,
'fit_by_template', ifos=i,
out_dir=output_dir,
tags=tags)
raw_node = raw_exe.create_node(ifo_insp, hdfbank,
veto_file, veto_name)
workflow += raw_node
smooth_exe = PyCBCFitOverParamExecutable(workflow.cp,
'fit_over_param', ifos=i,
out_dir=output_dir,
tags=tags)
smooth_node = smooth_exe.create_node(raw_node.output_file,
hdfbank)
workflow += smooth_node
smoothed_fit_files += smooth_node.output_files
return smoothed_fit_files
def find_injections_in_hdf_coinc(workflow, inj_coinc_file, inj_xml_file,
veto_file, veto_name, out_dir, tags=None):
if tags is None:
tags = []
make_analysis_dir(out_dir)
exe = PyCBCHDFInjFindExecutable(workflow.cp, 'hdfinjfind',
ifos=workflow.ifos,
out_dir=out_dir, tags=tags)
node = exe.create_node(inj_coinc_file, inj_xml_file, veto_file, veto_name)
workflow += node
return node.output_files[0]
def convert_bank_to_hdf(workflow, xmlbank, out_dir, tags=None):
"""Return the template bank in hdf format"""
if tags is None:
tags = []
#FIXME, make me not needed
if len(xmlbank) > 1:
raise ValueError('Can only convert a single template bank')
logging.info('convert template bank to HDF')
make_analysis_dir(out_dir)
bank2hdf_exe = PyCBCBank2HDFExecutable(workflow.cp, 'bank2hdf',
ifos=workflow.ifos,
out_dir=out_dir, tags=tags)
bank2hdf_node = bank2hdf_exe.create_node(xmlbank[0])
workflow.add_node(bank2hdf_node)
return bank2hdf_node.output_files
def convert_trig_to_hdf(workflow, hdfbank, xml_trigger_files, out_dir, tags=None):
"""Return the list of hdf5 trigger files outputs"""
if tags is None:
tags = []
#FIXME, make me not needed
logging.info('convert single inspiral trigger files to hdf5')
make_analysis_dir(out_dir)
trig_files = FileList()
for ifo, insp_group in zip(*xml_trigger_files.categorize_by_attr('ifo')):
trig2hdf_exe = PyCBCTrig2HDFExecutable(workflow.cp, 'trig2hdf',
ifos=ifo, out_dir=out_dir, tags=tags)
_, insp_bundles = insp_group.categorize_by_attr('segment')
for insps in insp_bundles:
trig2hdf_node = trig2hdf_exe.create_node(insps, hdfbank[0])
workflow.add_node(trig2hdf_node)
trig_files += trig2hdf_node.output_files
return trig_files
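# Hedged usage sketch of the HDF conversion helpers above, assuming an
# already configured Workflow `wf`, an xml template bank FileList `xmlbank`
# (with a single entry), and xml single-detector trigger files `xml_trigs`;
# the output directory names are illustrative.
def _example_convert_and_merge(wf, xmlbank, xml_trigs):
    hdfbank = convert_bank_to_hdf(wf, xmlbank, 'bank')
    hdf_trigs = convert_trig_to_hdf(wf, hdfbank, xml_trigs, 'triggers')
    merged = merge_single_detector_hdf_files(wf, hdfbank[0], hdf_trigs,
                                             'triggers_merged')
    return merged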
def setup_statmap(workflow, ifos, coinc_files, out_dir, tags=None):
tags = [] if tags is None else tags
statmap_exe = PyCBCStatMapExecutable(workflow.cp, 'statmap',
ifos=ifos,
tags=tags, out_dir=out_dir)
ifolist = ' '.join(ifos)
stat_node = statmap_exe.create_node(coinc_files, ifolist)
workflow.add_node(stat_node)
return stat_node.output_file
def setup_sngls_statmap(workflow, ifo, sngls_files, out_dir, tags=None):
tags = [] if tags is None else tags
statmap_exe = PyCBCSnglsStatMapExecutable(workflow.cp, 'sngls_statmap',
ifos=ifo,
tags=tags, out_dir=out_dir)
stat_node = statmap_exe.create_node(sngls_files, ifo)
workflow.add_node(stat_node)
return stat_node.output_file
def setup_statmap_inj(workflow, ifos, coinc_files, background_file,
out_dir, tags=None):
tags = [] if tags is None else tags
statmap_exe = PyCBCStatMapInjExecutable(workflow.cp,
'statmap_inj',
ifos=ifos,
tags=tags, out_dir=out_dir)
ifolist = ' '.join(ifos)
stat_node = statmap_exe.create_node(FileList(coinc_files['injinj']),
background_file,
FileList(coinc_files['injfull']),
FileList(coinc_files['fullinj']),
ifolist)
workflow.add_node(stat_node)
return stat_node.output_files[0]
def setup_sngls_statmap_inj(workflow, ifo, sngls_inj_files, background_file,
out_dir, tags=None):
tags = [] if tags is None else tags
statmap_exe = PyCBCSnglsStatMapInjExecutable(workflow.cp,
'sngls_statmap_inj',
ifos=ifo,
tags=tags,
out_dir=out_dir)
stat_node = statmap_exe.create_node(sngls_inj_files,
background_file,
ifo)
workflow.add_node(stat_node)
return stat_node.output_files[0]
def setup_interval_coinc_inj(workflow, hdfbank, full_data_trig_files,
inj_trig_files, stat_files,
background_file, veto_file, veto_name,
out_dir, pivot_ifo, fixed_ifo, tags=None):
"""
This function sets up exact match coincidence for injections
"""
if tags is None:
tags = []
make_analysis_dir(out_dir)
logging.info('Setting up coincidence for injections')
# Wall time knob and memory knob
factor = int(workflow.cp.get_opt_tags('workflow-coincidence',
'parallelization-factor', tags))
ffiles = {}
ifiles = {}
for ifo, ffi in zip(*full_data_trig_files.categorize_by_attr('ifo')):
ffiles[ifo] = ffi[0]
for ifo, ifi in zip(*inj_trig_files.categorize_by_attr('ifo')):
ifiles[ifo] = ifi[0]
injinj_files = FileList()
injfull_files = FileList()
fullinj_files = FileList()
# For the injfull and fullinj separation we take the pivot_ifo on one side,
# and the rest that are attached to the fixed_ifo on the other side
for ifo in ifiles: # ifiles is keyed on ifo
if ifo == pivot_ifo:
injinj_files.append(ifiles[ifo])
injfull_files.append(ifiles[ifo])
fullinj_files.append(ffiles[ifo])
else:
injinj_files.append(ifiles[ifo])
injfull_files.append(ffiles[ifo])
fullinj_files.append(ifiles[ifo])
combo = [(injinj_files, "injinj"),
(injfull_files, "injfull"),
(fullinj_files, "fullinj"),
]
bg_files = {'injinj':[], 'injfull':[], 'fullinj':[]}
for trig_files, ctag in combo:
findcoinc_exe = PyCBCFindCoincExecutable(workflow.cp,
'coinc',
ifos=ifiles.keys(),
tags=tags + [ctag],
out_dir=out_dir)
for i in range(factor):
group_str = '%s/%s' % (i, factor)
coinc_node = findcoinc_exe.create_node(trig_files, hdfbank,
stat_files,
veto_file, veto_name,
group_str,
pivot_ifo,
fixed_ifo,
tags=['JOB'+str(i)])
bg_files[ctag] += coinc_node.output_files
workflow.add_node(coinc_node)
logging.info('...leaving coincidence for injections')
return setup_statmap_inj(workflow, ifiles.keys(), bg_files,
background_file, out_dir,
tags=tags + [veto_name])
def setup_interval_coinc(workflow, hdfbank, trig_files, stat_files,
veto_file, veto_name, out_dir, pivot_ifo,
fixed_ifo, tags=None):
"""
This function sets up exact match coincidence
"""
if tags is None:
tags = []
make_analysis_dir(out_dir)
logging.info('Setting up coincidence')
ifos, _ = trig_files.categorize_by_attr('ifo')
findcoinc_exe = PyCBCFindCoincExecutable(workflow.cp, 'coinc',
ifos=ifos,
tags=tags, out_dir=out_dir)
# Wall time knob and memory knob
factor = int(workflow.cp.get_opt_tags('workflow-coincidence',
'parallelization-factor',
[findcoinc_exe.ifo_string] + tags))
statmap_files = []
bg_files = FileList()
for i in range(factor):
group_str = '%s/%s' % (i, factor)
coinc_node = findcoinc_exe.create_node(trig_files, hdfbank,
stat_files,
veto_file, veto_name,
group_str,
pivot_ifo,
fixed_ifo,
tags=['JOB'+str(i)])
bg_files += coinc_node.output_files
workflow.add_node(coinc_node)
statmap_files = setup_statmap(workflow, ifos, bg_files,
out_dir, tags=tags)
logging.info('...leaving coincidence ')
return statmap_files
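# Hedged usage sketch for setup_interval_coinc, assuming a configured
# Workflow `wf`, an hdf bank FileList `hdfbank`, merged single-detector
# trigger files `trig_files`, smoothed fit files `stat_files`, and a veto
# File `veto`; the veto name, output directory, and ifo choices below are
# illustrative.
def _example_setup_interval_coinc(wf, hdfbank, trig_files, stat_files, veto):
    return setup_interval_coinc(wf, hdfbank, trig_files, stat_files,
                                veto, 'closed_box', 'full_data',
                                pivot_ifo='H1', fixed_ifo='L1')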
def setup_sngls(workflow, hdfbank, trig_files, stat_files,
veto_file, veto_name, out_dir, tags=None):
"""
This function sets up getting statistic values for single-detector triggers
"""
ifos, _ = trig_files.categorize_by_attr('ifo')
findsngls_exe = PyCBCFindSnglsExecutable(workflow.cp, 'sngls', ifos=ifos,
tags=tags, out_dir=out_dir)
# Wall time knob and memory knob
factor = int(workflow.cp.get_opt_tags('workflow-coincidence',
'parallelization-factor',
[findsngls_exe.ifo_string] + tags))
statmap_files = []
bg_files = FileList()
for i in range(factor):
group_str = '%s/%s' % (i, factor)
sngls_node = findsngls_exe.create_node(trig_files, hdfbank,
stat_files,
veto_file, veto_name,
group_str,
tags=['JOB'+str(i)])
bg_files += sngls_node.output_files
workflow.add_node(sngls_node)
statmap_files = setup_sngls_statmap(workflow, ifos[0], bg_files,
out_dir, tags=tags)
logging.info('...leaving coincidence ')
return statmap_files
def setup_sngls_inj(workflow, hdfbank, inj_trig_files,
stat_files, background_file, veto_file, veto_name,
out_dir, tags=None):
"""
This function sets up getting statistic values for single-detector triggers
from injections
"""
ifos, _ = inj_trig_files.categorize_by_attr('ifo')
findsnglsinj_exe = PyCBCFindSnglsExecutable(workflow.cp, 'sngls', ifos=ifos,
tags=tags, out_dir=out_dir)
# Wall time knob and memory knob
exe_str_tags = [findsnglsinj_exe.ifo_string] + tags
factor = int(workflow.cp.get_opt_tags('workflow-coincidence',
'parallelization-factor',
exe_str_tags))
statmap_files = []
bg_files = FileList()
for i in range(factor):
group_str = '%s/%s' % (i, factor)
sngls_node = findsnglsinj_exe.create_node(inj_trig_files, hdfbank,
stat_files,
veto_file, veto_name,
group_str,
tags=['JOB'+str(i)])
bg_files += sngls_node.output_files
workflow.add_node(sngls_node)
statmap_files = setup_sngls_statmap_inj(workflow, ifos[0], bg_files,
background_file,
out_dir, tags=tags)
logging.info('...leaving coincidence ')
return statmap_files
def select_files_by_ifo_combination(ifocomb, insps):
"""
This function selects single-detector files ('insps') for a given ifo combination
"""
inspcomb = FileList()
for ifo, ifile in zip(*insps.categorize_by_attr('ifo')):
if ifo in ifocomb:
inspcomb += ifile
return inspcomb
def get_ordered_ifo_list(ifocomb, ifo_ids):
"""
This function sorts the combination of ifos (ifocomb) based on the given
    precedence list (ifo_ids dictionary) and returns the first ifo as pivot,
    the second ifo as fixed, and the ordered list joined as a string.
"""
if len(ifocomb) == 1:
# Single-detector combinations don't have fixed/pivot IFOs
return None, None, ifocomb[0]
# combination_prec stores precedence info for the detectors in the combination
combination_prec = {ifo: ifo_ids[ifo] for ifo in ifocomb}
ordered_ifo_list = sorted(combination_prec, key = combination_prec.get)
pivot_ifo = ordered_ifo_list[0]
fixed_ifo = ordered_ifo_list[1]
return pivot_ifo, fixed_ifo, ''.join(ordered_ifo_list)
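# Hedged usage sketch for get_ordered_ifo_list; the precedence mapping is
# purely illustrative. With H1 given the smallest id it becomes the pivot
# ifo, L1 the fixed ifo, and the combination string is ordered by precedence.
def _example_get_ordered_ifo_list():
    pivot, fixed, combo = get_ordered_ifo_list(('H1', 'L1', 'V1'),
                                               {'H1': 0, 'L1': 1, 'V1': 2})
    assert (pivot, fixed, combo) == ('H1', 'L1', 'H1L1V1')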
def setup_combine_statmap(workflow, final_bg_file_list, bg_file_list,
out_dir, tags=None):
"""
Combine the statmap files into one background file
"""
if tags is None:
tags = []
make_analysis_dir(out_dir)
logging.info('Setting up combine statmap')
cstat_exe_name = os.path.basename(workflow.cp.get("executables",
"combine_statmap"))
if cstat_exe_name == 'pycbc_combine_statmap':
cstat_class = PyCBCCombineStatmap
elif cstat_exe_name == 'pycbc_add_statmap':
cstat_class = PyCBCAddStatmap
else:
raise NotImplementedError('executable should be '
'pycbc_combine_statmap or pycbc_add_statmap')
cstat_exe = cstat_class(workflow.cp, 'combine_statmap', ifos=workflow.ifos,
tags=tags, out_dir=out_dir)
if cstat_exe_name == 'pycbc_combine_statmap':
combine_statmap_node = cstat_exe.create_node(final_bg_file_list)
elif cstat_exe_name == 'pycbc_add_statmap':
combine_statmap_node = cstat_exe.create_node(final_bg_file_list,
bg_file_list)
workflow.add_node(combine_statmap_node)
return combine_statmap_node.output_file
def setup_exclude_zerolag(workflow, statmap_file, other_statmap_files,
out_dir, ifos, tags=None):
"""
Exclude single triggers close to zerolag triggers from forming any
background events
"""
if tags is None:
tags = []
make_analysis_dir(out_dir)
logging.info('Setting up exclude zerolag')
exc_zerolag_exe = PyCBCExcludeZerolag(workflow.cp, 'exclude_zerolag',
ifos=ifos, tags=tags,
out_dir=out_dir)
exc_zerolag_node = exc_zerolag_exe.create_node(statmap_file,
other_statmap_files,
tags=None)
workflow.add_node(exc_zerolag_node)
return exc_zerolag_node.output_file
def rerank_coinc_followup(workflow, statmap_file, bank_file, out_dir,
tags=None,
injection_file=None,
ranking_file=None):
if tags is None:
tags = []
make_analysis_dir(out_dir)
if not workflow.cp.has_section("workflow-rerank"):
logging.info("No reranking done in this workflow")
return statmap_file
else:
logging.info("Setting up reranking of candidates")
# Generate reduced data files (maybe this could also be used elsewhere?)
stores = FileList([])
for ifo in workflow.ifos:
make_analysis_dir('strain_files')
node = Executable(workflow.cp, 'strain_data_reduce', ifos=[ifo],
out_dir='strain_files', tags=tags).create_node()
node.add_opt('--gps-start-time', workflow.analysis_time[0])
node.add_opt('--gps-end-time', workflow.analysis_time[1])
if injection_file:
node.add_input_opt('--injection-file', injection_file)
fil = node.new_output_file_opt(workflow.analysis_time, '.hdf',
'--output-file')
stores.append(fil)
workflow += node
# Generate trigger input file
node = Executable(workflow.cp, 'rerank_trigger_input', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--statmap-file', statmap_file)
node.add_input_opt('--bank-file', bank_file)
trigfil = node.new_output_file_opt(workflow.analysis_time, '.hdf',
'--output-file')
workflow += node
# Parallelize coinc trigger followup
factor = int(workflow.cp.get_opt_tags("workflow-rerank",
"parallelization-factor", tags))
exe = Executable(workflow.cp, 'coinc_followup', ifos=workflow.ifos,
out_dir=out_dir, tags=tags)
stat_files = FileList([])
for i in range(factor):
node = exe.create_node()
node.new_output_file_opt(workflow.analysis_time, '.hdf',
'--output-file', tags=[str(i)])
node.add_multiifo_input_list_opt('--hdf-store', stores)
node.add_input_opt('--input-file', trigfil)
node.add_opt('--start-index', str(i))
node.add_opt('--stride', factor)
workflow += node
stat_files += node.output_files
exe = Executable(workflow.cp, 'rerank_coincs', ifos=workflow.ifos,
out_dir=out_dir, tags=tags)
node = exe.create_node()
node.add_input_list_opt('--stat-files', stat_files)
node.add_input_opt('--statmap-file', statmap_file)
node.add_input_opt('--followup-file', trigfil)
if ranking_file:
node.add_input_opt('--ranking-file', ranking_file)
node.new_output_file_opt(workflow.analysis_time, '.hdf',
'--output-file')
workflow += node
return node.output_file
| 33,288 | 39.695599 | 92 | py | pycbc | pycbc-master/pycbc/workflow/inference_followups.py |
# Copyright (C) 2016 Christopher M. Biwer, Alexander Harvey Nitz, Collin Capano
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Module that contains functions for setting up the followup (post-processing
and plotting) of inference results in a workflow.
"""
from pycbc.workflow.core import (Executable, makedir)
from pycbc.workflow.plotting import PlotExecutable
from pycbc.results import layout
def make_inference_plot(workflow, input_file, output_dir,
name, analysis_seg=None,
tags=None, input_file_opt='input-file',
output_file_extension='.png',
add_to_workflow=False):
"""Boiler-plate function for creating a standard plotting job.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
input_file: (list of) pycbc.workflow.File
The file used for the input. May provide either a single file or a
list of files.
output_dir: str
The directory to store result plots.
name: str
The name in the [executables] section of the configuration file
to use.
    analysis_seg: ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags: list, optional
Tags to add to the inference executables.
input_file_opt : str, optional
The name of the input-file option used by the executable. Default
is ``input-file``.
output_file_extension : str, optional
What file type to create. Default is ``.png``.
add_to_workflow : bool, optional
If True, the node will be added to the workflow before being returned.
**This means that no options may be added to the node afterward.**
Default is ``False``.
Returns
-------
pycbc.workflow.plotting.PlotExecutable
The job node for creating the plot.
"""
# default values
if tags is None:
tags = []
if analysis_seg is None:
analysis_seg = workflow.analysis_time
# make the directory that will contain the output files
makedir(output_dir)
# Catch if a parameters option was specified:
# we need to do this because PlotExecutable will automatically add any
# option in the section to the node. However, we need to add the
# appropriate escapes to the parameters option so pegasus will render it
# properly (see _params_for_pegasus for details).
parameters = None
if workflow.cp.has_option(name, 'parameters'):
parameters = workflow.cp.get(name, 'parameters')
workflow.cp.remove_option(name, 'parameters')
# make a node for plotting the posterior as a corner plot
node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos,
out_dir=output_dir,
tags=tags).create_node()
# add back the parameters option if it was specified
if parameters is not None:
node.add_opt("--parameters", _params_for_pegasus(parameters))
# and put the opt back in the config file in memory
workflow.cp.set(name, 'parameters', parameters)
# add input and output options
if isinstance(input_file, list):
# list of input files are given, use input_list_opt
node.add_input_list_opt("--{}".format(input_file_opt), input_file)
else:
# assume just a single file
node.add_input_opt("--{}".format(input_file_opt), input_file)
node.new_output_file_opt(analysis_seg, output_file_extension,
"--output-file")
# add node to workflow
if add_to_workflow:
workflow += node
return node
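# Hedged usage sketch for make_inference_plot, assuming an existing Workflow
# `wf`, a posterior File `posterior_file`, and an executable named
# 'plot_posterior' in the [executables] section; the output directory and
# tag are illustrative.
def _example_make_inference_plot(wf, posterior_file):
    node = make_inference_plot(wf, posterior_file, 'plots', 'plot_posterior',
                               tags=['EXAMPLE'], add_to_workflow=False)
    # Options may still be added because the node is not yet in the workflow.
    node.add_opt('--parameters', _params_for_pegasus('mass1 mass2'))
    wf += node
    return node.output_files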
def make_inference_prior_plot(workflow, config_file, output_dir,
name="plot_prior",
analysis_seg=None, tags=None):
"""Sets up the corner plot of the priors in the workflow.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
config_file: pycbc.workflow.File
        The WorkflowConfigParser-parsable inference configuration file.
output_dir: str
The directory to store result plots and files.
name: str
The name in the [executables] section of the configuration file
to use, and the section to read for additional arguments to pass to
the executable. Default is ``plot_prior``.
    analysis_seg: ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags: list, optional
Tags to add to the inference executables.
Returns
-------
pycbc.workflow.FileList
A list of the output files.
"""
node = make_inference_plot(workflow, config_file, output_dir,
name, analysis_seg=analysis_seg, tags=tags,
input_file_opt='config-file',
add_to_workflow=True)
return node.output_files
def create_posterior_files(workflow, samples_files, output_dir,
parameters=None, name="extract_posterior",
analysis_seg=None, tags=None):
"""Sets up job to create posterior files from some given samples files.
Parameters
----------
workflow: pycbc.workflow.Workflow
The workflow instance we are populating
samples_files : str or list of str
One or more files to extract the posterior samples from.
output_dir: str
The directory to store result plots and files.
name: str, optional
The name in the [executables] section of the configuration file
to use, and the section to read for additional arguments to pass to
the executable. Default is ``extract_posterior``.
    analysis_seg: ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags: list, optional
Tags to add to the inference executables.
Returns
-------
pycbc.workflow.FileList
A list of output files.
"""
if analysis_seg is None:
analysis_seg = workflow.analysis_time
if tags is None:
tags = []
# Catch if a parameters option was specified:
# we need to do this because Executable will automatically add any
# option in the section to the node. However, we need to add the
# appropriate escapes to the parameters option so pegasus will render it
# properly (see _params_for_pegasus for details).
parameters = None
if workflow.cp.has_option(name, 'parameters'):
parameters = workflow.cp.get(name, 'parameters')
workflow.cp.remove_option(name, 'parameters')
extract_posterior_exe = Executable(workflow.cp, name,
ifos=workflow.ifos,
out_dir=output_dir)
node = extract_posterior_exe.create_node()
# add back the parameters option if it was specified
if parameters is not None:
node.add_opt("--parameters", _params_for_pegasus(parameters))
# and put the opt back in the config file in memory
workflow.cp.set(name, 'parameters', parameters)
if not isinstance(samples_files, list):
samples_files = [samples_files]
node.add_input_list_opt("--input-file", samples_files)
node.new_output_file_opt(analysis_seg, ".hdf", "--output-file", tags=tags)
# add node to workflow
workflow += node
return node.output_files
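# Hedged usage sketch for create_posterior_files, assuming a configured
# Workflow `wf` and a list of samples Files `samples`; the directory name
# and tag are illustrative. The single posterior File is the first (and
# only) entry of the returned FileList, mirroring how make_posterior_workflow
# uses this function below.
def _example_create_posterior_files(wf, samples):
    return create_posterior_files(wf, samples, 'posterior_files',
                                  tags=['EVENT1'])[0]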
def create_fits_file(workflow, inference_file, output_dir,
name="create_fits_file",
analysis_seg=None, tags=None):
"""Sets up job to create fits files from some given samples files.
Parameters
----------
workflow: pycbc.workflow.Workflow
The workflow instance we are populating
inference_file: pycbc.workflow.File
The file with posterior samples.
output_dir: str
The directory to store result plots and files.
name: str, optional
The name in the [executables] section of the configuration file
to use, and the section to read for additional arguments to pass to
the executable. Default is ``create_fits_file``.
    analysis_seg: ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags: list, optional
Tags to add to the inference executables.
Returns
-------
pycbc.workflow.FileList
A list of output files.
"""
if analysis_seg is None:
analysis_seg = workflow.analysis_time
if tags is None:
tags = []
create_fits_exe = Executable(workflow.cp, name,
ifos=workflow.ifos,
out_dir=output_dir)
node = create_fits_exe.create_node()
node.add_input_opt("--input-file", inference_file)
node.new_output_file_opt(analysis_seg, ".fits", "--output-file", tags=tags)
# add node to workflow
workflow += node
return node.output_files
def make_inference_skymap(workflow, fits_file, output_dir,
name="plot_skymap", analysis_seg=None,
tags=None):
"""Sets up the skymap plot.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
fits_file: pycbc.workflow.File
The fits file with the sky location.
output_dir: str
The directory to store result plots and files.
name: str, optional
The name in the [executables] section of the configuration file
to use, and the section to read for additional arguments to pass to
the executable. Default is ``plot_skymap``.
    analysis_seg: ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags: list, optional
Tags to add to the inference executables.
Returns
-------
pycbc.workflow.FileList
A list of result and output files.
"""
node = make_inference_plot(workflow, fits_file, output_dir,
name, analysis_seg=analysis_seg, tags=tags,
add_to_workflow=True)
return node.output_files
def make_inference_summary_table(workflow, inference_file, output_dir,
parameters=None, print_metadata=None,
name="table_summary",
analysis_seg=None, tags=None):
"""Sets up the html table summarizing parameter estimates.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
inference_file: pycbc.workflow.File
The file with posterior samples.
output_dir: str
The directory to store result plots and files.
parameters : list or str
A list or string of parameters to generate the table for. If a string
is provided, separate parameters should be space or new-line separated.
print_metadata : list or str
A list or string of metadata parameters to print. Syntax is the same
as for ``parameters``.
name: str, optional
The name in the [executables] section of the configuration file
to use, and the section to read for additional arguments to pass to
the executable. Default is ``table_summary``.
    analysis_seg: ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags: list, optional
Tags to add to the inference executables.
Returns
-------
pycbc.workflow.FileList
A list of output files.
"""
# we'll use make_inference_plot even though this isn't a plot; the
# setup is the same, we just change the file extension
node = make_inference_plot(workflow, inference_file, output_dir,
name, analysis_seg=analysis_seg, tags=tags,
output_file_extension='.html',
add_to_workflow=False)
# now add the parameters and print metadata options; these are pulled
# from separate sections in the workflow config file, which is why we
# add them separately here
if parameters is not None:
node.add_opt("--parameters", _params_for_pegasus(parameters))
if print_metadata is not None:
node.add_opt("--print-metadata", _params_for_pegasus(print_metadata))
workflow += node
return node.output_files
def make_inference_posterior_plot(workflow, inference_file, output_dir,
parameters=None, plot_prior_from_file=None,
name="plot_posterior",
analysis_seg=None, tags=None):
"""Sets up the corner plot of the posteriors in the workflow.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
inference_file: pycbc.workflow.File
The file with posterior samples.
output_dir: str
The directory to store result plots and files.
parameters : list or str
The parameters to plot.
plot_prior_from_file : str, optional
Plot the prior from the given config file on the 1D marginal plots.
name: str, optional
The name in the [executables] section of the configuration file
to use, and the section to read for additional arguments to pass to
the executable. Default is ``plot_posterior``.
    analysis_seg: ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags: list, optional
Tags to add to the inference executables.
Returns
-------
pycbc.workflow.FileList
A list of output files.
"""
# create the node, but delay adding it to the workflow so we can add
# the prior file if it is requested
node = make_inference_plot(workflow, inference_file, output_dir,
name, analysis_seg=analysis_seg, tags=tags,
add_to_workflow=False)
if parameters is not None:
node.add_opt("--parameters", _params_for_pegasus(parameters))
if plot_prior_from_file is not None:
node.add_input_opt('--plot-prior', plot_prior_from_file)
# now add the node to workflow
workflow += node
return node.output_files
def make_inference_samples_plot(workflow, inference_file, output_dir,
name="plot_samples",
analysis_seg=None, tags=None):
"""Sets up a plot of the samples versus iteration (for MCMC samplers).
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
inference_file: pycbc.workflow.File
The file with posterior samples.
output_dir: str
The directory to store result plots and files.
name: str, optional
The name in the [executables] section of the configuration file
to use, and the section to read for additional arguments to pass to
the executable. Default is ``plot_samples``.
    analysis_seg: ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags: list, optional
Tags to add to the inference executables.
Returns
-------
pycbc.workflow.FileList
A list of output files.
"""
node = make_inference_plot(workflow, inference_file, output_dir,
name, analysis_seg=analysis_seg, tags=tags,
add_to_workflow=True)
return node.output_files
def make_inference_acceptance_rate_plot(workflow, inference_file, output_dir,
name="plot_acceptance_rate",
analysis_seg=None, tags=None):
"""Sets up a plot of the acceptance rate (for MCMC samplers).
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
inference_file: pycbc.workflow.File
The file with posterior samples.
output_dir: str
The directory to store result plots and files.
name: str, optional
The name in the [executables] section of the configuration file
to use, and the section to read for additional arguments to pass to
the executable. Default is ``plot_acceptance_rate``.
    analysis_seg: ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags: list, optional
Tags to add to the inference executables.
Returns
-------
pycbc.workflow.FileList
A list of output files.
"""
node = make_inference_plot(workflow, inference_file, output_dir,
name, analysis_seg=analysis_seg, tags=tags,
add_to_workflow=True)
return node.output_files
def make_inference_plot_mcmc_history(workflow, inference_file, output_dir,
name="plot_mcmc_history",
analysis_seg=None, tags=None):
"""Sets up a plot showing the checkpoint history of an MCMC sampler.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
inference_file: pycbc.workflow.File
The file with posterior samples.
output_dir: str
The directory to store result plots and files.
name: str, optional
The name in the [executables] section of the configuration file
to use, and the section to read for additional arguments to pass to
the executable. Default is ``plot_mcmc_history``.
    analysis_seg: ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags: list, optional
Tags to add to the inference executables.
Returns
-------
pycbc.workflow.FileList
A list of output files.
"""
node = make_inference_plot(workflow, inference_file, output_dir,
name, analysis_seg=analysis_seg, tags=tags,
add_to_workflow=True)
return node.output_files
def make_inference_dynesty_run_plot(workflow, inference_file, output_dir,
name="plot_dynesty_run",
analysis_seg=None, tags=None):
"""Sets up a debugging plot for the dynesty run (for Dynesty sampler).
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
inference_file: pycbc.workflow.File
The file with posterior samples.
output_dir: str
The directory to store result plots and files.
name: str, optional
The name in the [executables] section of the configuration file
to use, and the section to read for additional arguments to pass to
the executable. Default is ``plot_dynesty_run``.
    analysis_seg: ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags: list, optional
Tags to add to the inference executables.
Returns
-------
pycbc.workflow.FileList
A list of output files.
"""
node = make_inference_plot(workflow, inference_file, output_dir,
name, analysis_seg=analysis_seg, tags=tags,
add_to_workflow=True)
return node.output_files
def make_inference_dynesty_trace_plot(workflow, inference_file, output_dir,
name="plot_dynesty_traceplot",
analysis_seg=None, tags=None):
"""Sets up a trace plot for the dynesty run (for Dynesty sampler).
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
inference_file: pycbc.workflow.File
The file with posterior samples.
output_dir: str
The directory to store result plots and files.
name: str, optional
The name in the [executables] section of the configuration file
to use, and the section to read for additional arguments to pass to
the executable. Default is ``plot_dynesty_traceplot``.
    analysis_seg: ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags: list, optional
Tags to add to the inference executables.
Returns
-------
pycbc.workflow.FileList
A list of output files.
"""
node = make_inference_plot(workflow, inference_file, output_dir,
name, analysis_seg=analysis_seg, tags=tags,
add_to_workflow=True)
return node.output_files
def make_inference_pp_table(workflow, posterior_files, output_dir,
parameters=None, injection_samples_map=None,
name="pp_table_summary",
analysis_seg=None, tags=None):
"""Performs a PP, writing results to an html table.
Parameters
----------
workflow : pycbc.workflow.Workflow
The core workflow instance we are populating
posterior_files : pycbc.workflow.core.FileList
List of files with posteriors of injections.
output_dir : str
The directory to store result plots and files.
parameters : list or str, optional
A list or string of parameters to generate the table for. If a string
is provided, separate parameters should be space or new-line separated.
injection_samples_map : (list of) str, optional
Map between injection parameters and parameters in the posterior file.
Format is ``INJECTION_PARAM:SAMPLES_PARAM``.
name : str, optional
The name in the [executables] section of the configuration file
to use, and the section to read for additional arguments to pass to
the executable. Default is ``table_summary``.
    analysis_seg : ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags : list, optional
Tags to add to the inference executables.
Returns
-------
pycbc.workflow.FileList
A list of output files.
"""
# we'll use make_inference_plot even though this isn't a plot; the
# setup is the same, we just change the file extension
node = make_inference_plot(workflow, posterior_files, output_dir,
name, analysis_seg=analysis_seg, tags=tags,
output_file_extension='.html',
add_to_workflow=False)
# add the parameters and inj/samples map
if parameters is not None:
node.add_opt("--parameters", _params_for_pegasus(parameters))
if injection_samples_map is not None:
node.add_opt("--injection-samples-map",
_params_for_pegasus(injection_samples_map))
workflow += node
return node.output_files
def make_inference_pp_plot(workflow, posterior_files, output_dir,
parameters=None, injection_samples_map=None,
name="plot_pp",
analysis_seg=None, tags=None):
"""Sets up a pp plot in the workflow.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
posterior_files: pycbc.workflow.core.FileList
List of files with posteriors of injections.
output_dir: str
The directory to store result plots and files.
parameters : list or str, optional
The parameters to plot.
injection_samples_map : (list of) str, optional
Map between injection parameters and parameters in the posterior file.
Format is ``INJECTION_PARAM:SAMPLES_PARAM``.
name: str, optional
The name in the [executables] section of the configuration file
to use, and the section to read for additional arguments to pass to
the executable. Default is ``plot_pp``.
    analysis_seg: ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags: list, optional
Tags to add to the inference executables.
Returns
-------
pycbc.workflow.FileList
A list of output files.
"""
node = make_inference_plot(workflow, posterior_files, output_dir,
name, analysis_seg=analysis_seg, tags=tags,
add_to_workflow=False)
# add the parameters and inj/samples map
if parameters is not None:
node.add_opt("--parameters", _params_for_pegasus(parameters))
if injection_samples_map is not None:
node.add_opt("--injection-samples-map",
_params_for_pegasus(injection_samples_map))
# now add the node to workflow
workflow += node
return node.output_files
def make_inference_inj_recovery_plot(workflow, posterior_files, output_dir,
parameter, injection_samples_map=None,
name="inj_recovery",
analysis_seg=None, tags=None):
"""Sets up the recovered versus injected parameter plot in the workflow.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
    posterior_files: pycbc.workflow.core.FileList
List of files with posteriors of injections.
output_dir: str
The directory to store result plots and files.
parameter : str
The parameter to plot.
injection_samples_map : (list of) str, optional
Map between injection parameters and parameters in the posterior file.
Format is ``INJECTION_PARAM:SAMPLES_PARAM``.
name: str, optional
The name in the [executables] section of the configuration file
to use, and the section to read for additional arguments to pass to
the executable. Default is ``inj_recovery``.
    analysis_seg: ligo.segments.Segment, optional
The segment this job encompasses. If None then use the total analysis
time from the workflow.
tags: list, optional
Tags to add to the inference executables.
Returns
-------
pycbc.workflow.FileList
A list of output files.
"""
# arguments are the same as plot_pp, so just call that with the
# different executable name
return make_inference_pp_plot(
workflow, posterior_files, output_dir, parameters=parameter,
injection_samples_map=injection_samples_map,
name=name, analysis_seg=analysis_seg, tags=tags)
def get_plot_group(cp, section_tag):
"""Gets plotting groups from ``[workflow-section_tag]``."""
group_prefix = "plot-group-"
# parameters for the summary plots
plot_groups = {}
opts = [opt for opt in cp.options("workflow-{}".format(section_tag))
if opt.startswith(group_prefix)]
for opt in opts:
group = opt.replace(group_prefix, "").replace("-", "_")
plot_groups[group] = cp.get_opt_tag("workflow", opt, section_tag)
return plot_groups
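# Hedged configuration sketch for get_plot_group: given a (hypothetical)
# section such as
#
#   [workflow-summary_plots]
#   plot-group-masses = mass1 mass2
#   plot-group-spins = spin1z spin2z
#
# get_plot_group(cp, 'summary_plots') would return
# {'masses': 'mass1 mass2', 'spins': 'spin1z spin2z'}.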
def get_diagnostic_plots(workflow):
"""Determines what diagnostic plots to create based on workflow.
    The plots to create are based on what executables are specified in the
workflow's config file. A list of strings is returned giving the diagnostic
plots to create. This list may contain:
* ``samples``: For MCMC samplers, a plot of the sample chains as a function
of iteration. This will be created if ``plot_samples`` is in the
executables section.
    * ``acceptance_rate``: For MCMC samplers, a plot of the acceptance rate.
    This will be created if ``plot_acceptance_rate`` is in the executables
    section.
    * ``mcmc_history``: For MCMC samplers, a plot of the checkpoint history.
    This will be created if ``plot_mcmc_history`` is in the executables
    section.
    * ``dynesty_run``: For the dynesty sampler, a diagnostic plot of the run.
    This will be created if ``plot_dynesty_run`` is in the executables
    section.
    * ``dynesty_traceplot``: For the dynesty sampler, a trace plot.
    This will be created if ``plot_dynesty_traceplot`` is in the executables
    section.
Returns
-------
list :
List of names of diagnostic plots.
"""
diagnostics = []
if "plot_samples" in workflow.cp.options("executables"):
diagnostics.append('samples')
if "plot_acceptance_rate" in workflow.cp.options("executables"):
diagnostics.append('acceptance_rate')
if "plot_mcmc_history" in workflow.cp.options("executables"):
diagnostics.append('mcmc_history')
if "plot_dynesty_run" in workflow.cp.options("executables"):
diagnostics.append('dynesty_run')
if "plot_dynesty_traceplot" in workflow.cp.options("executables"):
diagnostics.append('dynesty_traceplot')
return diagnostics
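# Hedged configuration sketch for get_diagnostic_plots: listing, for example,
#
#   [executables]
#   plot_samples = ${which:pycbc_inference_plot_samples}
#   plot_acceptance_rate = ${which:pycbc_inference_plot_acceptance_rate}
#
# in the workflow configuration makes this function return
# ['samples', 'acceptance_rate']. The executable paths shown are
# illustrative assumptions.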
def make_diagnostic_plots(workflow, diagnostics, samples_file, label, rdir,
tags=None):
"""Makes diagnostic plots.
    Diagnostic plots are sampler-specific plots that provide information on
how the sampler performed. All diagnostic plots use the output file
produced by ``pycbc_inference`` as their input. Diagnostic plots are added
to the results directory ``rdir/NAME`` where ``NAME`` is the name of the
diagnostic given in ``diagnostics``.
Parameters
----------
workflow : pycbc.workflow.core.Workflow
The workflow to add the plotting jobs to.
diagnostics : list of str
The names of the diagnostic plots to create. See
:py:func:`get_diagnostic_plots` for recognized names.
samples_file : (list of) pycbc.workflow.File
One or more samples files with which to create the diagnostic plots.
If a list of files is provided, a diagnostic plot for each file will
be created.
label : str
Event label for the diagnostic plots.
rdir : pycbc.results.layout.SectionNumber
Results directory layout.
tags : list of str, optional
Additional tags to add to the file names.
Returns
-------
dict :
Dictionary of diagnostic name -> list of files giving the plots that
will be created.
"""
if tags is None:
tags = []
out = {}
if not isinstance(samples_file, list):
samples_file = [samples_file]
if 'samples' in diagnostics:
# files for samples summary subsection
base = "samples/{}".format(label)
samples_plots = []
for kk, sf in enumerate(samples_file):
samples_plots += make_inference_samples_plot(
workflow, sf, rdir[base],
analysis_seg=workflow.analysis_time,
tags=tags+[label, str(kk)])
out['samples'] = samples_plots
layout.group_layout(rdir[base], samples_plots)
if 'acceptance_rate' in diagnostics:
# files for samples acceptance_rate subsection
base = "acceptance_rate/{}".format(label)
acceptance_plots = []
for kk, sf in enumerate(samples_file):
acceptance_plots += make_inference_acceptance_rate_plot(
workflow, sf, rdir[base],
analysis_seg=workflow.analysis_time,
tags=tags+[label, str(kk)])
out['acceptance_rate'] = acceptance_plots
layout.single_layout(rdir[base], acceptance_plots)
if 'mcmc_history' in diagnostics:
# files for samples mcmc history subsection
base = "mcmc_history/{}".format(label)
history_plots = []
for kk, sf in enumerate(samples_file):
history_plots += make_inference_plot_mcmc_history(
workflow, sf, rdir[base],
analysis_seg=workflow.analysis_time,
tags=tags+[label, str(kk)])
out['mcmc_history'] = history_plots
layout.single_layout(rdir[base], history_plots)
if 'dynesty_run' in diagnostics:
# files for dynesty run subsection
base = "dynesty_run/{}".format(label)
dynesty_run_plots = []
for kk, sf in enumerate(samples_file):
dynesty_run_plots += make_inference_dynesty_run_plot(
workflow, sf, rdir[base],
analysis_seg=workflow.analysis_time,
tags=tags+[label, str(kk)])
out['dynesty_run'] = dynesty_run_plots
layout.single_layout(rdir[base], dynesty_run_plots)
if 'dynesty_traceplot' in diagnostics:
        # files for dynesty trace plots subsection
base = "dynesty_traceplot/{}".format(label)
dynesty_trace_plots = []
for kk, sf in enumerate(samples_file):
dynesty_trace_plots += make_inference_dynesty_trace_plot(
workflow, sf, rdir[base],
analysis_seg=workflow.analysis_time,
tags=tags+[label, str(kk)])
out['dynesty_traceplot'] = dynesty_trace_plots
layout.single_layout(rdir[base], dynesty_trace_plots)
return out
def make_posterior_workflow(workflow, samples_files, config_file, label,
rdir, posterior_file_dir='posterior_files',
tags=None):
"""Adds jobs to a workflow that make a posterior file and subsequent plots.
A posterior file is first created from the given samples file(s). The
settings for extracting the posterior are set by the
``[extract_posterior]`` section. If that section has a ``parameters``
argument, then the parameters in the posterior file (and for use in all
subsequent plotting) will be whatever that option is set to. Otherwise,
the parameters in the posterior file will be whatever is common to
all of the given samples file(s).
Except for prior plots (which use the given inference config file), all
subsequent jobs use the posterior file. The following are created:
* **Summary table**: an html table created using the ``table_summary``
executable. The parameters to print in the table are retrieved from the
``table-params`` option in the ``[workflow-summary_table]`` section.
Metadata may also be printed by adding a ``print-metadata`` option to
that section.
* **Summary posterior plots**: a collection of posterior plots to include
in the summary page, after the summary table. The parameters to plot
are read from ``[workflow-summary_plots]``. Parameters should be grouped
together by providing
``plot-group-NAME = PARAM1[:LABEL1] PARAM2[:LABEL2]`` in that section,
where ``NAME`` is a unique name for each group. One posterior plot will
be created for each plot group. For clarity, only one or two parameters
should be plotted in each summary group, but this is not enforced.
Settings for the plotting executable are read from the
``plot_posterior_summary`` section; likewise, the executable used
is read from ``plot_posterior_summary`` in the
``[executables]`` section.
* **Sky maps**: if *both* ``create_fits_file`` and ``plot_skymap``
are listed in the ``[executables]`` section, then a ``.fits`` file and
sky map plot will be produced. The sky map plot will be included in
the summary plots. You must be running in a python 3 environment to
create these.
* **Prior plots**: plots of the prior will be created using the
``plot_prior`` executable. By default, all of the variable
parameters will be plotted. The prior plots are added to
    ``priors/LABEL/`` in the results directory, where ``LABEL`` is the
given ``label``.
* **Posterior plots**: additional posterior plots are created using the
``plot_posterior`` executable. The parameters to plot are
read from ``[workflow-plot_params]`` section. As with the summary
posterior plots, parameters are grouped together by providing
``plot-group-NAME`` options in that section. A posterior plot will be
created for each group, and added to the ``posteriors/LABEL/`` directory.
Plot settings are read from the ``[plot_posterior]`` section; this
is kept separate from the posterior summary so that different settings
can be used. For example, you may want to make a density plot for the
summary plots, but a scatter plot colored by SNR for the posterior plots.
Parameters
----------
    samples_files : pycbc.workflow.core.FileList
List of samples files to combine into a single posterior file.
    config_file : pycbc.workflow.File
The inference configuration file used to generate the samples file(s).
This is needed to make plots of the prior.
label : str
Unique label for the plots. Used in file names.
rdir : pycbc.results.layout.SectionNumber
The results directory to save the plots to.
posterior_file_dir : str, optional
The name of the directory to save the posterior file to. Default is
``posterior_files``.
tags : list of str, optional
Additional tags to add to the file names.
Returns
-------
posterior_file : pycbc.workflow.File
The posterior file that was created.
summary_files : list
List of files to go on the summary results page.
prior_plots : list
List of prior plots that will be created. These will be saved to
        ``priors/LABEL/`` in the results directory, where ``LABEL`` is the
provided label.
posterior_plots : list
List of posterior plots that will be created. These will be saved to
``posteriors/LABEL/`` in the results directory.
"""
# the list of plots to go in the summary
summary_files = []
if tags is None:
tags = []
analysis_seg = workflow.analysis_time
# figure out what parameters user wants to plot from workflow configuration
# parameters for the summary plots
summary_plot_params = get_plot_group(workflow.cp, 'summary_plots')
# parameters to plot in large corner plots
plot_params = get_plot_group(workflow.cp, 'plot_params')
# get parameters for the summary tables
table_params = workflow.cp.get_opt_tag('workflow', 'table-params',
'summary_table')
# get any metadata that should be printed
if workflow.cp.has_option('workflow-summary_table', 'print-metadata'):
table_metadata = workflow.cp.get_opt_tag('workflow', 'print-metadata',
'summary_table')
else:
table_metadata = None
# figure out if we are making a skymap
make_skymap = ("create_fits_file" in workflow.cp.options("executables") and
"plot_skymap" in workflow.cp.options("executables"))
make_prior = ("plot_prior" in workflow.cp.options("executables"))
_config = None
if make_prior:
_config = config_file
# make node for running extract samples
posterior_file = create_posterior_files(
workflow, samples_files, posterior_file_dir,
analysis_seg=analysis_seg, tags=tags+[label])[0]
# summary table
summary_files += (make_inference_summary_table(
workflow, posterior_file, rdir.base,
parameters=table_params, print_metadata=table_metadata,
analysis_seg=analysis_seg,
tags=tags+[label]),)
# summary posteriors
summary_plots = []
for group, params in summary_plot_params.items():
summary_plots += make_inference_posterior_plot(
workflow, posterior_file, rdir.base,
name='plot_posterior_summary',
parameters=params, plot_prior_from_file=_config,
analysis_seg=analysis_seg,
tags=tags+[label, group])
# sky map
if make_skymap:
# create the fits file
fits_file = create_fits_file(
workflow, posterior_file, rdir.base, analysis_seg=analysis_seg,
tags=tags+[label])[0]
# now plot the skymap
skymap_plot = make_inference_skymap(
workflow, fits_file, rdir.base, analysis_seg=analysis_seg,
tags=tags+[label])
summary_plots += skymap_plot
summary_files += list(layout.grouper(summary_plots, 2))
# files for posteriors summary subsection
base = "posteriors/{}".format(label)
posterior_plots = []
for group, params in plot_params.items():
posterior_plots += make_inference_posterior_plot(
workflow, posterior_file, rdir[base],
parameters=params, plot_prior_from_file=_config,
analysis_seg=analysis_seg,
tags=tags+[label, group])
layout.single_layout(rdir[base], posterior_plots)
prior_plots = []
# files for priors summary section
if make_prior:
base = "priors/{}".format(label)
prior_plots += make_inference_prior_plot(
workflow, config_file, rdir[base],
analysis_seg=workflow.analysis_time, tags=tags+[label])
layout.single_layout(rdir[base], prior_plots)
return posterior_file, summary_files, prior_plots, posterior_plots
def _params_for_pegasus(parameters):
"""Escapes $ and escapes in parameters string for pegasus.
Pegaus kickstart tries to do variable substitution if it sees a ``$``, and
it will strip away back slashes. This can be problematic when trying to use
LaTeX in parameter labels. This function adds escapes to all ``$`` and
backslashes in a parameters argument, so the argument can be safely passed
through pegasus-kickstart.
Parameters
----------
parameters : list or str
The parameters argument to modify. If a list, the output will be
converted to a space-separated string.
"""
if isinstance(parameters, list):
parameters = " ".join(parameters)
    return parameters.replace('\\', '\\\\').replace('$', '\\$')
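# Hedged usage sketch for _params_for_pegasus: every '$' (and backslash)
# gains an escape so pegasus-kickstart passes LaTeX labels through verbatim.
def _example_params_for_pegasus():
    escaped = _params_for_pegasus(['mass1:$m_1$', 'mass2:$m_2$'])
    assert escaped == 'mass1:\\$m_1\\$ mass2:\\$m_2\\$'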
| 42,899 | 40.812865 | 79 | py | pycbc | pycbc-master/pycbc/workflow/psdfiles.py |
# Copyright (C) 2015 Larne Pekowsky
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module is responsible for setting up the psd files used by CBC
workflows.
"""
# FIXME: Is this module still relevant for any code? Can it be removed?
import logging
import configparser as ConfigParser
from pycbc.workflow.core import FileList
from pycbc.workflow.core import make_analysis_dir, resolve_url_to_file
def setup_psd_workflow(workflow, science_segs, datafind_outs,
output_dir=None, tags=None):
'''
    Setup static psd section of CBC workflow. At present this only supports
    pregenerated psd files; in the future these could be created within the
    workflow.
Parameters
----------
workflow: pycbc.workflow.core.Workflow
An instanced class that manages the constructed workflow.
science_segs : Keyed dictionary of ligo.segments.segmentlist objects
        science_segs[ifo] holds the science segments to be analysed for each
ifo.
datafind_outs : pycbc.workflow.core.FileList
The file list containing the datafind files.
output_dir : path string
The directory where data products will be placed.
tags : list of strings
If given these tags are used to uniquely name and identify output files
that would be produced in multiple calls to this function.
Returns
--------
psd_files : pycbc.workflow.core.FileList
The FileList holding the psd files, 0 or 1 per ifo
'''
if tags is None:
tags = []
logging.info("Entering static psd module.")
make_analysis_dir(output_dir)
cp = workflow.cp
# Parse for options in ini file.
try:
psdMethod = cp.get_opt_tags("workflow-psd", "psd-method",
tags)
    except ConfigParser.Error:
        # Pre-generated PSDs are optional, just return an empty list if not
        # provided.
return FileList([])
if psdMethod == "PREGENERATED_FILE":
logging.info("Setting psd from pre-generated file(s).")
psd_files = setup_psd_pregenerated(workflow, tags=tags)
else:
errMsg = "PSD method not recognized. Only "
errMsg += "PREGENERATED_FILE is currently supported."
raise ValueError(errMsg)
logging.info("Leaving psd module.")
return psd_files
def setup_psd_pregenerated(workflow, tags=None):
'''
Setup CBC workflow to use pregenerated psd files.
    The file given in cp.get('workflow-psd', 'psd-pregenerated-file[-(ifo)]')
    will be used as the --psd-file argument to geom_nonspinbank,
    geom_aligned_bank and pycbc_plot_psd_file.
Parameters
----------
workflow: pycbc.workflow.core.Workflow
An instanced class that manages the constructed workflow.
tags : list of strings
If given these tags are used to uniquely name and identify output files
that would be produced in multiple calls to this function.
Returns
--------
psd_files : pycbc.workflow.core.FileList
        The FileList holding the psd files
'''
if tags is None:
tags = []
psd_files = FileList([])
cp = workflow.cp
global_seg = workflow.analysis_time
file_attrs = {'segs': global_seg, 'tags': tags}
# Check for one psd for all ifos
try:
pre_gen_file = cp.get_opt_tags('workflow-psd',
'psd-pregenerated-file', tags)
file_attrs['ifos'] = workflow.ifos
curr_file = resolve_url_to_file(pre_gen_file, attrs=file_attrs)
psd_files.append(curr_file)
except ConfigParser.Error:
# Check for one psd per ifo
for ifo in workflow.ifos:
try:
pre_gen_file = cp.get_opt_tags('workflow-psd',
'psd-pregenerated-file-%s' % ifo.lower(),
tags)
file_attrs['ifos'] = [ifo]
curr_file = resolve_url_to_file(pre_gen_file, attrs=file_attrs)
psd_files.append(curr_file)
except ConfigParser.Error:
# It's unlikely, but not impossible, that only some ifos
# will have pregenerated PSDs
                logging.warning("No psd file specified for IFO %s." % (ifo,))
pass
return psd_files
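# Hedged configuration sketch: a minimal (hypothetical) ini fragment that
# routes setup_psd_workflow to setup_psd_pregenerated, with one pregenerated
# PSD file per ifo. The file paths are illustrative only.
#
#   [workflow-psd]
#   psd-method = PREGENERATED_FILE
#   psd-pregenerated-file-h1 = file:///path/to/H1-PSD.txt
#   psd-pregenerated-file-l1 = file:///path/to/L1-PSD.txt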
| 5,160 | 34.840278 | 88 | py | pycbc | pycbc-master/pycbc/workflow/minifollowups.py |
# Copyright (C) 2015 Christopher M. Biwer, Alexander Harvey Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging, os.path
from ligo import segments
from pycbc.workflow.core import Executable, FileList
from pycbc.workflow.core import makedir, resolve_url_to_file
from pycbc.workflow.plotting import PlotExecutable, requirestr, excludestr
try:
# Python 3
from itertools import zip_longest
except ImportError:
# Python 2
from itertools import izip_longest as zip_longest
from pycbc.workflow.pegasus_workflow import SubWorkflow
def grouper(iterable, n, fillvalue=None):
""" Create a list of n length tuples
"""
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
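def _example_grouper_usage():
    """Illustrative sketch, not part of the original module: grouper pads
    the final tuple with ``fillvalue`` when the iterable length is not a
    multiple of n.
    """
    # 'ABCDE' has odd length, so the last tuple is padded with None
    assert list(grouper('ABCDE', 2)) == [('A', 'B'), ('C', 'D'), ('E', None)]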
def setup_foreground_minifollowups(workflow, coinc_file, single_triggers,
tmpltbank_file, insp_segs, insp_data_name,
insp_anal_name, dax_output, out_dir, tags=None):
""" Create plots that followup the Nth loudest coincident injection
from a statmap produced HDF file.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
coinc_file: pycbc.workflow.File
The file object pointing to the statmap file of coincident triggers.
single_triggers: list of pycbc.workflow.File
A list containing the file objects associated with the merged
single detector trigger files for each ifo.
tmpltbank_file: pycbc.workflow.File
The file object pointing to the HDF format template bank
insp_segs: SegFile
The segment file containing the data read and analyzed by each inspiral
job.
insp_data_name: str
The name of the segmentlist storing data read.
insp_anal_name: str
The name of the segmentlist storing data analyzed.
out_dir: path
The directory to store minifollowups result plots and files
tags: {None, optional}
Tags to add to the minifollowups executables
Returns
-------
layout: list
A list of tuples which specify the displayed file layout for the
minifollowups plots.
"""
logging.info('Entering minifollowups module')
if not workflow.cp.has_section('workflow-minifollowups'):
logging.info('There is no [workflow-minifollowups] section in configuration file')
logging.info('Leaving minifollowups')
return
tags = [] if tags is None else tags
makedir(dax_output)
# turn the config file into a File class
config_path = os.path.abspath(dax_output + '/' + '_'.join(tags) + 'foreground_minifollowup.ini')
workflow.cp.write(open(config_path, 'w'))
config_file = resolve_url_to_file(config_path)
exe = Executable(workflow.cp, 'foreground_minifollowup',
ifos=workflow.ifos, out_dir=dax_output, tags=tags)
node = exe.create_node()
node.add_input_opt('--config-files', config_file)
node.add_input_opt('--bank-file', tmpltbank_file)
node.add_input_opt('--statmap-file', coinc_file)
node.add_multiifo_input_list_opt('--single-detector-triggers',
single_triggers)
node.add_input_opt('--inspiral-segments', insp_segs)
node.add_opt('--inspiral-data-read-name', insp_data_name)
node.add_opt('--inspiral-data-analyzed-name', insp_anal_name)
if tags:
node.add_list_opt('--tags', tags)
node.new_output_file_opt(workflow.analysis_time, '.dax', '--dax-file')
node.new_output_file_opt(workflow.analysis_time, '.dax.map', '--output-map')
name = node.output_files[0].name
map_file = node.output_files[1]
node.add_opt('--workflow-name', name)
node.add_opt('--output-dir', out_dir)
node.add_opt('--dax-file-directory', '.')
workflow += node
# execute this in a sub-workflow
fil = node.output_files[0]
# determine if a staging site has been specified
job = SubWorkflow(fil.name, is_planned=False)
input_files = [tmpltbank_file, coinc_file, insp_segs] + single_triggers
job.add_inputs(*input_files)
job.set_subworkflow_properties(map_file,
staging_site=workflow.staging_site,
cache_file=workflow.cache_file)
job.add_into_workflow(workflow)
logging.info('Leaving minifollowups module')
def setup_single_det_minifollowups(workflow, single_trig_file, tmpltbank_file,
insp_segs, insp_data_name, insp_anal_name,
dax_output, out_dir, veto_file=None,
veto_segment_name=None, statfiles=None,
tags=None):
""" Create plots that followup the Nth loudest clustered single detector
triggers from a merged single detector trigger HDF file.
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
single_trig_file: pycbc.workflow.File
The File class holding the single detector triggers.
tmpltbank_file: pycbc.workflow.File
The file object pointing to the HDF format template bank
insp_segs: SegFile
The segment file containing the data read by each inspiral job.
insp_data_name: str
The name of the segmentlist storing data read.
insp_anal_name: str
The name of the segmentlist storing data analyzed.
out_dir: path
The directory to store minifollowups result plots and files
statfiles: FileList (optional, default=None)
Supplementary files necessary for computing the single-detector
statistic.
tags: {None, optional}
Tags to add to the minifollowups executables
Returns
-------
layout: list
A list of tuples which specify the displayed file layout for the
minifollowups plots.
"""
logging.info('Entering minifollowups module')
if not workflow.cp.has_section('workflow-sngl_minifollowups'):
msg = 'There is no [workflow-sngl_minifollowups] section in '
msg += 'configuration file'
logging.info(msg)
logging.info('Leaving minifollowups')
return
tags = [] if tags is None else tags
makedir(dax_output)
# turn the config file into a File class
curr_ifo = single_trig_file.ifo
config_path = os.path.abspath(dax_output + '/' + curr_ifo + \
'_'.join(tags) + 'singles_minifollowup.ini')
workflow.cp.write(open(config_path, 'w'))
config_file = resolve_url_to_file(config_path)
exe = Executable(workflow.cp, 'singles_minifollowup',
ifos=curr_ifo, out_dir=dax_output, tags=tags)
node = exe.create_node()
node.add_input_opt('--config-files', config_file)
node.add_input_opt('--bank-file', tmpltbank_file)
node.add_input_opt('--single-detector-file', single_trig_file)
node.add_input_opt('--inspiral-segments', insp_segs)
node.add_opt('--inspiral-data-read-name', insp_data_name)
node.add_opt('--inspiral-data-analyzed-name', insp_anal_name)
node.add_opt('--instrument', curr_ifo)
if veto_file is not None:
assert(veto_segment_name is not None)
node.add_input_opt('--veto-file', veto_file)
node.add_opt('--veto-segment-name', veto_segment_name)
if statfiles:
statfiles = statfiles.find_output_with_ifo(curr_ifo)
node.add_input_list_opt('--statistic-files', statfiles)
if tags:
node.add_list_opt('--tags', tags)
node.new_output_file_opt(workflow.analysis_time, '.dax', '--dax-file')
node.new_output_file_opt(workflow.analysis_time, '.dax.map',
'--output-map')
name = node.output_files[0].name
map_file = node.output_files[1]
node.add_opt('--workflow-name', name)
node.add_opt('--output-dir', out_dir)
node.add_opt('--dax-file-directory', '.')
workflow += node
# execute this in a sub-workflow
fil = node.output_files[0]
job = SubWorkflow(fil.name, is_planned=False)
input_files = [tmpltbank_file, insp_segs, single_trig_file]
if veto_file is not None:
input_files.append(veto_file)
if statfiles:
input_files += statfiles
job.add_inputs(*input_files)
job.set_subworkflow_properties(map_file,
staging_site=workflow.staging_site,
cache_file=workflow.cache_file)
job.add_into_workflow(workflow)
logging.info('Leaving minifollowups module')
def setup_injection_minifollowups(workflow, injection_file, inj_xml_file,
single_triggers, tmpltbank_file,
insp_segs, insp_data_name, insp_anal_name,
dax_output, out_dir, tags=None):
""" Create plots that followup the closest missed injections
Parameters
----------
workflow: pycbc.workflow.Workflow
The core workflow instance we are populating
injection_file: pycbc.workflow.File
The file object pointing to the HDF-format injection file.
inj_xml_file: pycbc.workflow.File
The file object pointing to the XML-format injection file.
single_triggers: list of pycbc.workflow.File
A list containing the file objects associated with the merged
single detector trigger files for each ifo.
tmpltbank_file: pycbc.workflow.File
The file object pointing to the HDF format template bank
insp_segs: SegFile
The segment file containing the data read by each inspiral job.
insp_data_name: str
The name of the segmentlist storing data read.
insp_anal_name: str
The name of the segmentlist storing data analyzed.
out_dir: path
The directory to store minifollowups result plots and files
tags: {None, optional}
Tags to add to the minifollowups executables
Returns
-------
layout: list
A list of tuples which specify the displayed file layout for the
minifollowups plots.
"""
logging.info('Entering injection minifollowups module')
if not workflow.cp.has_section('workflow-injection_minifollowups'):
logging.info('There is no [workflow-injection_minifollowups] section in configuration file')
logging.info('Leaving minifollowups')
return
tags = [] if tags is None else tags
makedir(dax_output)
# turn the config file into a File class
config_path = os.path.abspath(dax_output + '/' + '_'.join(tags) + 'injection_minifollowup.ini')
workflow.cp.write(open(config_path, 'w'))
config_file = resolve_url_to_file(config_path)
exe = Executable(workflow.cp, 'injection_minifollowup', ifos=workflow.ifos, out_dir=dax_output)
node = exe.create_node()
node.add_input_opt('--config-files', config_file)
node.add_input_opt('--bank-file', tmpltbank_file)
node.add_input_opt('--injection-file', injection_file)
node.add_input_opt('--injection-xml-file', inj_xml_file)
node.add_multiifo_input_list_opt('--single-detector-triggers', single_triggers)
node.add_input_opt('--inspiral-segments', insp_segs)
node.add_opt('--inspiral-data-read-name', insp_data_name)
node.add_opt('--inspiral-data-analyzed-name', insp_anal_name)
if tags:
node.add_list_opt('--tags', tags)
node.new_output_file_opt(workflow.analysis_time, '.dax', '--dax-file', tags=tags)
node.new_output_file_opt(workflow.analysis_time, '.dax.map', '--output-map', tags=tags)
name = node.output_files[0].name
map_file = node.output_files[1]
node.add_opt('--workflow-name', name)
node.add_opt('--output-dir', out_dir)
node.add_opt('--dax-file-directory', '.')
workflow += node
# execute this in a sub-workflow
fil = node.output_files[0]
job = SubWorkflow(fil.name, is_planned=False)
input_files = [tmpltbank_file, injection_file, inj_xml_file, insp_segs]
input_files += single_triggers
job.add_inputs(*input_files)
job.set_subworkflow_properties(map_file,
staging_site=workflow.staging_site,
cache_file=workflow.cache_file)
job.add_into_workflow(workflow)
logging.info('Leaving injection minifollowups module')
class SingleTemplateExecutable(PlotExecutable):
"""Class to be used for to create workflow.Executable instances for the
pycbc_single_template executable. Basically inherits directly from
PlotExecutable.
"""
time_dependent_options = ['--channel-name', '--frame-type']
class SingleTimeFreqExecutable(PlotExecutable):
"""Class to be used for to create workflow.Executable instances for the
pycbc_plot_singles_timefreq executable. Basically inherits directly from
PlotExecutable.
"""
time_dependent_options = ['--channel-name', '--frame-type']
class PlotQScanExecutable(PlotExecutable):
"""Class to be used for to create workflow.Executable instances for the
pycbc_plot_qscan executable. Basically inherits directly from
PlotExecutable.
"""
time_dependent_options = ['--channel-name', '--frame-type']
def make_single_template_plots(workflow, segs, data_read_name, analyzed_name,
params, out_dir, inj_file=None, exclude=None,
require=None, tags=None, params_str=None,
use_exact_inj_params=False):
"""Function for creating jobs to run the pycbc_single_template code and
to run the associated plotting code pycbc_single_template_plots and add
these jobs to the workflow.
Parameters
-----------
workflow : workflow.Workflow instance
The pycbc.workflow.Workflow instance to add these jobs to.
segs : workflow.File instance
The pycbc.workflow.File instance that points to the XML file containing
the segment lists of data read in and data analyzed.
data_read_name : str
The name of the segmentlist containing the data read in by each
inspiral job in the segs file.
analyzed_name : str
The name of the segmentlist containing the data analyzed by each
inspiral job in the segs file.
params : dictionary
A dictionary containing the parameters of the template to be used.
params[ifo+'end_time'] is required for all ifos in workflow.ifos.
If use_exact_inj_params is False then one also needs to supply values for
[mass1, mass2, spin1z, spin2z]. For precessing templates one also
needs to supply [spin1y, spin1x, spin2x, spin2y, inclination]
additionally for precession one must supply u_vals or
u_vals_<ifo> for all ifos. u_vals is the ratio between h_+ and h_x to
use when constructing h(t). h(t) = (h_+ * u_vals) + h_x.
out_dir : str
Directory in which to store the output files.
inj_file : workflow.File (optional, default=None)
If given send this injection file to the job so that injections are
made into the data.
exclude : list (optional, default=None)
If given, then when considering which subsections in the ini file to
parse for options to add to single_template_plot, only use subsections
that *do not* match strings in this list.
require : list (optional, default=None)
If given, then when considering which subsections in the ini file to
parse for options to add to single_template_plot, only use subsections
matching strings in this list.
tags : list (optional, default=None)
Add this list of tags to all jobs.
params_str : str (optional, default=None)
If given add this string to plot title and caption to describe the
template that was used.
use_exact_inj_params : boolean (optional, default=False)
If True do not use masses and spins listed in the params dictionary
but instead use the injection closest to the filter time as a template.
Returns
--------
output_files : workflow.FileList
The list of workflow.Files created in this function.
"""
tags = [] if tags is None else tags
makedir(out_dir)
name = 'single_template_plot'
secs = requirestr(workflow.cp.get_subsections(name), require)
secs = excludestr(secs, exclude)
secs = excludestr(secs, workflow.ifo_combinations)
files = FileList([])
for tag in secs:
for ifo in workflow.ifos:
if params['%s_end_time' % ifo] == -1.0:
continue
# Reanalyze the time around the trigger in each detector
curr_exe = SingleTemplateExecutable(workflow.cp, 'single_template',
ifos=[ifo], out_dir=out_dir,
tags=[tag] + tags)
start = int(params[ifo + '_end_time'])
end = start + 1
cseg = segments.segment([start, end])
node = curr_exe.create_node(valid_seg=cseg)
if use_exact_inj_params:
node.add_opt('--use-params-of-closest-injection')
else:
node.add_opt('--mass1', "%.6f" % params['mass1'])
node.add_opt('--mass2', "%.6f" % params['mass2'])
node.add_opt('--spin1z',"%.6f" % params['spin1z'])
node.add_opt('--spin2z',"%.6f" % params['spin2z'])
node.add_opt('--template-start-frequency',
"%.6f" % params['f_lower'])
# Is this precessing?
if 'u_vals' in params or 'u_vals_%s' % ifo in params:
node.add_opt('--spin1x',"%.6f" % params['spin1x'])
node.add_opt('--spin1y',"%.6f" % params['spin1y'])
node.add_opt('--spin2x',"%.6f" % params['spin2x'])
node.add_opt('--spin2y',"%.6f" % params['spin2y'])
node.add_opt('--inclination',"%.6f" % params['inclination'])
try:
node.add_opt('--u-val',"%.6f" % params['u_vals'])
except KeyError:
node.add_opt('--u-val',
"%.6f" % params['u_vals_%s' % ifo])
# str(numpy.float64) restricts to 2d.p. BE CAREFUL WITH THIS!!!
str_trig_time = '%.6f' %(params[ifo + '_end_time'])
node.add_opt('--trigger-time', str_trig_time)
node.add_input_opt('--inspiral-segments', segs)
if inj_file is not None:
node.add_input_opt('--injection-file', inj_file)
node.add_opt('--data-read-name', data_read_name)
node.add_opt('--data-analyzed-name', analyzed_name)
node.new_output_file_opt(workflow.analysis_time, '.hdf',
'--output-file', store_file=False)
data = node.output_files[0]
workflow += node
# Make the plot for this trigger and detector
node = PlotExecutable(workflow.cp, name, ifos=[ifo],
out_dir=out_dir, tags=[tag] + tags).create_node()
node.add_input_opt('--single-template-file', data)
node.new_output_file_opt(workflow.analysis_time, '.png',
'--output-file')
title="'%s SNR and chi^2 timeseries" %(ifo)
if params_str is not None:
title+= " using %s" %(params_str)
title+="'"
node.add_opt('--plot-title', title)
caption = "'The SNR and chi^2 timeseries around the injection"
if params_str is not None:
caption += " using %s" %(params_str)
if use_exact_inj_params:
caption += ". The injection itself was used as the template.'"
else:
caption += ". The template used has the following parameters: "
caption += "mass1=%s, mass2=%s, spin1z=%s, spin2z=%s'"\
%(params['mass1'], params['mass2'], params['spin1z'],
params['spin2z'])
node.add_opt('--plot-caption', caption)
workflow += node
files += node.output_files
return files
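# Illustrative sketch (not part of the original module): a minimal aligned-spin
# ``params`` dictionary accepted by make_single_template_plots for a workflow
# with H1 and L1. All values are hypothetical; an end time of -1.0 marks a
# detector without a trigger and is skipped by the loop above.
_EXAMPLE_SINGLE_TEMPLATE_PARAMS = {
    'H1_end_time': 1186741861.5,  # hypothetical GPS trigger time
    'L1_end_time': -1.0,          # no trigger in L1, so L1 is skipped
    'mass1': 1.4,
    'mass2': 1.4,
    'spin1z': 0.0,
    'spin2z': 0.0,
    'f_lower': 30.0,              # passed as --template-start-frequency
}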
def make_plot_waveform_plot(workflow, params, out_dir, ifos, exclude=None,
require=None, tags=None):
""" Add plot_waveform jobs to the workflow.
"""
tags = [] if tags is None else tags
makedir(out_dir)
name = 'single_template_plot'
secs = requirestr(workflow.cp.get_subsections(name), require)
secs = excludestr(secs, exclude)
secs = excludestr(secs, workflow.ifo_combinations)
files = FileList([])
for tag in secs:
node = PlotExecutable(workflow.cp, 'plot_waveform', ifos=ifos,
out_dir=out_dir, tags=[tag] + tags).create_node()
node.add_opt('--mass1', "%.6f" % params['mass1'])
node.add_opt('--mass2', "%.6f" % params['mass2'])
node.add_opt('--spin1z',"%.6f" % params['spin1z'])
node.add_opt('--spin2z',"%.6f" % params['spin2z'])
if 'u_vals' in params:
# Precessing options
node.add_opt('--spin1x',"%.6f" % params['spin1x'])
node.add_opt('--spin2x',"%.6f" % params['spin2x'])
node.add_opt('--spin1y',"%.6f" % params['spin1y'])
node.add_opt('--spin2y',"%.6f" % params['spin2y'])
node.add_opt('--inclination',"%.6f" % params['inclination'])
node.add_opt('--u-val', "%.6f" % params['u_vals'])
node.new_output_file_opt(workflow.analysis_time, '.png',
'--output-file')
workflow += node
files += node.output_files
return files
def make_inj_info(workflow, injection_file, injection_index, num, out_dir,
tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
name = 'page_injinfo'
files = FileList([])
node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--injection-file', injection_file)
node.add_opt('--injection-index', str(injection_index))
node.add_opt('--n-nearest', str(num))
node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
workflow += node
files += node.output_files
return files
def make_coinc_info(workflow, singles, bank, coinc, out_dir,
n_loudest=None, trig_id=None, file_substring=None,
sort_order=None, sort_var=None, tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
name = 'page_coincinfo'
files = FileList([])
node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_list_opt('--single-trigger-files', singles)
node.add_input_opt('--statmap-file', coinc)
node.add_input_opt('--bank-file', bank)
if sort_order:
node.add_opt('--sort-order', sort_order)
if sort_var:
node.add_opt('--sort-variable', sort_var)
if n_loudest is not None:
node.add_opt('--n-loudest', str(n_loudest))
if trig_id is not None:
node.add_opt('--trigger-id', str(trig_id))
if file_substring is not None:
node.add_opt('--statmap-file-subspace-name', file_substring)
node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
workflow += node
files += node.output_files
return files
def make_sngl_ifo(workflow, sngl_file, bank_file, trigger_id, out_dir, ifo,
tags=None):
"""Setup a job to create sngl detector sngl ifo html summary snippet.
"""
tags = [] if tags is None else tags
makedir(out_dir)
name = 'page_snglinfo'
files = FileList([])
node = PlotExecutable(workflow.cp, name, ifos=[ifo],
out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--single-trigger-file', sngl_file)
node.add_input_opt('--bank-file', bank_file)
node.add_opt('--trigger-id', str(trigger_id))
node.add_opt('--instrument', ifo)
node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
workflow += node
files += node.output_files
return files
def make_trigger_timeseries(workflow, singles, ifo_times, out_dir, special_tids=None,
exclude=None, require=None, tags=None):
tags = [] if tags is None else tags
makedir(out_dir)
name = 'plot_trigger_timeseries'
secs = requirestr(workflow.cp.get_subsections(name), require)
secs = excludestr(secs, exclude)
secs = excludestr(secs, workflow.ifo_combinations)
files = FileList([])
for tag in secs:
node = PlotExecutable(workflow.cp, name, ifos=workflow.ifos,
out_dir=out_dir, tags=[tag] + tags).create_node()
node.add_multiifo_input_list_opt('--single-trigger-files', singles)
node.add_opt('--times', ifo_times)
node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
if special_tids is not None:
node.add_opt('--special-trigger-ids', special_tids)
workflow += node
files += node.output_files
return files
def make_qscan_plot(workflow, ifo, trig_time, out_dir, injection_file=None,
data_segments=None, time_window=100, tags=None):
""" Generate a make_qscan node and add it to workflow.
This function generates a single node of the plot_qscan executable
and adds it to the current workflow. Parent/child relationships are set by
the input/output files automatically.
Parameters
-----------
workflow: pycbc.workflow.core.Workflow
The workflow class that stores the jobs that will be run.
ifo: str
Which interferometer are we using?
trig_time: int
The time of the trigger being followed up.
out_dir: str
Location of directory to output to
injection_file: pycbc.workflow.File (optional, default=None)
If given, add the injections in the file to strain before making the
plot.
data_segments: ligo.segments.segmentlist (optional, default=None)
The list of segments for which data exists and can be read in. If given
the start/end times given to plot_qscan will be adjusted if
[trig_time - time_window, trig_time + time_window] does not completely
lie within a valid data segment. A ValueError will be raised if the
trig_time is not within a valid segment, or if it is not possible to
find 2*time_window (plus the padding) of continuous data around the
trigger. This **must** be coalesced.
time_window: int (optional, default=100)
The amount of data (not including padding) that will be read in by the
plot_qscan job. The default value of 100s should be fine for most
cases.
tags: list (optional, default=None)
List of tags to add to the created nodes, which determine file naming.
"""
tags = [] if tags is None else tags
makedir(out_dir)
name = 'plot_qscan'
curr_exe = PlotQScanExecutable(workflow.cp, name, ifos=[ifo],
out_dir=out_dir, tags=tags)
# Determine start/end times, using data segments if needed.
# Begin by choosing "optimal" times
start = trig_time - time_window
end = trig_time + time_window
node = curr_exe.create_node(valid_seg=segments.segment([start, end]))
# Then if data_segments is available, check against that, and move if
# needed
if data_segments is not None:
# Assumes coalesced, so trig_time can only be within one segment
for seg in data_segments:
if trig_time in seg:
data_seg = seg
break
elif trig_time == -1.0:
node.add_opt('--gps-start-time', int(trig_time))
node.add_opt('--gps-end-time', int(trig_time))
node.add_opt('--center-time', trig_time)
caption_string = "'No trigger in %s'" % ifo
node.add_opt('--plot-caption', caption_string)
node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
workflow += node
return node.output_files
else:
err_msg = "Trig time {} ".format(trig_time)
err_msg += "does not seem to lie within any data segments. "
err_msg += "This shouldn't be possible, please ask for help!"
raise ValueError(err_msg)
# Check for pad-data
if curr_exe.has_opt('pad-data'):
pad_data = int(curr_exe.get_opt('pad-data'))
else:
pad_data = 0
# We only read data that's available. The code must handle the case
# of not much data being available.
if end > (data_seg[1] - pad_data):
end = data_seg[1] - pad_data
if start < (data_seg[0] + pad_data):
start = data_seg[0] + pad_data
node.add_opt('--gps-start-time', int(start))
node.add_opt('--gps-end-time', int(end))
node.add_opt('--center-time', trig_time)
if injection_file is not None:
node.add_input_opt('--injection-file', injection_file)
node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
workflow += node
return node.output_files
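# Worked example (illustrative, hypothetical numbers): for a coalesced data
# segment [1000, 1300), pad-data = 8 and time_window = 100, a trigger at
# GPS 1250 requests the window [1150, 1350]; the end overruns the padded
# segment boundary (1300 - 8 = 1292), so make_qscan_plot clamps it and
# passes --gps-start-time 1150 --gps-end-time 1292 --center-time 1250.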
def make_singles_timefreq(workflow, single, bank_file, trig_time, out_dir,
veto_file=None, time_window=10, data_segments=None,
tags=None):
""" Generate a singles_timefreq node and add it to workflow.
This function generates a single node of the singles_timefreq executable
and adds it to the current workflow. Parent/child relationships are set by
the input/output files automatically.
Parameters
-----------
workflow: pycbc.workflow.core.Workflow
The workflow class that stores the jobs that will be run.
single: pycbc.workflow.core.File instance
The File object storing the single-detector triggers to followup.
bank_file: pycbc.workflow.core.File instance
The File object storing the template bank.
trig_time: int
The time of the trigger being followed up.
out_dir: str
Location of directory to output to
veto_file: pycbc.workflow.core.File (optional, default=None)
If given use this file to veto triggers to determine the loudest event.
FIXME: Veto files *should* be provided a definer argument and not just
assume that all segments should be read.
time_window: int (optional, default=10)
The amount of data (not including padding) that will be read in by the
singles_timefreq job. The default value of 10s should be fine for most
cases.
data_segments: ligo.segments.segmentlist (optional, default=None)
The list of segments for which data exists and can be read in. If given
the start/end times given to singles_timefreq will be adjusted if
[trig_time - time_window, trig_time + time_window] does not completely
lie within a valid data segment. A ValueError will be raised if the
trig_time is not within a valid segment, or if it is not possible to
find 2*time_window (plus the padding) of continuous data around the
trigger. This **must** be coalesced.
tags: list (optional, default=None)
List of tags to add to the created nodes, which determine file naming.
"""
tags = [] if tags is None else tags
makedir(out_dir)
name = 'plot_singles_timefreq'
curr_exe = SingleTimeFreqExecutable(workflow.cp, name, ifos=[single.ifo],
out_dir=out_dir, tags=tags)
# Determine start/end times, using data segments if needed.
# Begin by choosing "optimal" times
start = trig_time - time_window
end = trig_time + time_window
node = curr_exe.create_node(valid_seg=segments.segment([start, end]))
node.add_input_opt('--trig-file', single)
node.add_input_opt('--bank-file', bank_file)
# Then if data_segments is available, check against that, and move if
# needed
if data_segments is not None:
# Assumes coalesced, so trig_time can only be within one segment
for seg in data_segments:
if trig_time in seg:
data_seg = seg
break
elif trig_time == -1.0:
node.add_opt('--gps-start-time', int(trig_time))
node.add_opt('--gps-end-time', int(trig_time))
node.add_opt('--center-time', trig_time)
if veto_file:
node.add_input_opt('--veto-file', veto_file)
node.add_opt('--detector', single.ifo)
node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
workflow += node
return node.output_files
else:
err_msg = "Trig time {} ".format(trig_time)
err_msg += "does not seem to lie within any data segments. "
err_msg += "This shouldn't be possible, please ask for help!"
raise ValueError(err_msg)
# Check for pad-data
if curr_exe.has_opt('pad-data'):
pad_data = int(curr_exe.get_opt('pad-data'))
else:
pad_data = 0
if abs(data_seg) < (2 * time_window + 2 * pad_data):
tl = 2 * time_window + 2 * pad_data
err_msg = "I was asked to use {} seconds of data ".format(tl)
err_msg += "to run a plot_singles_timefreq job. However, I have "
err_msg += "only {} seconds available.".format(abs(data_seg))
raise ValueError(err_msg)
if data_seg[0] > (start - pad_data):
start = data_seg[0] + pad_data
end = start + 2 * time_window
if data_seg[1] < (end + pad_data):
end = data_seg[1] - pad_data
start = end - 2 * time_window
# Sanity check, shouldn't get here!
if data_seg[0] > (start - pad_data):
err_msg = "I shouldn't be here! Go ask Ian what he broke."
raise ValueError(err_msg)
node.add_opt('--gps-start-time', int(start))
node.add_opt('--gps-end-time', int(end))
node.add_opt('--center-time', trig_time)
if veto_file:
node.add_input_opt('--veto-file', veto_file)
node.add_opt('--detector', single.ifo)
node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
workflow += node
return node.output_files
def make_skipped_html(workflow, skipped_data, out_dir, tags):
"""
Make an html snippet from the list of skipped background coincidences
"""
exe = Executable(workflow.cp, 'html_snippet',
ifos=workflow.ifos, out_dir=out_dir, tags=tags)
node = exe.create_node()
parsed_data = {}
for ifo, time in skipped_data:
if ifo not in parsed_data:
parsed_data[ifo] = {}
if time not in parsed_data[ifo]:
parsed_data[ifo][time] = 1
else:
parsed_data[ifo][time] = parsed_data[ifo][time] + 1
n_events = len(skipped_data)
html_string = '"{} background events have been skipped '.format(n_events)
html_string += 'because one of their single triggers already appears '
html_string += 'in the events followed up above. '
html_string += 'Specifically, the following single detector triggers '
html_string += 'were found in these coincidences. '
html_template = '{} event at time {} appeared {} times. '
for ifo in parsed_data:
for time in parsed_data[ifo]:
n_occurances = parsed_data[ifo][time]
html_string += html_template.format(ifo, time, n_occurances)
html_string += '"'
node.add_opt('--html-text', html_string)
node.add_opt('--title', '"Events were skipped"')
node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
workflow += node
files = node.output_files
return files
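# Illustrative sketch (not part of the original module): ``skipped_data`` is
# expected to be a list of (ifo, time) pairs; make_skipped_html counts how
# often each pair appears when building the html snippet above. Values are
# hypothetical.
_EXAMPLE_SKIPPED_DATA = [
    ('H1', 1186741861.5),
    ('H1', 1186741861.5),  # the same H1 trigger appearing in two coincidences
    ('L1', 1186742205.2),
]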
| 36,594
| 42.307692
| 100
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/configuration.py
|
# Copyright (C) 2013,2017 Ian Harry, Duncan Brown
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides a wrapper to the ConfigParser utilities for pycbc
workflow construction. This module is described in the page here:
https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope/initialization_inifile.html
"""
import os
import stat
import shutil
from shutil import which
from urllib.parse import urlparse
from pycbc.types.config import InterpolatingConfigParser
def resolve_url(url, directory=None, permissions=None, copy_to_cwd=True):
"""Resolves a URL to a local file, and returns the path to that file.
If a URL is given, the file will be copied to the current working
directory. If a local file path is given, the file will only be copied
to the current working directory if ``copy_to_cwd`` is ``True``
(the default).
"""
u = urlparse(url)
# determine whether the file exists locally
islocal = u.scheme == "" or u.scheme == "file"
if not islocal or copy_to_cwd:
# create the name of the destination file
if directory is None:
directory = os.getcwd()
filename = os.path.join(directory, os.path.basename(u.path))
else:
filename = u.path
if islocal:
# check that the file exists
if not os.path.isfile(u.path):
errmsg = "Cannot open file %s from URL %s" % (u.path, url)
raise ValueError(errmsg)
# for regular files, make a direct copy if requested
elif copy_to_cwd:
if os.path.isfile(filename):
# check to see if src and dest are the same file
src_inode = os.stat(u.path)[stat.ST_INO]
dst_inode = os.stat(filename)[stat.ST_INO]
if src_inode != dst_inode:
shutil.copy(u.path, filename)
else:
shutil.copy(u.path, filename)
elif u.scheme == "http" or u.scheme == "https":
# FIXME: Move to top and make optional once 4001 functionality is
# merged
import ciecplib
with ciecplib.Session() as s:
if u.netloc in ("git.ligo.org", "code.pycbc.phy.syr.edu"):
# authenticate with git.ligo.org using callback
s.get("https://git.ligo.org/users/auth/shibboleth/callback")
r = s.get(url, allow_redirects=True)
r.raise_for_status()
output_fp = open(filename, "wb")
output_fp.write(r.content)
output_fp.close()
else:
# TODO: We could support other schemes as needed
errmsg = "Unknown URL scheme: %s\n" % (u.scheme)
errmsg += "Currently supported are: file, http, and https."
raise ValueError(errmsg)
if not os.path.isfile(filename):
errmsg = "Error trying to create file %s from %s" % (filename, url)
raise ValueError(errmsg)
if permissions:
if os.access(filename, os.W_OK):
os.chmod(filename, permissions)
else:
# check that the file has at least the permissions requested
s = os.stat(filename)[stat.ST_MODE]
if (s & permissions) != permissions:
errmsg = "Could not change permissions on %s (read-only)" % url
raise ValueError(errmsg)
return filename
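def _example_resolve_url_usage():
    """Illustrative sketch, not part of the original module: resolve a local
    configuration file without copying it into the current working directory.
    The path is a hypothetical placeholder.
    """
    return resolve_url('file:///tmp/example_workflow.ini', copy_to_cwd=False)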
def add_workflow_command_line_group(parser):
"""
The standard way of initializing a ConfigParser object in workflow will be
to do it from the command line. This is done by giving a
--local-config-files filea.ini fileb.ini filec.ini
command. You can also set config file override commands on the command
line. This will be most useful when setting (for example) start and
end times, or active ifos. This is done by
--config-overrides section1:option1:value1 section2:option2:value2 ...
This can also be given as
--config-overrides section1:option1
where the value will be left as ''.
To remove a configuration option, use the command line argument
--config-delete section1:option1
which will delete option1 from [section1] or
--config-delete section1
to delete all of the options in [section1]
Deletes are implemented before overrides.
This function returns an argparse OptionGroup to ensure these options are
parsed correctly and can then be sent directly to initialize an
WorkflowConfigParser.
Parameters
-----------
parser : argparse.ArgumentParser instance
The initialized argparse instance to add the workflow option group to.
"""
workflowArgs = parser.add_argument_group(
"Configuration", "Options needed for parsing " "config file(s)."
)
workflowArgs.add_argument(
"--config-files",
nargs="+",
action="store",
metavar="CONFIGFILE",
help="List of config files to be used in " "analysis.",
)
workflowArgs.add_argument(
"--config-overrides",
nargs="*",
action="store",
metavar="SECTION:OPTION:VALUE",
help="List of section,option,value combinations to "
"add into the configuration file. Normally the gps "
"start and end times might be provided this way, "
"and user specific locations (ie. output directories). "
"This can also be provided as SECTION:OPTION or "
"SECTION:OPTION: both of which indicate that the "
"corresponding value is left blank.",
)
workflowArgs.add_argument(
"--config-delete",
nargs="*",
action="store",
metavar="SECTION:OPTION",
help="List of section,option combinations to delete "
"from the configuration file. This can also be "
"provided as SECTION which deletes the enture section"
" from the configuration file or SECTION:OPTION "
"which deletes a specific option from a given "
"section.",
)
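def _example_workflow_cli_usage():
    """Illustrative sketch, not part of the original module: parse a
    hypothetical workflow command line with the option group added above.
    All file names and override/delete targets are placeholders.
    """
    import argparse
    parser = argparse.ArgumentParser()
    add_workflow_command_line_group(parser)
    return parser.parse_args([
        '--config-files', 'analysis.ini', 'executables.ini',
        '--config-overrides', 'workflow:start-time:1186740000',
        '--config-delete', 'workflow-segments:segments-veto-definer-url',
    ])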
class WorkflowConfigParser(InterpolatingConfigParser):
"""
This is a sub-class of InterpolatingConfigParser, which lets
us add a few additional helper features that are useful in workflows.
"""
def __init__(
self,
configFiles=None,
overrideTuples=None,
parsedFilePath=None,
deleteTuples=None,
copy_to_cwd=False,
):
"""
Initialize an WorkflowConfigParser. This reads the input configuration
files, overrides values if necessary and performs the interpolation.
See https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope/initialization_inifile.html
Parameters
-----------
configFiles : Path to .ini file, or list of paths
The file(s) to be read in and parsed.
overrideTuples : List of (section, option, value) tuples
Add the (section, option, value) triplets provided
in this list to the provided .ini file(s). If the section, option
pair is already present, it will be overwritten.
parsedFilePath : Path, optional (default=None)
If given, write the parsed .ini file back to disk at this location.
deleteTuples : List of (section, option) tuples
Delete the (section, option) pairs provided
in this list from provided .ini file(s). If the section only
is provided, the entire section will be deleted.
copy_to_cwd : bool, optional
Copy the configuration files to the current working directory if
they are not already there, even if they already exist locally.
If False, files will only be copied to the current working
directory if they are remote. Default is False.
Returns
--------
WorkflowConfigParser
Initialized WorkflowConfigParser instance.
"""
if configFiles is not None:
configFiles = [
resolve_url(cFile, copy_to_cwd=copy_to_cwd)
for cFile in configFiles
]
InterpolatingConfigParser.__init__(
self,
configFiles,
overrideTuples,
parsedFilePath,
deleteTuples,
skip_extended=True,
)
# expand executable which statements
self.perform_exe_expansion()
# Resolve any URLs needing resolving
self.curr_resolved_files = {}
self.resolve_urls()
# Check for any substitutions that can be made
self.perform_extended_interpolation()
def perform_exe_expansion(self):
"""
This function will look through the executables section of the
ConfigParser object and replace any values using macros with full paths.
For any values that look like
${which:lalapps_tmpltbank}
will be replaced with the equivalent of which(lalapps_tmpltbank)
Otherwise values will be unchanged.
"""
# Only works on executables section
if self.has_section("executables"):
for option, value in self.items("executables"):
# Check the value
newStr = self.interpolate_exe(value)
if newStr != value:
self.set("executables", option, newStr)
def interpolate_exe(self, testString):
"""
Replace testString with a path to an executable based on the format.
If this looks like
${which:lalapps_tmpltbank}
it will return the equivalent of which(lalapps_tmpltbank)
Otherwise it will return an unchanged string.
Parameters
-----------
testString : string
The input string
Returns
--------
newString : string
The output string.
"""
# First check if any interpolation is needed and abort if not
testString = testString.strip()
if not (testString.startswith("${") and testString.endswith("}")):
return testString
# This may not be an exe interpolation, so even if it has ${ ... } form
# I may not have to do anything
newString = testString
# Strip the ${ and }
testString = testString[2:-1]
testList = testString.split(":")
# Maybe we can add a few different possibilities for substitution
if len(testList) == 2:
if testList[0] == "which":
newString = which(testList[1])
if not newString:
errmsg = "Cannot find exe %s in your path " % (testList[1])
errmsg += "and you specified ${which:%s}." % (testList[1])
raise ValueError(errmsg)
return newString
def section_to_cli(self, section, skip_opts=None):
"""Converts a section into a command-line string.
For example:
.. code::
[section_name]
foo =
bar = 10
yields: `'--foo --bar 10'`.
Parameters
----------
section : str
The name of the section to convert.
skip_opts : list, optional
List of options to skip. Default (None) results in all options
in the section being converted.
Returns
-------
str :
The options as a command-line string.
"""
if skip_opts is None:
skip_opts = []
read_opts = [
opt for opt in self.options(section) if opt not in skip_opts
]
opts = []
for opt in read_opts:
opts.append("--{}".format(opt))
val = self.get(section, opt)
if val != "":
opts.append(val)
return " ".join(opts)
def get_cli_option(self, section, option_name, **kwds):
"""Return option using CLI action parsing
Parameters
----------
section: str
Section to find option to parse
option_name: str
Name of the option to parse from the config file
kwds: keywords
Additional keywords are passed directly to the argument parser.
Returns
-------
value:
The parsed value for this option
"""
import argparse
optstr = self.section_to_cli(section)
parser = argparse.ArgumentParser()
name = "--" + option_name.replace("_", "-")
parser.add_argument(name, **kwds)
args, _ = parser.parse_known_args(optstr.split())
return getattr(args, option_name)
def resolve_urls(self):
"""
This function will look through all sections of the
ConfigParser object and replace any URLs that are given the resolve
magic flag with a path on the local drive.
Specifically for any values that look like
${resolve:https://git.ligo.org/detchar/SOME_GATING_FILE.txt}
the file will be replaced with the output of resolve_url(URL)
Otherwise values will be unchanged.
"""
# Works on all sections of the parsed configuration
for section in self.sections():
for option, value in self.items(section):
# Check the value
value_l = value.split(' ')
new_str_l = [self.resolve_file_url(val) for val in value_l]
new_str = ' '.join(new_str_l)
if new_str is not None and new_str != value:
self.set(section, option, new_str)
def resolve_file_url(self, test_string):
"""
Replace test_string with a path to a local file based on the format.
If this looks like
${resolve:https://git.ligo.org/detchar/SOME_GATING_FILE.txt}
it will return the output of resolve_url for that URL
Otherwise it will return an unchanged string.
Parameters
-----------
test_string : string
The input string
Returns
--------
new_string : string
The output string.
"""
# First check if any interpolation is needed and abort if not
test_string = test_string.strip()
if not (test_string.startswith("${") and test_string.endswith("}")):
return test_string
# This may not be a "resolve" interpolation, so even if it has
# ${ ... } form I may not have to do anything
# Strip the ${ and }
test_string_strip = test_string[2:-1]
test_list = test_string_strip.split(":", 1)
if len(test_list) == 2:
if test_list[0] == "resolve":
curr_lfn = os.path.basename(test_list[1])
if curr_lfn in self.curr_resolved_files:
return self.curr_resolved_files[curr_lfn]
local_url = resolve_url(test_list[1])
self.curr_resolved_files[curr_lfn] = local_url
return local_url
return test_string
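# Illustrative sketch (not part of the original module): configuration values
# using the ${which:...} and ${resolve:...} macros handled by
# WorkflowConfigParser above. The [workflow-gating] section name, executable
# name and URL are hypothetical placeholders.
_EXAMPLE_MACRO_CONFIG = """
[executables]
inspiral = ${which:pycbc_inspiral}

[workflow-gating]
gating-file = ${resolve:https://example.org/detchar/SOME_GATING_FILE.txt}
"""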
| 15,775
| 33.147186
| 96
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/matched_filter.py
|
# Copyright (C) 2013 Ian Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module is responsible for setting up the matched-filtering stage of
workflows. For details about this module and its capabilities see here:
https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/NOTYETCREATED.html
"""
import os, logging
from math import radians
from pycbc.workflow.core import FileList, make_analysis_dir
from pycbc.workflow.jobsetup import (select_matchedfilter_class,
sngl_ifo_job_setup,
multi_ifo_coherent_job_setup)
def setup_matchedfltr_workflow(workflow, science_segs, datafind_outs,
tmplt_banks, output_dir=None,
injection_file=None, tags=None):
'''
This function aims to be the gateway for setting up a set of matched-filter
jobs in a workflow. This function is intended to support multiple
different ways/codes that could be used for doing this. For now the only
supported sub-module is one that runs the matched-filtering by setting up
a series of matched-filtering jobs, from one executable, to create
matched-filter triggers covering the full range of science times for which
there is data and a template bank file.
Parameters
-----------
workflow : pycbc.workflow.core.Workflow
The workflow instance that the coincidence jobs will be added to.
science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances
The list of times that are being analysed in this workflow.
datafind_outs : pycbc.workflow.core.FileList
A FileList of the datafind files that are needed to obtain the
data used in the analysis.
tmplt_banks : pycbc.workflow.core.FileList
A FileList of the template bank files that will serve as input
in this stage.
output_dir : path
The directory in which output will be stored.
injection_file : pycbc.workflow.core.File, optional (default=None)
If given, this injection (simulation) file will be passed to these
jobs on the command line. If not given, no file will be sent.
tags : list of strings (optional, default = [])
A list of the tagging strings that will be used for all jobs created
by this call to the workflow. An example might be ['BNSINJECTIONS'] or
['NOINJECTIONANALYSIS']. This will be used in output names.
Returns
-------
inspiral_outs : pycbc.workflow.core.FileList
A list of output files written by this stage. This *will not* contain
any intermediate products produced within this stage of the workflow.
If you require access to any intermediate products produced at this
stage you can call the various sub-functions directly.
'''
if tags is None:
tags = []
logging.info("Entering matched-filtering setup module.")
make_analysis_dir(output_dir)
cp = workflow.cp
# Parse for options in .ini file
mfltrMethod = cp.get_opt_tags("workflow-matchedfilter", "matchedfilter-method",
tags)
# Could have a number of choices here
if mfltrMethod == "WORKFLOW_INDEPENDENT_IFOS":
logging.info("Adding matched-filter jobs to workflow.")
inspiral_outs = setup_matchedfltr_dax_generated(workflow, science_segs,
datafind_outs, tmplt_banks, output_dir,
injection_file=injection_file,
tags=tags)
elif mfltrMethod == "WORKFLOW_MULTIPLE_IFOS":
logging.info("Adding matched-filter jobs to workflow.")
inspiral_outs = setup_matchedfltr_dax_generated_multi(workflow,
science_segs, datafind_outs, tmplt_banks,
output_dir, injection_file=injection_file,
tags=tags)
else:
errMsg = "Matched filter method not recognized. Must be one of "
errMsg += "WORKFLOW_INDEPENDENT_IFOS or WORKFLOW_MULTIPLE_IFOS."
raise ValueError(errMsg)
logging.info("Leaving matched-filtering setup module.")
return inspiral_outs
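# Illustrative sketch (not part of the original module): the configuration
# section consulted above, selecting the single-detector matched-filter layout.
_EXAMPLE_MATCHEDFILTER_CONFIG = """
[workflow-matchedfilter]
matchedfilter-method = WORKFLOW_INDEPENDENT_IFOS
"""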
def setup_matchedfltr_dax_generated(workflow, science_segs, datafind_outs,
tmplt_banks, output_dir,
injection_file=None,
tags=None):
'''
Setup matched-filter jobs that are generated as part of the workflow.
This
module can support any matched-filter code that is similar in principle to
lalapps_inspiral, but for new codes some additions are needed to define
Executable and Job sub-classes (see jobutils.py).
Parameters
-----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance that the coincidence jobs will be added to.
science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances
The list of times that are being analysed in this workflow.
datafind_outs : pycbc.workflow.core.FileList
A FileList of the datafind files that are needed to obtain the
data used in the analysis.
tmplt_banks : pycbc.workflow.core.FileList
A FileList of the template bank files that will serve as input
in this stage.
output_dir : path
The directory in which output will be stored.
injection_file : pycbc.workflow.core.File, optional (default=None)
If given, this injection (simulation) file will be passed to these
jobs on the command line. If not given, no file will be sent.
tags : list of strings (optional, default = [])
A list of the tagging strings that will be used for all jobs created
by this call to the workflow. An example might be ['BNSINJECTIONS'] or
['NOINJECTIONANALYSIS']. This will be used in output names.
Returns
-------
inspiral_outs : pycbc.workflow.core.FileList
A list of output files written by this stage. This *will not* contain
any intermediate products produced within this stage of the workflow.
If you require access to any intermediate products produced at this
stage you can call the various sub-functions directly.
'''
if tags is None:
tags = []
# Need to get the exe to figure out what sections are analysed, what is
# discarded etc. This should *not* be hardcoded, so using a new executable
# will require a bit of effort here ....
cp = workflow.cp
ifos = science_segs.keys()
match_fltr_exe = os.path.basename(cp.get('executables','inspiral'))
# Select the appropriate class
exe_class = select_matchedfilter_class(match_fltr_exe)
# Set up class for holding the banks
inspiral_outs = FileList([])
# Matched-filtering is currently done independently for each ifo; supporting
# multi-detector (coherent) matched-filtering here would probably require a
# new module
for ifo in ifos:
logging.info("Setting up matched-filtering for %s." %(ifo))
job_instance = exe_class(workflow.cp, 'inspiral', ifo=ifo,
out_dir=output_dir,
injection_file=injection_file,
tags=tags)
sngl_ifo_job_setup(workflow, ifo, inspiral_outs, job_instance,
science_segs[ifo], datafind_outs,
parents=tmplt_banks, allow_overlap=False)
return inspiral_outs
def setup_matchedfltr_dax_generated_multi(workflow, science_segs, datafind_outs,
tmplt_banks, output_dir,
injection_file=None,
tags=None):
'''
Setup matched-filter jobs that are generated as part of the workflow in
which a single job reads in and generates triggers over multiple ifos.
This module can support any matched-filter code that is similar in
principle to pycbc_multi_inspiral, but for new codes some additions are
needed to define Executable and Job sub-classes (see jobutils.py).
Parameters
-----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance that the coincidence jobs will be added to.
science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances
The list of times that are being analysed in this workflow.
datafind_outs : pycbc.workflow.core.FileList
A FileList of the datafind files that are needed to obtain the
data used in the analysis.
tmplt_banks : pycbc.workflow.core.FileList
A FileList of the template bank files that will serve as input
in this stage.
output_dir : path
The directory in which output will be stored.
injection_file : pycbc.workflow.core.File, optional (default=None)
If given, this injection (simulation) file will be passed to these
jobs on the command line. If not given, no file will be sent.
tags : list of strings (optional, default = [])
A list of the tagging strings that will be used for all jobs created
by this call to the workflow. An example might be ['BNSINJECTIONS'] or
['NOINJECTIONANALYSIS']. This will be used in output names.
Returns
-------
inspiral_outs : pycbc.workflow.core.FileList
A list of output files written by this stage. This *will not* contain
any intermediate products produced within this stage of the workflow.
If you require access to any intermediate products produced at this
stage you can call the various sub-functions directly.
'''
if tags is None:
tags = []
# Need to get the exe to figure out what sections are analysed, what is
# discarded etc. This should *not* be hardcoded, so using a new executable
# will require a bit of effort here ....
cp = workflow.cp
ifos = sorted(science_segs.keys())
match_fltr_exe = os.path.basename(cp.get('executables','inspiral'))
# List for holding the output
inspiral_outs = FileList([])
logging.info("Setting up matched-filtering for %s." %(' '.join(ifos),))
if match_fltr_exe == 'pycbc_multi_inspiral':
exe_class = select_matchedfilter_class(match_fltr_exe)
cp.set('inspiral', 'ra',
str(radians(float(cp.get('workflow', 'ra')))))
cp.set('inspiral', 'dec',
str(radians(float(cp.get('workflow', 'dec')))))
# At the moment we aren't using sky grids, but when we do this code
# might be used then.
# from pycbc.workflow.grb_utils import get_sky_grid_scale
# if cp.has_option("jitter_skyloc", "apply-fermi-error"):
# cp.set('inspiral', 'sky-error',
# str(get_sky_grid_scale(float(cp.get('workflow',
# 'sky-error')))))
# else:
# cp.set('inspiral', 'sky-error',
# str(get_sky_grid_scale(float(cp.get('workflow',
# 'sky-error')),
# sigma_sys=0.0)))
# cp.set('inspiral', 'trigger-time',\
# cp.get('workflow', 'trigger-time'))
# cp.set('inspiral', 'block-duration',
# str(abs(science_segs[ifos[0]][0]) - \
# 2 * int(cp.get('inspiral', 'pad-data'))))
job_instance = exe_class(workflow.cp, 'inspiral', ifo=ifos,
out_dir=output_dir,
injection_file=injection_file,
tags=tags)
if cp.has_option("workflow", "do-long-slides") and "slide" in tags[-1]:
slide_num = int(tags[-1].replace("slide", ""))
logging.info("Setting up matched-filtering for slide {}"
.format(slide_num))
slide_shift = int(cp.get("inspiral", "segment-length"))
time_slide_dict = {ifo: (slide_num + 1) * ix * slide_shift
for ix, ifo in enumerate(ifos)}
multi_ifo_coherent_job_setup(workflow, inspiral_outs, job_instance,
science_segs, datafind_outs,
output_dir, parents=tmplt_banks,
slide_dict=time_slide_dict)
else:
multi_ifo_coherent_job_setup(workflow, inspiral_outs, job_instance,
science_segs, datafind_outs,
output_dir, parents=tmplt_banks)
else:
# Only pycbc_multi_inspiral is currently supported here
raise ValueError("Not currently supported.")
return inspiral_outs
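# Worked example (illustrative, hypothetical numbers): with do-long-slides
# enabled, a final tag of "slide2", sorted ifos ['H1', 'L1', 'V1'] and
# segment-length = 256 s, the shifts computed above follow
# (slide_num + 1) * ix * slide_shift, i.e. {'H1': 0, 'L1': 768, 'V1': 1536}
# seconds.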
| 13,867
| 47.48951
| 85
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/segment.py
|
# Copyright (C) 2013 Ian Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module is responsible for setting up the segment generation stage of
workflows. For details about this module and its capabilities see here:
https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope/segments.html
"""
import os, shutil, itertools
import logging
from ligo import segments
from ligo.segments import utils as segmentsUtils
from pycbc.workflow.core import SegFile, make_analysis_dir
from pycbc.workflow.core import resolve_url
def save_veto_definer(cp, out_dir, tags=None):
""" Retrieve the veto definer file and save it locally
Parameters
-----------
cp : ConfigParser instance
out_dir : path
tags : list of strings
Used to retrieve subsections of the ini file for
configuration options.
"""
if tags is None:
tags = []
make_analysis_dir(out_dir)
veto_def_url = cp.get_opt_tags("workflow-segments",
"segments-veto-definer-url", tags)
veto_def_base_name = os.path.basename(veto_def_url)
veto_def_new_path = os.path.abspath(os.path.join(out_dir,
veto_def_base_name))
# Don't need to do this if already done
resolve_url(veto_def_url,out_dir)
# and update location
cp.set("workflow-segments", "segments-veto-definer-file", veto_def_new_path)
return veto_def_new_path
def get_segments_file(workflow, name, option_name, out_dir):
"""Get cumulative segments from option name syntax for each ifo.
Use syntax of configparser string to define the resulting segment_file
e.g. option_name = +up_flag1,+up_flag2,+up_flag3,-down_flag1,-down_flag2
Each ifo may have a different string and is stored separately in the file.
Flags which add time must precede flags which subtract time.
Parameters
----------
workflow: pycbc.workflow.Workflow
name: string
Name of the segment list being created
option_name: str
Name of option in the associated config parser to get the flag list
out_dir: path
The directory in which the output segment file will be stored.
Returns
--------
seg_file: pycbc.workflow.SegFile
SegFile instance that points to the segment xml file on disk.
"""
from pycbc.dq import query_str
make_analysis_dir(out_dir)
cp = workflow.cp
start = workflow.analysis_time[0]
end = workflow.analysis_time[1]
# Check for veto definer file
veto_definer = None
if cp.has_option("workflow-segments", "segments-veto-definer-url"):
veto_definer = save_veto_definer(workflow.cp, out_dir, [])
# Check for provided server
server = "https://segments.ligo.org"
if cp.has_option("workflow-segments", "segments-database-url"):
server = cp.get("workflow-segments",
"segments-database-url")
if cp.has_option("workflow-segments", "segments-source"):
source = cp.get("workflow-segments", "segments-source")
else:
source = "any"
if source == "file":
local_file_path = \
resolve_url(cp.get("workflow-segments", option_name+"-file"))
pfn = os.path.join(out_dir, os.path.basename(local_file_path))
shutil.move(local_file_path, pfn)
return SegFile.from_segment_xml(pfn)
segs = {}
for ifo in workflow.ifos:
flag_str = cp.get_opt_tags("workflow-segments", option_name, [ifo])
key = ifo + ':' + name
if flag_str.upper() == "OFF":
segs[key] = segments.segmentlist([])
elif flag_str.upper() == "ON":
all_seg = segments.segment([start, end])
segs[key] = segments.segmentlist([all_seg])
else:
segs[key] = query_str(ifo, flag_str, start, end,
source=source, server=server,
veto_definer=veto_definer)
logging.info("%s: got %s flags", ifo, option_name)
return SegFile.from_segment_list_dict(name, segs,
extension='.xml',
valid_segment=workflow.analysis_time,
directory=out_dir)
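# Illustrative sketch of the flag-string syntax parsed by get_segments_file.
# The flag name and directory below are hypothetical:
#
#   [workflow-segments]
#   segments-science = +DMT-ANALYSIS_READY:1
#
#   sci_file = get_segments_file(workflow, 'science', 'segments-science',
#                                'segments')
#
# A value of "ON"/"OFF" marks an ifo as always/never analysable, while a
# comma-separated list of +flag/-flag entries is resolved per ifo through
# pycbc.dq.query_str over the workflow's analysis time.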
def get_triggered_coherent_segment(workflow, sciencesegs):
"""
Construct the coherent network on and off source segments. Can switch to
construction of segments for a single IFO search when coherent segments
are insufficient for a search.
Parameters
-----------
workflow : pycbc.workflow.core.Workflow
The workflow instance that the calculated segments belong to.
sciencesegs : dict
Dictionary of all science segments within analysis time.
Returns
--------
onsource : ligo.segments.segmentlistdict
A dictionary containing the on source segments for network IFOs
offsource : ligo.segments.segmentlistdict
A dictionary containing the off source segments for network IFOs
"""
# Load parsed workflow config options
cp = workflow.cp
triggertime = int(os.path.basename(cp.get('workflow', 'trigger-time')))
minduration = int(os.path.basename(cp.get('workflow-exttrig_segments',
'min-duration')))
maxduration = int(os.path.basename(cp.get('workflow-exttrig_segments',
'max-duration')))
onbefore = int(os.path.basename(cp.get('workflow-exttrig_segments',
'on-before')))
onafter = int(os.path.basename(cp.get('workflow-exttrig_segments',
'on-after')))
padding = int(os.path.basename(cp.get('workflow-exttrig_segments',
'pad-data')))
if cp.has_option("workflow-condition_strain", "do-gating"):
padding += int(os.path.basename(cp.get("condition_strain",
"pad-data")))
quanta = int(os.path.basename(cp.get('workflow-exttrig_segments',
'quanta')))
# Check available data segments meet criteria specified in arguments
commonsegs = sciencesegs.extract_common(sciencesegs.keys())
offsrclist = commonsegs[tuple(commonsegs.keys())[0]]
if len(offsrclist) > 1:
logging.info("Removing network segments that do not contain trigger "
"time")
for seg in offsrclist:
if triggertime in seg:
offsrc = seg
else:
offsrc = offsrclist[0]
if abs(offsrc) < minduration + 2 * padding:
fail = segments.segment([triggertime - minduration / 2. - padding,
triggertime + minduration / 2. + padding])
logging.warning("Available network segment shorter than minimum "
"allowed duration.")
return None, fail
# Will segment duration be the maximum desired length or not?
if abs(offsrc) >= maxduration + 2 * padding:
logging.info("Available network science segment duration (%ds) is "
"greater than the maximum allowed segment length (%ds). "
"Truncating..." % (abs(offsrc), maxduration))
else:
logging.info("Available network science segment duration (%ds) is "
"less than the maximum allowed segment length (%ds)."
% (abs(offsrc), maxduration))
logging.info("%ds of padding applied at beginning and end of segment."
% padding)
# Construct on-source
onstart = triggertime - onbefore
onend = triggertime + onafter
oncentre = onstart + ((onbefore + onafter) / 2)
onsrc = segments.segment(onstart, onend)
logging.info("Constructed ON-SOURCE: duration %ds (%ds before to %ds after"
" trigger)."
% (abs(onsrc), triggertime - onsrc[0],
onsrc[1] - triggertime))
onsrc = segments.segmentlist([onsrc])
# Maximal, centred coherent network segment
idealsegment = segments.segment(int(oncentre - padding -
0.5 * maxduration),
int(oncentre + padding +
0.5 * maxduration))
# Construct off-source
if (idealsegment in offsrc):
offsrc = idealsegment
elif idealsegment[1] not in offsrc:
offsrc &= segments.segment(offsrc[1] - maxduration - 2 * padding,
offsrc[1])
elif idealsegment[0] not in offsrc:
offsrc &= segments.segment(offsrc[0],
offsrc[0] + maxduration + 2 * padding)
# Trimming off-source
excess = (abs(offsrc) - 2 * padding) % quanta
if excess != 0:
logging.info("Trimming %ds excess time to make OFF-SOURCE duration a "
"multiple of %ds" % (excess, quanta))
offset = (offsrc[0] + abs(offsrc) / 2.) - oncentre
if 2 * abs(offset) > excess:
if offset < 0:
offsrc &= segments.segment(offsrc[0] + excess,
offsrc[1])
elif offset > 0:
offsrc &= segments.segment(offsrc[0],
offsrc[1] - excess)
assert abs(offsrc) % quanta == 2 * padding
else:
logging.info("This will make OFF-SOURCE symmetrical about trigger "
"time.")
start = int(offsrc[0] - offset + excess / 2)
end = int(offsrc[1] - offset - round(float(excess) / 2))
offsrc = segments.segment(start, end)
assert abs(offsrc) % quanta == 2 * padding
logging.info("Constructed OFF-SOURCE: duration %ds (%ds before to %ds "
"after trigger)."
% (abs(offsrc) - 2 * padding,
triggertime - offsrc[0] - padding,
offsrc[1] - triggertime - padding))
offsrc = segments.segmentlist([offsrc])
# Put segments into segmentlistdicts
onsource = segments.segmentlistdict()
offsource = segments.segmentlistdict()
ifos = ''
for iifo in sciencesegs.keys():
ifos += str(iifo)
onsource[iifo] = onsrc
offsource[iifo] = offsrc
return onsource, offsource
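# Worked example of the construction above, with hypothetical values:
# on-before = 5, on-after = 1, max-duration = 600, pad-data = 8 and trigger
# time t0. The on-source is [t0 - 5, t0 + 1] (6 s) centred on t0 - 2, and the
# ideal off-source is the 616 s window [t0 - 310, t0 + 306]. The off-source
# is then fitted to the available data and trimmed so that its duration,
# excluding the 2 * 8 s of padding, is a multiple of `quanta` seconds.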
def generate_triggered_segment(workflow, out_dir, sciencesegs):
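    """Determine the IFO combination giving the longest viable off-source
    segment for a triggered search and write the resulting on-source,
    off-source and buffer segments to files in out_dir. Returns the
    (onsource, offsource) dictionaries for the chosen combination, or None
    and a diagnostic segment/dictionary if no combination meets the
    requirements.
    """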
cp = workflow.cp
if cp.has_option("workflow", "allow-single-ifo-search"):
min_ifos = 1
else:
min_ifos = 2
triggertime = int(os.path.basename(cp.get('workflow', 'trigger-time')))
minbefore = int(os.path.basename(cp.get('workflow-exttrig_segments',
'min-before')))
minafter = int(os.path.basename(cp.get('workflow-exttrig_segments',
'min-after')))
minduration = int(os.path.basename(cp.get('workflow-exttrig_segments',
'min-duration')))
onbefore = int(os.path.basename(cp.get('workflow-exttrig_segments',
'on-before')))
onafter = int(os.path.basename(cp.get('workflow-exttrig_segments',
'on-after')))
padding = int(os.path.basename(cp.get('workflow-exttrig_segments',
'pad-data')))
if cp.has_option("workflow-condition_strain", "do-gating"):
padding += int(os.path.basename(cp.get("condition_strain",
"pad-data")))
# How many IFOs meet minimum data requirements?
min_seg = segments.segment(triggertime - onbefore - minbefore - padding,
triggertime + onafter + minafter + padding)
scisegs = segments.segmentlistdict({ifo: sciencesegs[ifo]
for ifo in sciencesegs.keys() if min_seg in sciencesegs[ifo]
and abs(sciencesegs[ifo]) >= minduration})
# Find highest number of IFOs that give an acceptable coherent segment
num_ifos = len(scisegs)
while num_ifos >= min_ifos:
# Consider all combinations for a given number of IFOs
ifo_combos = itertools.combinations(scisegs, num_ifos)
onsource = {}
offsource = {}
for ifo_combo in ifo_combos:
ifos = "".join(ifo_combo)
logging.info("Calculating optimal segment for %s.", ifos)
segs = segments.segmentlistdict({ifo: scisegs[ifo]
for ifo in ifo_combo})
onsource[ifos], offsource[ifos] = get_triggered_coherent_segment(\
workflow, segs)
# Which combination gives the longest coherent segment?
valid_combs = [iifos for iifos in onsource.keys()
if onsource[iifos] is not None]
if len(valid_combs) == 0:
# If none, offsource dict will contain segments showing criteria
# that have not been met, for use in plotting
if len(offsource.keys()) > 1:
            seg_lens = {ifos: abs(list(offsource[ifos].values())[0][0])
                        for ifos in offsource.keys()}
            best_comb = max(seg_lens, key=seg_lens.get)
else:
best_comb = tuple(offsource.keys())[0]
logging.info("No combination of %d IFOs with suitable science "
"segment.", num_ifos)
else:
# Identify best analysis segment
if len(valid_combs) > 1:
                seg_lens = {ifos: abs(list(offsource[ifos].values())[0][0])
                            for ifos in valid_combs}
                best_comb = max(seg_lens, key=seg_lens.get)
else:
best_comb = valid_combs[0]
logging.info("Calculated science segments.")
offsourceSegfile = os.path.join(out_dir, "offSourceSeg.txt")
segmentsUtils.tosegwizard(open(offsourceSegfile, "w"),
list(offsource[best_comb].values())[0])
onsourceSegfile = os.path.join(out_dir, "onSourceSeg.txt")
segmentsUtils.tosegwizard(open(onsourceSegfile, "w"),
list(onsource[best_comb].values())[0])
bufferleft = int(cp.get('workflow-exttrig_segments',
'num-buffer-before'))
bufferright = int(cp.get('workflow-exttrig_segments',
'num-buffer-after'))
onlen = onbefore + onafter
bufferSegment = segments.segment(\
triggertime - onbefore - bufferleft * onlen,
triggertime + onafter + bufferright * onlen)
bufferSegfile = os.path.join(out_dir, "bufferSeg.txt")
segmentsUtils.tosegwizard(open(bufferSegfile, "w"),
segments.segmentlist([bufferSegment]))
return onsource[best_comb], offsource[best_comb]
num_ifos -= 1
logging.warning("No suitable science segments available.")
try:
return None, offsource[best_comb]
except UnboundLocalError:
return None, min_seg
def get_flag_segments_file(workflow, name, option_name, out_dir):
"""Get segments from option name syntax for each ifo for indivudal flags.
Use syntax of configparser string to define the resulting segment_file
    e.g. option_name = +up_flag1,+up_flag2,+up_flag3,-down_flag1,-down_flag2
Each ifo may have a different string and is stored separately in the file.
Each flag is stored separately in the file.
Flags which add time must precede flags which subtract time.
Parameters
----------
workflow: pycbc.workflow.Workflow
name: string
Name of the segment list being created
option_name: str
Name of option in the associated config parser to get the flag list
    out_dir: path
        The directory in which output will be stored.
    Returns
    --------
    seg_file: pycbc.workflow.SegFile
        SegFile instance that points to the segment xml file on disk.
"""
from pycbc.dq import query_str
make_analysis_dir(out_dir)
cp = workflow.cp
start = workflow.analysis_time[0]
end = workflow.analysis_time[1]
# Check for veto definer file
veto_definer = None
if cp.has_option("workflow-segments", "segments-veto-definer-url"):
veto_definer = save_veto_definer(workflow.cp, out_dir, [])
# Check for provided server
server = "https://segments.ligo.org"
if cp.has_option("workflow-segments", "segments-database-url"):
server = cp.get("workflow-segments", "segments-database-url")
source = "any"
if cp.has_option("workflow-segments", "segments-source"):
source = cp.get("workflow-segments", "segments-source")
if source == "file":
local_file_path = \
resolve_url(cp.get("workflow-segments", option_name+"-file"))
pfn = os.path.join(out_dir, os.path.basename(local_file_path))
shutil.move(local_file_path, pfn)
return SegFile.from_segment_xml(pfn)
segs = {}
for ifo in workflow.ifos:
if cp.has_option_tags("workflow-segments", option_name, [ifo]):
flag_str = cp.get_opt_tags("workflow-segments", option_name, [ifo])
flag_list = flag_str.split(',')
for flag in flag_list:
flag_name = flag[1:]
key = flag_name
if len(key.split(':')) > 2:
key = ':'.join(key.split(':')[:2])
segs[key] = query_str(ifo, flag, start, end,
source=source, server=server,
veto_definer=veto_definer)
logging.info("%s: got %s segments", ifo, flag_name)
else:
logging.info("%s: no segments requested", ifo)
return SegFile.from_segment_list_dict(name, segs,
extension='.xml',
valid_segment=workflow.analysis_time,
directory=out_dir)
| 19,177
| 41.429204
| 81
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/tmpltbank.py
|
# Copyright (C) 2013 Ian Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module is responsible for setting up the template bank stage of CBC
workflows. For details about this module and its capabilities see here:
https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope/template_bank.html
"""
import os
import logging
import configparser as ConfigParser
import pycbc
from pycbc.workflow.core import FileList
from pycbc.workflow.core import make_analysis_dir, resolve_url_to_file
from pycbc.workflow.jobsetup import select_tmpltbank_class, sngl_ifo_job_setup
def setup_tmpltbank_workflow(workflow, science_segs, datafind_outs,
output_dir=None, psd_files=None, tags=None,
return_format=None):
'''
Setup template bank section of CBC workflow. This function is responsible
for deciding which of the various template bank workflow generation
utilities should be used.
Parameters
----------
workflow: pycbc.workflow.core.Workflow
An instanced class that manages the constructed workflow.
science_segs : Keyed dictionary of ligo.segments.segmentlist objects
scienceSegs[ifo] holds the science segments to be analysed for each
ifo.
datafind_outs : pycbc.workflow.core.FileList
The file list containing the datafind files.
output_dir : path string
The directory where data products will be placed.
psd_files : pycbc.workflow.core.FileList
The file list containing predefined PSDs, if provided.
tags : list of strings
If given these tags are used to uniquely name and identify output files
that would be produced in multiple calls to this function.
Returns
--------
tmplt_banks : pycbc.workflow.core.FileList
The FileList holding the details of all the template bank jobs.
'''
if tags is None:
tags = []
logging.info("Entering template bank generation module.")
make_analysis_dir(output_dir)
cp = workflow.cp
# Parse for options in ini file
tmpltbankMethod = cp.get_opt_tags("workflow-tmpltbank", "tmpltbank-method",
tags)
# There can be a large number of different options here, for e.g. to set
# up fixed bank, or maybe something else
if tmpltbankMethod == "PREGENERATED_BANK":
logging.info("Setting template bank from pre-generated bank(s).")
tmplt_banks = setup_tmpltbank_pregenerated(workflow, tags=tags)
# Else we assume template banks will be generated in the workflow
elif tmpltbankMethod == "WORKFLOW_INDEPENDENT_IFOS":
logging.info("Adding template bank jobs to workflow.")
tmplt_banks = setup_tmpltbank_dax_generated(workflow, science_segs,
datafind_outs, output_dir, tags=tags,
psd_files=psd_files)
elif tmpltbankMethod == "WORKFLOW_INDEPENDENT_IFOS_NODATA":
logging.info("Adding template bank jobs to workflow.")
tmplt_banks = setup_tmpltbank_without_frames(workflow, output_dir,
tags=tags, independent_ifos=True,
psd_files=psd_files)
elif tmpltbankMethod == "WORKFLOW_NO_IFO_VARIATION_NODATA":
logging.info("Adding template bank jobs to workflow.")
tmplt_banks = setup_tmpltbank_without_frames(workflow, output_dir,
tags=tags, independent_ifos=False,
psd_files=psd_files)
else:
errMsg = "Template bank method not recognized. Must be either "
errMsg += "PREGENERATED_BANK, WORKFLOW_INDEPENDENT_IFOS "
errMsg += "or WORKFLOW_INDEPENDENT_IFOS_NODATA."
raise ValueError(errMsg)
# Check the format of the input template bank file and return it in
# the format requested as per return_format, provided a conversion
# between the two specific formats has been implemented. Currently,
# a conversion from xml.gz or xml to hdf is supported, but not vice
# versa. If a return_format is not specified the function returns
# the bank in the format as it was inputted.
    tmplt_bank_filename = tmplt_banks[0].name
ext = tmplt_bank_filename.split('.', 1)[1]
logging.info("Input bank is a %s file", ext)
    if return_format is None:
tmplt_banks_return = tmplt_banks
elif return_format in ('hdf', 'h5', 'hdf5'):
        if ext in ('hdf', 'h5', 'hdf5') or ext in ('xml.gz', 'xml'):
tmplt_banks_return = pycbc.workflow.convert_bank_to_hdf(workflow,
tmplt_banks, "bank")
    else:
if ext == return_format:
tmplt_banks_return = tmplt_banks
else:
raise NotImplementedError("{0} to {1} conversion is not "
"supported.".format(ext, return_format))
logging.info("Leaving template bank generation module.")
return tmplt_banks_return
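# A minimal configuration sketch for this module. The section and option
# names are the ones parsed above; the path and directory are hypothetical:
#
#   [workflow-tmpltbank]
#   tmpltbank-method = PREGENERATED_BANK
#   tmpltbank-pregenerated-bank = /path/to/H1L1-BANK.hdf
#
#   banks = setup_tmpltbank_workflow(workflow, science_segs, datafind_outs,
#                                    output_dir='bank', return_format='hdf')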
def setup_tmpltbank_dax_generated(workflow, science_segs, datafind_outs,
output_dir, tags=None,
psd_files=None):
'''
Setup template bank jobs that are generated as part of the CBC workflow.
This function will add numerous jobs to the CBC workflow using
configuration options from the .ini file. The following executables are
currently supported:
    * pycbc_geom_nonspinbank
    * pycbc_aligned_stoch_bank
Parameters
----------
workflow: pycbc.workflow.core.Workflow
An instanced class that manages the constructed workflow.
science_segs : Keyed dictionary of ligo.segments.segmentlist objects
scienceSegs[ifo] holds the science segments to be analysed for each
ifo.
datafind_outs : pycbc.workflow.core.FileList
The file list containing the datafind files.
output_dir : path string
The directory where data products will be placed.
tags : list of strings
If given these tags are used to uniquely name and identify output files
that would be produced in multiple calls to this function.
    psd_files : pycbc.workflow.core.FileList
The file list containing predefined PSDs, if provided.
Returns
--------
tmplt_banks : pycbc.workflow.core.FileList
The FileList holding the details of all the template bank jobs.
'''
if tags is None:
tags = []
cp = workflow.cp
# Need to get the exe to figure out what sections are analysed, what is
# discarded etc. This should *not* be hardcoded, so using a new executable
# will require a bit of effort here ....
ifos = science_segs.keys()
tmplt_bank_exe = os.path.basename(cp.get('executables', 'tmpltbank'))
# Select the appropriate class
exe_class = select_tmpltbank_class(tmplt_bank_exe)
# Set up class for holding the banks
tmplt_banks = FileList([])
for ifo in ifos:
job_instance = exe_class(workflow.cp, 'tmpltbank', ifo=ifo,
out_dir=output_dir,
tags=tags)
# Check for the write_psd flag
if cp.has_option_tags("workflow-tmpltbank", "tmpltbank-write-psd-file", tags):
job_instance.write_psd = True
else:
job_instance.write_psd = False
sngl_ifo_job_setup(workflow, ifo, tmplt_banks, job_instance,
science_segs[ifo], datafind_outs,
allow_overlap=True)
return tmplt_banks
def setup_tmpltbank_without_frames(workflow, output_dir,
tags=None, independent_ifos=False,
psd_files=None):
'''
Setup CBC workflow to use a template bank (or banks) that are generated in
the workflow, but do not use the data to estimate a PSD, and therefore do
not vary over the duration of the workflow. This can either generate one
bank that is valid for all ifos at all times, or multiple banks that are
valid only for a single ifo at all times (one bank per ifo).
Parameters
----------
workflow: pycbc.workflow.core.Workflow
An instanced class that manages the constructed workflow.
output_dir : path string
The directory where the template bank outputs will be placed.
tags : list of strings
If given these tags are used to uniquely name and identify output files
that would be produced in multiple calls to this function.
independent_ifos : Boolean, optional (default=False)
If given this will produce one template bank per ifo. If not given
        there will be one template bank to cover all ifos.
    psd_files : pycbc.workflow.core.FileList
The file list containing predefined PSDs, if provided.
Returns
--------
tmplt_banks : pycbc.workflow.core.FileList
The FileList holding the details of the template bank(s).
'''
if tags is None:
tags = []
cp = workflow.cp
# Need to get the exe to figure out what sections are analysed, what is
# discarded etc. This should *not* be hardcoded, so using a new executable
# will require a bit of effort here ....
ifos = workflow.ifos
fullSegment = workflow.analysis_time
    tmplt_bank_exe = os.path.basename(cp.get('executables', 'tmpltbank'))
    # Cannot use lalapps_tmpltbank with this
if tmplt_bank_exe == 'lalapps_tmpltbank':
errMsg = "Lalapps_tmpltbank cannot be used to generate template banks "
errMsg += "without using frames. Try another code."
raise ValueError(errMsg)
# Select the appropriate class
exe_instance = select_tmpltbank_class(tmplt_bank_exe)
tmplt_banks = FileList([])
# Make the distinction between one bank for all ifos and one bank per ifo
if independent_ifos:
ifoList = [ifo for ifo in ifos]
else:
ifoList = [[ifo for ifo in ifos]]
# Check for the write_psd flag
if cp.has_option_tags("workflow-tmpltbank", "tmpltbank-write-psd-file", tags):
exe_instance.write_psd = True
else:
exe_instance.write_psd = False
for ifo in ifoList:
job_instance = exe_instance(workflow.cp, 'tmpltbank', ifo=ifo,
out_dir=output_dir,
tags=tags,
psd_files=psd_files)
node = job_instance.create_nodata_node(fullSegment)
workflow.add_node(node)
tmplt_banks += node.output_files
return tmplt_banks
def setup_tmpltbank_pregenerated(workflow, tags=None):
'''
Setup CBC workflow to use a pregenerated template bank.
    The bank given in cp.get('workflow-tmpltbank', 'tmpltbank-pregenerated-bank')
    will be used as the input file for all matched-filtering jobs. If this option is
present, workflow will assume that it should be used and not generate
template banks within the workflow.
Parameters
----------
workflow: pycbc.workflow.core.Workflow
An instanced class that manages the constructed workflow.
tags : list of strings
If given these tags are used to uniquely name and identify output files
that would be produced in multiple calls to this function.
Returns
--------
tmplt_banks : pycbc.workflow.core.FileList
The FileList holding the details of the template bank.
'''
if tags is None:
tags = []
# Currently this uses the *same* fixed bank for all ifos.
# Maybe we want to add capability to analyse separate banks in all ifos?
# Set up class for holding the banks
tmplt_banks = FileList([])
cp = workflow.cp
global_seg = workflow.analysis_time
file_attrs = {'segs' : global_seg, 'tags' : tags}
try:
# First check if we have a bank for all ifos
pre_gen_bank = cp.get_opt_tags('workflow-tmpltbank',
'tmpltbank-pregenerated-bank', tags)
file_attrs['ifos'] = workflow.ifos
curr_file = resolve_url_to_file(pre_gen_bank, attrs=file_attrs)
tmplt_banks.append(curr_file)
except ConfigParser.Error:
# Okay then I must have banks for each ifo
for ifo in workflow.ifos:
try:
pre_gen_bank = cp.get_opt_tags('workflow-tmpltbank',
'tmpltbank-pregenerated-bank-%s' % ifo.lower(),
tags)
file_attrs['ifos'] = [ifo]
curr_file = resolve_url_to_file(pre_gen_bank, attrs=file_attrs)
tmplt_banks.append(curr_file)
except ConfigParser.Error:
err_msg = "Cannot find pregerated template bank in section "
err_msg += "[workflow-tmpltbank] or any tagged sections. "
if tags:
tagged_secs = " ".join("[workflow-tmpltbank-%s]" \
%(ifo,) for ifo in workflow.ifos)
err_msg += "Tagged sections are %s. " %(tagged_secs,)
err_msg += "I looked for 'tmpltbank-pregenerated-bank' option "
err_msg += "and 'tmpltbank-pregenerated-bank-%s'." %(ifo,)
raise ConfigParser.Error(err_msg)
return tmplt_banks
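# Sketch of the per-ifo fallback handled above (paths are hypothetical): if
# no single 'tmpltbank-pregenerated-bank' option is found, one bank per
# detector may be supplied instead, e.g.
#
#   [workflow-tmpltbank]
#   tmpltbank-pregenerated-bank-h1 = /path/to/H1-BANK.xml.gz
#   tmpltbank-pregenerated-bank-l1 = /path/to/L1-BANK.xml.gz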
| 14,333
| 41.916168
| 86
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/splittable.py
|
# Copyright (C) 2013 Ian Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module is responsible for setting up the splitting output files stage of
workflows. For details about this module and its capabilities see here:
https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/NOTYETCREATED.html
"""
import os
import logging
from pycbc.workflow.core import FileList, make_analysis_dir
from pycbc.workflow.jobsetup import (PycbcSplitBankExecutable,
PycbcSplitBankXmlExecutable, PycbcSplitInspinjExecutable,
PycbcHDFSplitInjExecutable)
def select_splitfilejob_instance(curr_exe):
"""
This function returns an instance of the class that is appropriate for
splitting an output file up within workflow (for e.g. splitbank).
Parameters
----------
curr_exe : string
The name of the Executable that is being used.
Returns
--------
exe class : sub-class of pycbc.workflow.core.Executable
The class that holds the utility functions appropriate
for the given Executable. This class **must** contain
* exe_class.create_job()
and the job returned by this **must** contain
* job.create_node()
"""
if curr_exe == 'pycbc_hdf5_splitbank':
exe_class = PycbcSplitBankExecutable
elif curr_exe == 'pycbc_splitbank':
exe_class = PycbcSplitBankXmlExecutable
elif curr_exe == 'pycbc_split_inspinj':
exe_class = PycbcSplitInspinjExecutable
elif curr_exe == 'pycbc_hdf_splitinj':
exe_class = PycbcHDFSplitInjExecutable
else:
# Should we try some sort of default class??
err_string = "No class exists for Executable %s" %(curr_exe,)
raise NotImplementedError(err_string)
return exe_class
def setup_splittable_workflow(workflow, input_tables, out_dir=None, tags=None):
'''
    This function is the gateway for code that takes an input file containing
    a table and splits it into multiple files, each containing part of that
    table. The supported operations split template bank and injection files
    using the executables handled by select_splitfilejob_instance.
Parameters
-----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance that the jobs will be added to.
input_tables : pycbc.workflow.core.FileList
The input files to be split up.
out_dir : path
The directory in which output will be written.
Returns
--------
split_table_outs : pycbc.workflow.core.FileList
The list of split up files as output from this job.
'''
if tags is None:
tags = []
logging.info("Entering split output files module.")
make_analysis_dir(out_dir)
# Parse for options in .ini file
splitMethod = workflow.cp.get_opt_tags("workflow-splittable",
"splittable-method", tags)
if splitMethod == "IN_WORKFLOW":
# Scope here for choosing different options
logging.info("Adding split output file jobs to workflow.")
split_table_outs = setup_splittable_dax_generated(workflow,
input_tables, out_dir, tags)
elif splitMethod == "NOOP":
# Probably better not to call the module at all, but this option will
# return the input file list.
split_table_outs = input_tables
else:
errMsg = "Splittable method not recognized. Must be one of "
errMsg += "IN_WORKFLOW or NOOP."
raise ValueError(errMsg)
logging.info("Leaving split output files module.")
return split_table_outs
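# A minimal configuration sketch for the splittable module. The option names
# are the ones read below in setup_splittable_dax_generated; the values are
# hypothetical:
#
#   [workflow-splittable]
#   splittable-method = IN_WORKFLOW
#   splittable-num-banks = 10
#   splittable-exe-tag = splitbank
#
#   split_banks = setup_splittable_workflow(workflow, tmplt_banks,
#                                           out_dir='bank_split')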
def setup_splittable_dax_generated(workflow, input_tables, out_dir, tags):
'''
Function for setting up the splitting jobs as part of the workflow.
Parameters
-----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance that the jobs will be added to.
input_tables : pycbc.workflow.core.FileList
The input files to be split up.
out_dir : path
The directory in which output will be written.
Returns
--------
split_table_outs : pycbc.workflow.core.FileList
The list of split up files as output from this job.
'''
cp = workflow.cp
# Get values from ini file
try:
num_splits = cp.get_opt_tags("workflow-splittable",
"splittable-num-banks", tags)
except BaseException:
inj_interval = int(cp.get_opt_tags("workflow-splittable",
"splitinjtable-interval", tags))
if cp.has_option_tags("em_bright_filter", "max-keep", tags) and \
cp.has_option("workflow-injections", "em-bright-only"):
num_injs = int(cp.get_opt_tags("em_bright_filter", "max-keep",
tags))
else:
# This needed to be changed from num-injs to ninjections in order
# to work properly with pycbc_create_injections
num_injs = int(cp.get_opt_tags("workflow-injections",
"ninjections", tags))
inj_tspace = float(abs(workflow.analysis_time)) / num_injs
num_splits = int(inj_interval // inj_tspace) + 1
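        # Worked example with hypothetical numbers: for a 100000 s analysis
        # time, ninjections = 1000 and splitinjtable-interval = 3600 s, the
        # mean spacing is 100 s per injection, giving
        # num_splits = int(3600 // 100.0) + 1 = 37 output files.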
split_exe_tag = cp.get_opt_tags("workflow-splittable",
"splittable-exe-tag", tags)
split_exe = os.path.basename(cp.get("executables", split_exe_tag))
# Select the appropriate class
exe_class = select_splitfilejob_instance(split_exe)
# Set up output structure
out_file_groups = FileList([])
# Set up the condorJob class for the current executable
curr_exe_job = exe_class(workflow.cp, split_exe_tag, num_splits,
out_dir=out_dir)
    for input_file in input_tables:
        node = curr_exe_job.create_node(input_file, tags=tags)
workflow.add_node(node)
out_file_groups += node.output_files
return out_file_groups
| 6,964
| 37.910615
| 82
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/jobsetup.py
|
# Copyright (C) 2013 Ian Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This library code contains functions and classes that are used to set up
and add jobs/nodes to a pycbc workflow. For details about pycbc.workflow see:
https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope.html
"""
import math, os
import lal
from ligo import segments
from pycbc.workflow.core import Executable, File, FileList, Node
def int_gps_time_to_str(t):
"""Takes an integer GPS time, either given as int or lal.LIGOTimeGPS, and
converts it to a string. If a LIGOTimeGPS with nonzero decimal part is
given, raises a ValueError."""
int_t = int(t)
if abs(float(t - int_t)) > 0.:
raise ValueError('Need an integer GPS time, got %s' % str(t))
return str(int_t)
def select_tmpltbank_class(curr_exe):
""" This function returns a class that is appropriate for setting up
template bank jobs within workflow.
Parameters
----------
curr_exe : string
The name of the executable to be used for generating template banks.
Returns
--------
exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility
functions appropriate for the given executable. Instances of the class
('jobs') **must** have methods
* job.create_node()
and
* job.get_valid_times(ifo, )
"""
exe_to_class_map = {
'pycbc_geom_nonspinbank' : PyCBCTmpltbankExecutable,
'pycbc_aligned_stoch_bank': PyCBCTmpltbankExecutable
}
try:
return exe_to_class_map[curr_exe]
except KeyError:
raise NotImplementedError(
"No job class exists for executable %s, exiting" % curr_exe)
def select_matchedfilter_class(curr_exe):
""" This function returns a class that is appropriate for setting up
matched-filtering jobs within workflow.
Parameters
----------
curr_exe : string
The name of the matched filter executable to be used.
Returns
--------
exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility
functions appropriate for the given executable. Instances of the class
('jobs') **must** have methods
* job.create_node()
and
* job.get_valid_times(ifo, )
"""
exe_to_class_map = {
'pycbc_inspiral' : PyCBCInspiralExecutable,
'pycbc_inspiral_skymax' : PyCBCInspiralExecutable,
'pycbc_multi_inspiral' : PyCBCMultiInspiralExecutable,
}
try:
return exe_to_class_map[curr_exe]
except KeyError:
# also conceivable to introduce a default class??
raise NotImplementedError(
"No job class exists for executable %s, exiting" % curr_exe)
def select_generic_executable(workflow, exe_tag):
""" Returns a class that is appropriate for setting up jobs to run executables
having specific tags in the workflow config.
Executables should not be "specialized" jobs fitting into one of the
select_XXX_class functions above, i.e. not a matched filter or template
bank job, which require extra setup.
Parameters
----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance.
exe_tag : string
The name of the config section storing options for this executable and
the option giving the executable path in the [executables] section.
Returns
--------
exe_class : Sub-class of pycbc.workflow.core.Executable that holds utility
functions appropriate for the given executable. Instances of the class
('jobs') **must** have a method job.create_node()
"""
exe_path = workflow.cp.get("executables", exe_tag)
exe_name = os.path.basename(exe_path)
exe_to_class_map = {
'ligolw_add' : LigolwAddExecutable,
'lalapps_inspinj' : LalappsInspinjExecutable,
'pycbc_create_injections' : PycbcCreateInjectionsExecutable,
'pycbc_condition_strain' : PycbcConditionStrainExecutable
}
try:
return exe_to_class_map[exe_name]
except KeyError:
# Should we try some sort of default class??
raise NotImplementedError(
"No job class exists for executable %s, exiting" % exe_name)
def sngl_ifo_job_setup(workflow, ifo, out_files, curr_exe_job, science_segs,
datafind_outs, parents=None,
allow_overlap=True):
""" This function sets up a set of single ifo jobs. A basic overview of how this
works is as follows:
* (1) Identify the length of data that each job needs to read in, and what
part of that data the job is valid for.
* START LOOPING OVER SCIENCE SEGMENTS
    * (2) Identify how many jobs are needed (if any) to cover the given science
      segment and the time shift between jobs. If no jobs are needed, move on
      to the next science segment.
* START LOOPING OVER JOBS
* (3) Identify the time that the given job should produce valid output (ie.
inspiral triggers) over.
* (4) Identify the data range that the job will need to read in to produce
the aforementioned valid output.
* (5) Identify all parents/inputs of the job.
* (6) Add the job to the workflow
* END LOOPING OVER JOBS
* END LOOPING OVER SCIENCE SEGMENTS
Parameters
-----------
workflow: pycbc.workflow.core.Workflow
An instance of the Workflow class that manages the constructed workflow.
ifo : string
The name of the ifo to set up the jobs for
out_files : pycbc.workflow.core.FileList
The FileList containing the list of jobs. Jobs will be appended
to this list, and it does not need to be empty when supplied.
curr_exe_job : Job
        An instance of the Job class that has a get_valid_times method.
science_segs : ligo.segments.segmentlist
The list of times that the jobs should cover
datafind_outs : pycbc.workflow.core.FileList
The file list containing the datafind files.
parents : pycbc.workflow.core.FileList (optional, kwarg, default=None)
The FileList containing the list of jobs that are parents to
the one being set up.
allow_overlap : boolean (optional, kwarg, default = True)
If this is set the times that jobs are valid for will be allowed to
overlap. This may be desired for template banks which may have some
overlap in the times they cover. This may not be desired for inspiral
jobs, where you probably want triggers recorded by jobs to not overlap
at all.
Returns
--------
out_files : pycbc.workflow.core.FileList
A list of the files that will be generated by this step in the
workflow.
"""
########### (1) ############
# Get the times that can be analysed and needed data lengths
data_length, valid_chunk, valid_length = identify_needed_data(curr_exe_job)
# Loop over science segments and set up jobs
for curr_seg in science_segs:
########### (2) ############
# Initialize the class that identifies how many jobs are needed and the
# shift between them.
segmenter = JobSegmenter(data_length, valid_chunk, valid_length,
curr_seg, curr_exe_job)
for job_num in range(segmenter.num_jobs):
############## (3) #############
# Figure out over what times this job will be valid for
job_valid_seg = segmenter.get_valid_times_for_job(job_num,
allow_overlap=allow_overlap)
############## (4) #############
# Get the data that this job should read in
job_data_seg = segmenter.get_data_times_for_job(job_num)
############# (5) ############
# Identify parents/inputs to the job
if parents:
# Find the set of files with the best overlap
curr_parent = parents.find_outputs_in_range(ifo, job_valid_seg,
useSplitLists=True)
if not curr_parent:
err_string = ("No parent jobs found overlapping %d to %d."
%(job_valid_seg[0], job_valid_seg[1]))
err_string += "\nThis is a bad error! Contact a developer."
raise ValueError(err_string)
else:
curr_parent = [None]
curr_dfouts = None
if datafind_outs:
curr_dfouts = datafind_outs.find_all_output_in_range(ifo,
job_data_seg, useSplitLists=True)
if not curr_dfouts:
err_str = ("No datafind jobs found overlapping %d to %d."
%(job_data_seg[0],job_data_seg[1]))
err_str += "\nThis shouldn't happen. Contact a developer."
raise ValueError(err_str)
############## (6) #############
# Make node and add to workflow
# Note if I have more than one curr_parent I need to make more than
# one job. If there are no curr_parents it is set to [None] and I
# make a single job. This catches the case of a split template bank
# where I run a number of jobs to cover a single range of time.
# Sort parent jobs to ensure predictable order
sorted_parents = sorted(curr_parent,
key=lambda fobj: fobj.tagged_description)
for pnum, parent in enumerate(sorted_parents):
if len(curr_parent) != 1:
tag = ["JOB%d" %(pnum,)]
else:
tag = []
# To ensure output file uniqueness I add a tag
# We should generate unique names automatically, but it is a
# pain until we can set the output names for all Executables
node = curr_exe_job.create_node(job_data_seg, job_valid_seg,
parent=parent,
df_parents=curr_dfouts,
tags=tag)
workflow.add_node(node)
curr_out_files = node.output_files
# FIXME: Here we remove PSD files if they are coming through.
# This should be done in a better way. On to-do list.
curr_out_files = [i for i in curr_out_files if 'PSD_FILE'\
not in i.tags]
out_files += curr_out_files
return out_files
def multi_ifo_coherent_job_setup(workflow, out_files, curr_exe_job,
science_segs, datafind_outs, output_dir,
parents=None, slide_dict=None, tags=None):
"""
Method for setting up coherent inspiral jobs.
"""
if tags is None:
tags = []
data_seg, job_valid_seg = curr_exe_job.get_valid_times()
curr_out_files = FileList([])
if 'IPN' in datafind_outs[-1].description \
and 'bank_veto_bank' in datafind_outs[-2].description:
# FIXME: This looks like a really nasty hack for the GRB code.
# This should be fixed properly to avoid strange behaviour!
ipn_sky_points = datafind_outs[-1]
bank_veto = datafind_outs[-2]
frame_files = datafind_outs[:-2]
else:
ipn_sky_points = None
if 'bank_veto_bank' in datafind_outs[-1].name:
bank_veto = datafind_outs[-1]
frame_files = datafind_outs[:-1]
else:
bank_veto = None
frame_files = datafind_outs
split_bank_counter = 0
if curr_exe_job.injection_file is None:
for split_bank in parents:
tag = list(tags)
tag.append(split_bank.tag_str)
node = curr_exe_job.create_node(data_seg, job_valid_seg,
parent=split_bank, dfParents=frame_files,
bankVetoBank=bank_veto, ipn_file=ipn_sky_points,
slide=slide_dict, tags=tag)
workflow.add_node(node)
split_bank_counter += 1
curr_out_files.extend(node.output_files)
else:
for inj_file in curr_exe_job.injection_file:
for split_bank in parents:
tag = list(tags)
tag.append(inj_file.tag_str)
tag.append(split_bank.tag_str)
node = curr_exe_job.create_node(data_seg, job_valid_seg,
parent=split_bank, inj_file=inj_file, tags=tag,
dfParents=frame_files, bankVetoBank=bank_veto,
ipn_file=ipn_sky_points)
workflow.add_node(node)
split_bank_counter += 1
curr_out_files.extend(node.output_files)
# FIXME: Here we remove PSD files if they are coming
# through. This should be done in a better way. On
# to-do list.
# IWHNOTE: This will not be needed when coh_PTF is retired, but it is
# okay to do this. It just means you can't access these files
# later.
curr_out_files = [i for i in curr_out_files if 'PSD_FILE'\
not in i.tags]
out_files += curr_out_files
return out_files
def identify_needed_data(curr_exe_job):
""" This function will identify the length of data that a specific
executable needs to analyse and what part of that data is valid (ie.
inspiral doesn't analyse the first or last 8s of data it reads in).
Parameters
-----------
curr_exe_job : Job
        An instance of the Job class that has a get_valid_times method.
Returns
--------
dataLength : float
The amount of data (in seconds) that each instance of the job must read
in.
valid_chunk : ligo.segments.segment
        The times within dataLength for which that job's output **can** be
        valid (i.e. for inspiral this is (72, dataLength-72) as, for a
        standard setup, the inspiral job cannot look for triggers in the
        first or last 72 seconds of data read in).
valid_length : float
The maximum length of data each job can be valid for. This is
abs(valid_segment).
"""
# Set up the condorJob class for the current executable
data_lengths, valid_chunks = curr_exe_job.get_valid_times()
# Begin by getting analysis start and end, and start and end of time
# that the output file is valid for
valid_lengths = [abs(valid_chunk) for valid_chunk in valid_chunks]
return data_lengths, valid_chunks, valid_lengths
class JobSegmenter(object):
""" This class is used when running sngl_ifo_job_setup to determine what times
should be analysed be each job and what data is needed.
"""
def __init__(self, data_lengths, valid_chunks, valid_lengths, curr_seg,
curr_exe_class):
""" Initialize class. """
self.exe_class = curr_exe_class
self.curr_seg = curr_seg
self.curr_seg_length = float(abs(curr_seg))
self.data_length, self.valid_chunk, self.valid_length = \
self.pick_tile_size(self.curr_seg_length, data_lengths,
valid_chunks, valid_lengths)
self.data_chunk = segments.segment([0, self.data_length])
self.data_loss = self.data_length - abs(self.valid_chunk)
if self.data_loss < 0:
raise ValueError("pycbc.workflow.jobsetup needs fixing! Please contact a developer")
if self.curr_seg_length < self.data_length:
self.num_jobs = 0
return
# How many jobs do we need
self.num_jobs = int( math.ceil( (self.curr_seg_length \
- self.data_loss) / float(self.valid_length) ))
if self.curr_seg_length == self.data_length:
# If the segment length is identical to the data length then I
# will have exactly 1 job!
self.job_time_shift = 0
else:
# What is the incremental shift between jobs
self.job_time_shift = (self.curr_seg_length - self.data_length) / \
float(self.num_jobs - 1)
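    # Worked example of the tiling above, with hypothetical numbers: a job
    # that reads data_length = 2048 s with valid_chunk = [72, 1976]
    # (valid_length = 1904 s) has data_loss = 144 s, so a 4000 s science
    # segment needs num_jobs = ceil((4000 - 144) / 1904) = 3 jobs with a
    # job_time_shift of (4000 - 2048) / 2 = 976 s between start times.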
def pick_tile_size(self, seg_size, data_lengths, valid_chunks, valid_lengths):
""" Choose job tiles size based on science segment length """
if len(valid_lengths) == 1:
return data_lengths[0], valid_chunks[0], valid_lengths[0]
else:
# Pick the tile size that is closest to 1/3 of the science segment
target_size = seg_size / 3
pick, pick_diff = 0, abs(valid_lengths[0] - target_size)
for i, size in enumerate(valid_lengths):
if abs(size - target_size) < pick_diff:
pick, pick_diff = i, abs(size - target_size)
return data_lengths[pick], valid_chunks[pick], valid_lengths[pick]
def get_valid_times_for_job(self, num_job, allow_overlap=True):
""" Get the times for which this job is valid. """
# small factor of 0.0001 to avoid float round offs causing us to
# miss a second at end of segments.
shift_dur = self.curr_seg[0] + int(self.job_time_shift * num_job\
+ 0.0001)
job_valid_seg = self.valid_chunk.shift(shift_dur)
# If we need to recalculate the valid times to avoid overlap
if not allow_overlap:
data_per_job = (self.curr_seg_length - self.data_loss) / \
float(self.num_jobs)
lower_boundary = num_job*data_per_job + \
self.valid_chunk[0] + self.curr_seg[0]
upper_boundary = data_per_job + lower_boundary
# NOTE: Convert to int after calculating both boundaries
# small factor of 0.0001 to avoid float round offs causing us to
# miss a second at end of segments.
lower_boundary = int(lower_boundary)
upper_boundary = int(upper_boundary + 0.0001)
if lower_boundary < job_valid_seg[0] or \
upper_boundary > job_valid_seg[1]:
err_msg = ("Workflow is attempting to generate output "
"from a job at times where it is not valid.")
raise ValueError(err_msg)
job_valid_seg = segments.segment([lower_boundary,
upper_boundary])
return job_valid_seg
def get_data_times_for_job(self, num_job):
""" Get the data that this job will read in. """
# small factor of 0.0001 to avoid float round offs causing us to
# miss a second at end of segments.
shift_dur = self.curr_seg[0] + int(self.job_time_shift * num_job\
+ 0.0001)
job_data_seg = self.data_chunk.shift(shift_dur)
# Sanity check that all data is used
if num_job == 0:
if job_data_seg[0] != self.curr_seg[0]:
err= "Job is not using data from the start of the "
err += "science segment. It should be using all data."
raise ValueError(err)
if num_job == (self.num_jobs - 1):
if job_data_seg[1] != self.curr_seg[1]:
err = "Job is not using data from the end of the "
err += "science segment. It should be using all data."
raise ValueError(err)
if hasattr(self.exe_class, 'zero_pad_data_extend'):
job_data_seg = self.exe_class.zero_pad_data_extend(job_data_seg,
self.curr_seg)
return job_data_seg
class PyCBCInspiralExecutable(Executable):
""" The class used to create jobs for pycbc_inspiral Executable. """
current_retention_level = Executable.ALL_TRIGGERS
time_dependent_options = ['--channel-name']
def __init__(self, cp, exe_name, ifo=None, out_dir=None,
injection_file=None, tags=None, reuse_executable=False):
if tags is None:
tags = []
super().__init__(cp, exe_name, ifo, out_dir, tags=tags,
reuse_executable=reuse_executable,
set_submit_subdir=False)
self.cp = cp
self.injection_file = injection_file
self.ext = '.hdf'
self.num_threads = 1
if self.get_opt('processing-scheme') is not None:
stxt = self.get_opt('processing-scheme')
if len(stxt.split(':')) > 1:
self.num_threads = stxt.split(':')[1]
def create_node(self, data_seg, valid_seg, parent=None, df_parents=None,
tags=None):
if tags is None:
tags = []
node = Node(self, valid_seg=valid_seg)
if not self.has_opt('pad-data'):
raise ValueError("The option pad-data is a required option of "
"%s. Please check the ini file." % self.name)
pad_data = int(self.get_opt('pad-data'))
# set remaining options flags
node.add_opt('--gps-start-time',
int_gps_time_to_str(data_seg[0] + pad_data))
node.add_opt('--gps-end-time',
int_gps_time_to_str(data_seg[1] - pad_data))
node.add_opt('--trig-start-time', int_gps_time_to_str(valid_seg[0]))
node.add_opt('--trig-end-time', int_gps_time_to_str(valid_seg[1]))
if self.injection_file is not None:
node.add_input_opt('--injection-file', self.injection_file)
# set the input and output files
fil = node.new_output_file_opt(
valid_seg,
self.ext,
'--output',
tags=tags,
store_file=self.retain_files,
use_tmp_subdirs=True
)
        # For inspiral jobs we overwrite the "relative.submit.dir"
# attribute to avoid too many files in one sub-directory
curr_rel_dir = fil.name.split('/')[0]
node.add_profile('pegasus', 'relative.submit.dir',
self.pegasus_name + '_' + curr_rel_dir)
        # Must ensure this is not a LIGOTimeGPS as JSON won't understand it
data_seg = segments.segment([int(data_seg[0]), int(data_seg[1])])
fil.add_metadata('data_seg', data_seg)
node.add_input_opt('--bank-file', parent)
if df_parents is not None:
node.add_input_list_opt('--frame-files', df_parents)
return node
def get_valid_times(self):
""" Determine possible dimensions of needed input and valid output
"""
if self.cp.has_option('workflow-matchedfilter',
'min-analysis-segments'):
min_analysis_segs = int(self.cp.get('workflow-matchedfilter',
'min-analysis-segments'))
else:
min_analysis_segs = 0
if self.cp.has_option('workflow-matchedfilter',
'max-analysis-segments'):
max_analysis_segs = int(self.cp.get('workflow-matchedfilter',
'max-analysis-segments'))
else:
# Choose ridiculously large default value
max_analysis_segs = 1000
if self.cp.has_option('workflow-matchedfilter', 'min-analysis-length'):
min_analysis_length = int(self.cp.get('workflow-matchedfilter',
'min-analysis-length'))
else:
min_analysis_length = 0
if self.cp.has_option('workflow-matchedfilter', 'max-analysis-length'):
max_analysis_length = int(self.cp.get('workflow-matchedfilter',
'max-analysis-length'))
else:
# Choose a ridiculously large default value
max_analysis_length = 100000
segment_length = int(self.get_opt('segment-length'))
pad_data = 0
if self.has_opt('pad-data'):
pad_data += int(self.get_opt('pad-data'))
# NOTE: Currently the tapered data is ignored as it is short and
# will lie within the segment start/end pad. This means that
# the tapered data *will* be used for PSD estimation (but this
# effect should be small). It will also be in the data segments
# used for SNR generation (when in the middle of a data segment
# where zero-padding is not being used) but the templates should
# not be long enough to use this data assuming segment start/end
# pad take normal values. When using zero-padding this data will
# be used for SNR generation.
#if self.has_opt('taper-data'):
# pad_data += int(self.get_opt( 'taper-data' ))
        if self.has_opt('allow-zero-padding'):
            self.zero_padding = True
        else:
            self.zero_padding = False
        start_pad = int(self.get_opt('segment-start-pad'))
        end_pad = int(self.get_opt('segment-end-pad'))
seg_ranges = range(min_analysis_segs, max_analysis_segs + 1)
data_lengths = []
valid_regions = []
for nsegs in seg_ranges:
analysis_length = (segment_length - start_pad - end_pad) * nsegs
if not self.zero_padding:
data_length = analysis_length + pad_data * 2 \
+ start_pad + end_pad
start = pad_data + start_pad
end = data_length - pad_data - end_pad
else:
data_length = analysis_length + pad_data * 2
start = pad_data
end = data_length - pad_data
if data_length > max_analysis_length: continue
if data_length < min_analysis_length: continue
data_lengths += [data_length]
valid_regions += [segments.segment(start, end)]
# If min_analysis_length is given, ensure that it is added as an option
# for job analysis length.
if min_analysis_length:
data_length = min_analysis_length
if not self.zero_padding:
start = pad_data + start_pad
end = data_length - pad_data - end_pad
else:
start = pad_data
end = data_length - pad_data
if end > start:
data_lengths += [data_length]
valid_regions += [segments.segment(start, end)]
return data_lengths, valid_regions
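    # Worked example of the lengths computed above, with hypothetical ini
    # values: segment-length = 256, segment-start-pad = 64,
    # segment-end-pad = 16, pad-data = 8 and no zero padding. For nsegs = 1
    # the analysis length is 256 - 64 - 16 = 176 s, the data_length is
    # 176 + 2 * 8 + 64 + 16 = 272 s, and the valid region is [72, 248] within
    # the data read in.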
def zero_pad_data_extend(self, job_data_seg, curr_seg):
"""When using zero padding, *all* data is analysable, but the setup
functions must include the padding data where it is available so that
we are not zero-padding in the middle of science segments. This
function takes a job_data_seg, that is chosen for a particular node
and extends it with segment-start-pad and segment-end-pad if that
data is available.
"""
if self.zero_padding is False:
return job_data_seg
else:
            start_pad = int(self.get_opt('segment-start-pad'))
end_pad = int(self.get_opt('segment-end-pad'))
new_data_start = max(curr_seg[0], job_data_seg[0] - start_pad)
new_data_end = min(curr_seg[1], job_data_seg[1] + end_pad)
new_data_seg = segments.segment([new_data_start, new_data_end])
return new_data_seg
# FIXME: This is probably misnamed, this is really GRBInspiralExecutable.
# There's nothing coherent here, it's just that data segment stuff is
# very different between GRB and all-sky/all-time
class PyCBCMultiInspiralExecutable(Executable):
"""
The class responsible for setting up jobs for the
pycbc_multi_inspiral executable.
"""
current_retention_level = Executable.ALL_TRIGGERS
# bank-veto-bank-file is a file input option for pycbc_multi_inspiral
file_input_options = Executable.file_input_options + \
['--bank-veto-bank-file']
def __init__(self, cp, name, ifo=None, injection_file=None,
gate_files=None, out_dir=None, tags=None):
if tags is None:
tags = []
super().__init__(cp, name, ifo, out_dir=out_dir, tags=tags)
self.injection_file = injection_file
self.data_seg = segments.segment(int(cp.get('workflow', 'start-time')),
int(cp.get('workflow', 'end-time')))
self.num_threads = 1
def create_node(self, data_seg, valid_seg, parent=None, inj_file=None,
dfParents=None, bankVetoBank=None, ipn_file=None,
slide=None, tags=None):
if tags is None:
tags = []
node = Node(self)
if not dfParents:
raise ValueError("%s must be supplied with frame or cache files"
% self.name)
# If doing single IFO search, make sure slides are disabled
if len(self.ifo_list) < 2 and \
(node.get_opt('--do-short-slides') is not None or \
node.get_opt('--short-slide-offset') is not None):
raise ValueError("Cannot run with time slides in a single IFO "
"configuration! Please edit your configuration "
"file accordingly.")
        # Set instruments
node.add_opt("--instruments", " ".join(self.ifo_list))
pad_data = self.get_opt('pad-data')
if pad_data is None:
raise ValueError("The option pad-data is a required option of "
"%s. Please check the ini file." % self.name)
# Feed in bank_veto_bank.xml, if given
if self.cp.has_option('workflow-inspiral', 'bank-veto-bank-file'):
node.add_input_opt('--bank-veto-bank-file', bankVetoBank)
# Set time options
node.add_opt('--gps-start-time', data_seg[0] + int(pad_data))
node.add_opt('--gps-end-time', data_seg[1] - int(pad_data))
node.add_opt('--trig-start-time', valid_seg[0])
node.add_opt('--trig-end-time', valid_seg[1])
node.add_opt('--trigger-time', self.cp.get('workflow', 'trigger-time'))
# Set the input and output files
node.new_output_file_opt(data_seg, '.hdf', '--output',
tags=tags, store_file=self.retain_files)
        node.add_input_opt('--bank-file', parent)
if dfParents is not None:
frame_arg = '--frame-files'
for frame_file in dfParents:
frame_arg += f" {frame_file.ifo}:{frame_file.name}"
node.add_input(frame_file)
node.add_arg(frame_arg)
if ipn_file is not None:
node.add_input_opt('--sky-positions-file', ipn_file)
if inj_file is not None:
if self.get_opt('--do-short-slides') is not None or \
self.get_opt('--short-slide-offset') is not None:
raise ValueError("Cannot run with short slides in an "
"injection job. Please edit your "
"configuration file accordingly.")
node.add_input_opt('--injection-file', inj_file)
if slide is not None:
for ifo in self.ifo_list:
node.add_opt('--%s-slide-segment' % ifo.lower(), slide[ifo])
# Channels
channel_names = {}
for ifo in self.ifo_list:
channel_names[ifo] = self.cp.get_opt_tags(
"workflow", "%s-channel-name" % ifo.lower(), "")
channel_names_str = \
" ".join([val for key, val in channel_names.items()])
node.add_opt("--channel-name", channel_names_str)
return node
def get_valid_times(self):
pad_data = int(self.get_opt('pad-data'))
if self.has_opt("segment-start-pad"):
pad_data = int(self.get_opt("pad-data"))
start_pad = int(self.get_opt("segment-start-pad"))
end_pad = int(self.get_opt("segment-end-pad"))
valid_start = self.data_seg[0] + pad_data + start_pad
valid_end = self.data_seg[1] - pad_data - end_pad
elif self.has_opt('analyse-segment-end'):
safety = 1
deadtime = int(self.get_opt('segment-length')) / 2
spec_len = int(self.get_opt('inverse-spec-length')) / 2
valid_start = (self.data_seg[0] + deadtime - spec_len + pad_data -
safety)
valid_end = self.data_seg[1] - spec_len - pad_data - safety
else:
overlap = int(self.get_opt('segment-length')) / 4
valid_start = self.data_seg[0] + overlap + pad_data
valid_end = self.data_seg[1] - overlap - pad_data
return self.data_seg, segments.segment(valid_start, valid_end)
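    # Worked example for the default branch above, with hypothetical values:
    # segment-length = 256 and pad-data = 8 give an overlap of 64 s, so for a
    # data_seg of [0, 4096] the valid output times are [72, 4024].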
class PyCBCTmpltbankExecutable(Executable):
""" The class used to create jobs for pycbc_geom_nonspin_bank Executable and
any other Executables using the same command line option groups.
"""
current_retention_level = Executable.MERGED_TRIGGERS
def __init__(self, cp, exe_name, ifo=None, out_dir=None,
tags=None, write_psd=False, psd_files=None):
if tags is None:
tags = []
super().__init__(cp, exe_name, ifo, out_dir, tags=tags)
self.cp = cp
self.write_psd = write_psd
self.psd_files = psd_files
def create_node(self, data_seg, valid_seg, parent=None, df_parents=None, tags=None):
if tags is None:
tags = []
node = Node(self)
if not df_parents:
raise ValueError("%s must be supplied with data file(s)"
% self.name)
pad_data = self.get_opt('pad-data')
if pad_data is None:
    raise ValueError("The option pad-data is a required option of "
                     "%s. Please check the ini file." % self.name)
pad_data = int(pad_data)
# set the remaining option flags
node.add_opt('--gps-start-time',
int_gps_time_to_str(data_seg[0] + pad_data))
node.add_opt('--gps-end-time',
int_gps_time_to_str(data_seg[1] - pad_data))
# set the input and output files
# Add the PSD file if needed
if self.write_psd:
node.new_output_file_opt(valid_seg, '.txt', '--psd-output',
tags=tags+['PSD_FILE'], store_file=self.retain_files)
node.new_output_file_opt(valid_seg, '.xml.gz', '--output-file',
tags=tags, store_file=self.retain_files)
node.add_input_list_opt('--frame-files', df_parents)
return node
def create_nodata_node(self, valid_seg, tags=None):
""" A simplified version of create_node that creates a node that does
not need to read in data.
Parameters
-----------
valid_seg : ligo.segments.segment
The segment over which to declare the node valid. Usually this
would be the duration of the analysis.
tags : list of strings, optional (default=None)
Tags to include in the output file name.
Returns
--------
node : pycbc.workflow.core.Node
The instance corresponding to the created node.
"""
if tags is None:
tags = []
node = Node(self)
# Set the output file
# Add the PSD file if needed
if self.write_psd:
node.new_output_file_opt(valid_seg, '.txt', '--psd-output',
tags=tags+['PSD_FILE'],
store_file=self.retain_files)
node.new_output_file_opt(valid_seg, '.xml.gz', '--output-file',
store_file=self.retain_files)
if self.psd_files is not None:
should_add = False
# If any of the ifos for this job are in the set
# of ifos for which a static psd was provided.
for ifo in self.ifo_list:
for psd_file in self.psd_files:
if ifo in psd_file.ifo_list:
should_add = True
if should_add:
node.add_input_opt('--psd-file', psd_file)
return node
def get_valid_times(self):
pad_data = int(self.get_opt('pad-data'))
analysis_length = int(self.cp.get('workflow-tmpltbank',
'analysis-length'))
data_length = analysis_length + pad_data * 2
start = pad_data
end = data_length - pad_data
return [data_length], [segments.segment(start, end)]
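# Minimal sketch (not part of the original class; the option values below are
# assumed) reproducing the fixed-block arithmetic of
# PyCBCTmpltbankExecutable.get_valid_times above: the job reads
# analysis-length + 2 * pad-data seconds of data and declares the central
# analysis-length seconds valid.
def _example_tmpltbank_valid_times(analysis_length=2048, pad_data=8):
    data_length = analysis_length + pad_data * 2   # e.g. 2064 s of data read in
    start = pad_data                                # valid from 8 s ...
    end = data_length - pad_data                    # ... to 2056 s into the block
    return [data_length], [(start, end)]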
class LigolwAddExecutable(Executable):
""" The class used to create nodes for the ligolw_add Executable. """
current_retention_level = Executable.INTERMEDIATE_PRODUCT
def create_node(self, jobSegment, input_files, output=None,
use_tmp_subdirs=True, tags=None):
if tags is None:
tags = []
node = Node(self)
# Very few options to ligolw_add, all input files are given as a long
# argument list. If this becomes unwieldy we could dump all these files
# to a cache file and read that in. ALL INPUT FILES MUST BE LISTED AS
# INPUTS (with .add_input_opt_file) IF THIS IS DONE THOUGH!
for fil in input_files:
node.add_input_arg(fil)
if output:
node.add_output_opt('--output', output)
else:
node.new_output_file_opt(jobSegment, '.xml.gz', '--output',
tags=tags, store_file=self.retain_files,
use_tmp_subdirs=use_tmp_subdirs)
return node
class PycbcSplitInspinjExecutable(Executable):
"""
The class responsible for running the pycbc_split_inspinj executable
"""
current_retention_level = Executable.INTERMEDIATE_PRODUCT
def __init__(self, cp, exe_name, num_splits, ifo=None, out_dir=None):
super().__init__(cp, exe_name, ifo, out_dir, tags=[])
self.num_splits = int(num_splits)
def create_node(self, parent, tags=None):
if tags is None:
tags = []
node = Node(self)
node.add_input_opt('--input-file', parent)
if parent.name.endswith("gz"):
ext = ".xml.gz"
else:
ext = ".xml"
out_files = FileList([])
for i in range(self.num_splits):
curr_tag = 'split%d' % i
curr_tags = parent.tags + [curr_tag]
job_tag = parent.description + "_" + self.name.upper()
out_file = File(parent.ifo_list, job_tag, parent.segment,
extension=ext, directory=self.out_dir,
tags=curr_tags, store_file=self.retain_files)
out_files.append(out_file)
node.add_output_list_opt('--output-files', out_files)
return node
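# Worked example of the naming scheme above (the split count is assumed): with
# num_splits = 4 and a parent file tagged ['INJ'], the outputs inherit the
# parent's ifo list, segment and extension and carry the tags
#     ['INJ', 'split0'], ['INJ', 'split1'], ['INJ', 'split2'], ['INJ', 'split3']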
class LalappsInspinjExecutable(Executable):
"""
The class used to create jobs for the lalapps_inspinj Executable.
"""
current_retention_level = Executable.FINAL_RESULT
extension = '.xml'
def create_node(self, segment, exttrig_file=None, tags=None):
if tags is None:
tags = []
node = Node(self)
curr_tags = self.tags + tags
# This allows the desired number of injections to be given explicitly
# in the config file. Used for coh_PTF as segment length is unknown
# before run time.
if self.get_opt('write-compress') is not None:
self.extension = '.xml.gz'
# Check if these injections are using trigger information to choose
# sky positions for the simulated signals
if (self.get_opt('l-distr') == 'exttrig' and exttrig_file is not None \
and 'trigger' in exttrig_file.description):
# Use an XML file containing trigger information
triggered = True
node.add_input_opt('--exttrig-file', exttrig_file)
elif (self.get_opt('l-distr') == 'ipn' and exttrig_file is not None \
and 'IPN' in exttrig_file.description):
# Use an IPN sky points file
triggered = True
node.add_input_opt('--ipn-file', exttrig_file)
elif (self.get_opt('l-distr') != 'exttrig') \
and (self.get_opt('l-distr') != 'ipn' and not \
self.has_opt('ipn-file')):
# Use no trigger information for generating injections
triggered = False
else:
err_msg = "The argument 'l-distr' passed to the "
err_msg += "%s job has the value " % self.tagged_name
err_msg += "'%s' but you have not " % self.get_opt('l-distr')
err_msg += "provided the corresponding ExtTrig or IPN file. "
err_msg += "Please check your configuration files and try again."
raise ValueError(err_msg)
if triggered:
num_injs = int(self.cp.get_opt_tags('workflow-injections',
'num-injs', curr_tags))
inj_tspace = float(segment[1] - segment[0]) / num_injs
node.add_opt('--time-interval', inj_tspace)
node.add_opt('--time-step', inj_tspace)
node.new_output_file_opt(segment, self.extension, '--output',
store_file=self.retain_files)
node.add_opt('--gps-start-time', int_gps_time_to_str(segment[0]))
node.add_opt('--gps-end-time', int_gps_time_to_str(segment[1]))
return node
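# Worked example of the injection spacing above (the numbers are assumed): when
# trigger or IPN information is used, num-injs = 100 over a 5000 s segment gives
#     inj_tspace = 5000 / 100 = 50.0
# and lalapps_inspinj is passed --time-interval 50.0 --time-step 50.0, spreading
# the injections evenly across the on-source segment.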
class PycbcSplitBankExecutable(Executable):
""" The class responsible for creating jobs for pycbc_hdf5_splitbank. """
extension = '.hdf'
current_retention_level = Executable.ALL_TRIGGERS
def __init__(self, cp, exe_name, num_banks,
ifo=None, out_dir=None):
super().__init__(cp, exe_name, ifo, out_dir, tags=[])
self.num_banks = int(num_banks)
def create_node(self, bank, tags=None):
"""
Set up a Node to run the splitbank code.
Parameters
----------
bank : pycbc.workflow.core.File
The File containing the template bank to be split
Returns
--------
node : pycbc.workflow.core.Node
The node to run the job
"""
if tags is None:
tags = []
node = Node(self)
node.add_input_opt('--bank-file', bank)
# Get the output (taken from inspiral.py)
out_files = FileList([])
for i in range(self.num_banks):
curr_tag = 'bank%d' % i
# FIXME: What should the tags actually be? The job.tags values are
# currently ignored.
curr_tags = bank.tags + [curr_tag] + tags
job_tag = bank.description + "_" + self.name.upper()
out_file = File(bank.ifo_list, job_tag, bank.segment,
extension=self.extension, directory=self.out_dir,
tags=curr_tags, store_file=self.retain_files)
out_files.append(out_file)
node.add_output_list_opt('--output-filenames', out_files)
return node
class PycbcSplitBankXmlExecutable(PycbcSplitBankExecutable):
""" Subclass resonsible for creating jobs for pycbc_splitbank. """
extension='.xml.gz'
class PycbcConditionStrainExecutable(Executable):
""" The class responsible for creating jobs for pycbc_condition_strain. """
current_retention_level = Executable.ALL_TRIGGERS
def create_node(self, input_files, tags=None):
if tags is None:
tags = []
node = Node(self)
start_time = self.cp.get("workflow", "start-time")
end_time = self.cp.get("workflow", "end-time")
node.add_opt('--gps-start-time', start_time)
node.add_opt('--gps-end-time', end_time)
node.add_input_list_opt('--frame-files', input_files)
out_file = File(self.ifo, "gated",
segments.segment(int(start_time), int(end_time)),
directory=self.out_dir, store_file=self.retain_files,
extension=input_files[0].name.split('.', 1)[-1],
tags=tags)
node.add_output_opt('--output-strain-file', out_file)
out_gates_file = File(self.ifo, "output_gates",
segments.segment(int(start_time), int(end_time)),
directory=self.out_dir, extension='txt',
store_file=self.retain_files, tags=tags)
node.add_output_opt('--output-gates-file', out_gates_file)
return node, out_file
class PycbcCreateInjectionsExecutable(Executable):
""" The class responsible for creating jobs
for ``pycbc_create_injections``.
"""
current_retention_level = Executable.ALL_TRIGGERS
extension = '.hdf'
def create_node(self, config_file=None, seed=None, tags=None):
""" Set up a CondorDagmanNode class to run ``pycbc_create_injections``.
Parameters
----------
config_file : pycbc.workflow.core.File
A ``pycbc.workflow.core.File`` pointing to the injection configuration
file to be used with the ``--config-files`` option.
seed : int
Seed to use for generating injections.
tags : list
A list of tags to include in filenames.
Returns
--------
node : pycbc.workflow.core.Node
The node to run the job.
"""
# default for tags is empty list
tags = [] if tags is None else tags
# get analysis start and end time
start_time = self.cp.get("workflow", "start-time")
end_time = self.cp.get("workflow", "end-time")
analysis_time = segments.segment(int(start_time), int(end_time))
# make node for running executable
node = Node(self)
if config_file is not None:
node.add_input_opt("--config-files", config_file)
if seed:
node.add_opt("--seed", seed)
injection_file = node.new_output_file_opt(analysis_time,
self.extension,
"--output-file",
tags=tags)
return node, injection_file
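# Illustrative usage sketch (the executable name and variables are hypothetical;
# the pattern mirrors the setup functions elsewhere in this package):
#
#     create_inj_exe = PycbcCreateInjectionsExecutable(
#         workflow.cp, 'create_injections', out_dir='injections')
#     inj_node, inj_file = create_inj_exe.create_node(
#         config_file=inj_config, seed=1234, tags=['INJ1'])
#     workflow += inj_node
#
# The returned inj_file can then be handed to downstream jobs that accept an
# --injection-file argument.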
class PycbcInferenceExecutable(Executable):
""" The class responsible for creating jobs for ``pycbc_inference``.
"""
current_retention_level = Executable.ALL_TRIGGERS
def create_node(self, config_file, seed=None, tags=None,
analysis_time=None):
""" Set up a pegasus.Node instance to run ``pycbc_inference``.
Parameters
----------
config_file : pycbc.workflow.core.File
A ``pycbc.workflow.core.File`` pointing to the inference configuration
file to be used with the ``--config-file`` option.
seed : int
An ``int`` to be used with ``--seed`` option.
tags : list
A list of tags to include in filenames.
analysis_time : ligo.segments.segment, optional
The time span covered by the output file. If not given it is read
from the [workflow] start-time and end-time options.
Returns
--------
node : pycbc.workflow.core.Node
The node to run the job.
"""
# default for tags is empty list
tags = [] if tags is None else tags
# if analysis time not provided, try to get it from the config file
if analysis_time is None:
start_time = self.cp.get("workflow", "start-time")
end_time = self.cp.get("workflow", "end-time")
analysis_time = segments.segment(int(start_time), int(end_time))
# make node for running executable
node = Node(self)
node.add_input_opt("--config-file", config_file)
if seed is not None:
node.add_opt("--seed", seed)
inference_file = node.new_output_file_opt(analysis_time,
".hdf", "--output-file",
tags=tags)
if self.cp.has_option("pegasus_profile-inference",
"condor|+CheckpointSig"):
err_msg = "This is not yet supported/tested with pegasus 5. "
err_msg += "Please reimplement this (with unittest :-) )."
raise ValueError(err_msg)
#ckpt_file_name = "{}.checkpoint".format(inference_file.name)
#ckpt_file = dax.File(ckpt_file_name)
# DO NOT call pegasus API stuff outside of
# pegasus_workflow.py.
#node._dax_node.uses(ckpt_file, link=dax.Link.OUTPUT,
# register=False, transfer=False)
return node, inference_file
class PycbcHDFSplitInjExecutable(Executable):
""" The class responsible for creating jobs for ``pycbc_hdf_splitinj``.
"""
current_retention_level = Executable.ALL_TRIGGERS
def __init__(self, cp, exe_name, num_splits, ifo=None, out_dir=None):
super().__init__(cp, exe_name, ifo, out_dir, tags=[])
self.num_splits = int(num_splits)
def create_node(self, parent, tags=None):
if tags is None:
tags = []
node = Node(self)
node.add_input_opt('--input-file', parent)
out_files = FileList([])
for i in range(self.num_splits):
curr_tag = 'split%d' % i
curr_tags = parent.tags + [curr_tag]
job_tag = parent.description + "_" + self.name.upper()
out_file = File(parent.ifo_list, job_tag, parent.segment,
extension='.hdf', directory=self.out_dir,
tags=curr_tags, store_file=self.retain_files)
out_files.append(out_file)
node.add_output_list_opt('--output-files', out_files)
return node
| 51,006
| 41.189413
| 96
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/__init__.py
|
# Copyright (C) 2013 Ian Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This package provides the utilities to construct an inspiral workflow for
performing a coincident CBC matched-filter analysis on gravitational-wave
interferometer data.
"""
import os.path
from pycbc.workflow.configuration import *
from pycbc.workflow.core import *
from pycbc.workflow.grb_utils import *
from pycbc.workflow.jobsetup import *
from pycbc.workflow.psd import *
from pycbc.workflow.matched_filter import *
from pycbc.workflow.datafind import *
from pycbc.workflow.segment import *
from pycbc.workflow.tmpltbank import *
from pycbc.workflow.psdfiles import *
from pycbc.workflow.splittable import *
from pycbc.workflow.coincidence import *
from pycbc.workflow.injection import *
from pycbc.workflow.plotting import *
from pycbc.workflow.minifollowups import *
from pycbc.workflow.dq import *
from pycbc.workflow.versioning import *
# Set the pycbc workflow specific pegasus configuration and planning files
from pycbc.workflow.pegasus_workflow import PEGASUS_FILE_DIRECTORY
# Set the configuration file base directory
INI_FILE_DIRECTORY = os.path.join(os.path.dirname(__file__), 'ini_files')
| 2,063
| 37.222222
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/pegasus_sites.py
|
# Copyright (C) 2021 The PyCBC development team
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
""" This module provides default site catalogs, which should be suitable for
most use cases. Individual details can be overridden here. It should also be
possible to implement a new site, although how that would work in practice has
not yet been established.
"""
import os.path
import tempfile
import urllib.parse
from shutil import which
from urllib.parse import urljoin
from urllib.request import pathname2url
from Pegasus.api import Directory, FileServer, Site, Operation, Namespace
from Pegasus.api import Arch, OS, SiteCatalog
from pycbc.version import last_release # noqa
# NOTE urllib is weird. For some reason it only allows known schemes and will
# give *wrong* results, rather than failing, if you use something like gsiftp.
# We can add schemes explicitly, as below, but be careful with this!
urllib.parse.uses_relative.append('gsiftp')
urllib.parse.uses_netloc.append('gsiftp')
KNOWN_SITES = ['local', 'condorpool_symlink',
'condorpool_copy', 'condorpool_shared', 'osg']
def add_site_pegasus_profile(site, cp):
"""Add options from [pegasus_profile] in configparser to site"""
# Add global profile information
if cp.has_section('pegasus_profile'):
add_ini_site_profile(site, cp, 'pegasus_profile')
# Add site-specific profile information
if cp.has_section('pegasus_profile-{}'.format(site.name)):
add_ini_site_profile(site, cp, 'pegasus_profile-{}'.format(site.name))
def add_ini_site_profile(site, cp, sec):
"""Add options from sec in configparser to site"""
for opt in cp.options(sec):
namespace = opt.split('|')[0]
if namespace in ('pycbc', 'container'):
continue
value = cp.get(sec, opt).strip()
key = opt.split('|')[1]
site.add_profiles(Namespace(namespace), key=key, value=value)
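# Worked example of the mapping above (the option value is illustrative): an
# ini entry
#
#     [pegasus_profile]
#     condor|request_memory = 2000
#
# is split on '|' into namespace 'condor' and key 'request_memory' and becomes
#     site.add_profiles(Namespace('condor'), key='request_memory', value='2000')
# while entries in the 'pycbc' or 'container' namespaces are skipped.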
def add_local_site(sitecat, cp, local_path, local_url):
"""Add the local site to site catalog"""
# local_url must end with a '/'
if not local_url.endswith('/'):
local_url = local_url + '/'
local = Site("local", arch=Arch.X86_64, os_type=OS.LINUX)
add_site_pegasus_profile(local, cp)
local_dir = Directory(Directory.SHARED_SCRATCH,
path=os.path.join(local_path, 'local-site-scratch'))
local_file_serv = FileServer(urljoin(local_url, 'local-site-scratch'),
Operation.ALL)
local_dir.add_file_servers(local_file_serv)
local.add_directories(local_dir)
local.add_profiles(Namespace.PEGASUS, key="style", value="condor")
local.add_profiles(Namespace.CONDOR, key="getenv", value="True")
sitecat.add_sites(local)
def add_condorpool_symlink_site(sitecat, cp):
"""Add condorpool_symlink site to site catalog"""
site = Site("condorpool_symlink", arch=Arch.X86_64, os_type=OS.LINUX)
add_site_pegasus_profile(site, cp)
site.add_profiles(Namespace.PEGASUS, key="style", value="condor")
site.add_profiles(Namespace.PEGASUS, key="data.configuration",
value="nonsharedfs")
site.add_profiles(Namespace.PEGASUS, key='transfer.bypass.input.staging',
value="true")
site.add_profiles(Namespace.PEGASUS, key='auxillary.local',
value="true")
site.add_profiles(Namespace.CONDOR, key="+OpenScienceGrid",
value="False")
site.add_profiles(Namespace.CONDOR, key="should_transfer_files",
value="Yes")
site.add_profiles(Namespace.CONDOR, key="when_to_transfer_output",
value="ON_EXIT_OR_EVICT")
site.add_profiles(Namespace.CONDOR, key="getenv", value="True")
site.add_profiles(Namespace.CONDOR, key="+DESIRED_Sites",
value='"nogrid"')
site.add_profiles(Namespace.CONDOR, key="+IS_GLIDEIN",
value='"False"')
site.add_profiles(Namespace.CONDOR, key="+flock_local",
value="True")
site.add_profiles(Namespace.DAGMAN, key="retry", value="2")
sitecat.add_sites(site)
def add_condorpool_copy_site(sitecat, cp):
"""Add condorpool_copy site to site catalog"""
site = Site("condorpool_copy", arch=Arch.X86_64, os_type=OS.LINUX)
add_site_pegasus_profile(site, cp)
site.add_profiles(Namespace.PEGASUS, key="style", value="condor")
site.add_profiles(Namespace.PEGASUS, key="data.configuration",
value="nonsharedfs")
site.add_profiles(Namespace.PEGASUS, key='transfer.bypass.input.staging',
value="true")
# This explicitly disables symlinking
site.add_profiles(Namespace.PEGASUS, key='nosymlink',
value=True)
site.add_profiles(Namespace.PEGASUS, key='auxillary.local',
value="true")
site.add_profiles(Namespace.CONDOR, key="+OpenScienceGrid",
value="False")
site.add_profiles(Namespace.CONDOR, key="should_transfer_files",
value="Yes")
site.add_profiles(Namespace.CONDOR, key="when_to_transfer_output",
value="ON_EXIT_OR_EVICT")
site.add_profiles(Namespace.CONDOR, key="getenv", value="True")
site.add_profiles(Namespace.CONDOR, key="+DESIRED_Sites",
value='"nogrid"')
site.add_profiles(Namespace.CONDOR, key="+IS_GLIDEIN",
value='"False"')
site.add_profiles(Namespace.CONDOR, key="+flock_local",
value="True")
site.add_profiles(Namespace.DAGMAN, key="retry", value="2")
sitecat.add_sites(site)
def add_condorpool_shared_site(sitecat, cp, local_path, local_url):
"""Add condorpool_shared site to site catalog"""
# local_url must end with a '/'
if not local_url.endswith('/'):
local_url = local_url + '/'
site = Site("condorpool_shared", arch=Arch.X86_64, os_type=OS.LINUX)
add_site_pegasus_profile(site, cp)
# It's annoying that this is needed!
local_dir = Directory(Directory.SHARED_SCRATCH,
path=os.path.join(local_path, 'cpool-site-scratch'))
local_file_serv = FileServer(urljoin(local_url, 'cpool-site-scratch'),
Operation.ALL)
local_dir.add_file_servers(local_file_serv)
site.add_directories(local_dir)
site.add_profiles(Namespace.PEGASUS, key="style", value="condor")
site.add_profiles(Namespace.PEGASUS, key="data.configuration",
value="sharedfs")
site.add_profiles(Namespace.PEGASUS, key='transfer.bypass.input.staging',
value="true")
site.add_profiles(Namespace.PEGASUS, key='auxillary.local',
value="true")
site.add_profiles(Namespace.CONDOR, key="+OpenScienceGrid",
value="False")
site.add_profiles(Namespace.CONDOR, key="should_transfer_files",
value="Yes")
site.add_profiles(Namespace.CONDOR, key="when_to_transfer_output",
value="ON_EXIT_OR_EVICT")
site.add_profiles(Namespace.CONDOR, key="getenv", value="True")
site.add_profiles(Namespace.CONDOR, key="+DESIRED_Sites",
value='"nogrid"')
site.add_profiles(Namespace.CONDOR, key="+IS_GLIDEIN",
value='"False"')
site.add_profiles(Namespace.CONDOR, key="+flock_local",
value="True")
site.add_profiles(Namespace.DAGMAN, key="retry", value="2")
# Need to set PEGASUS_HOME
peg_home = which('pegasus-plan')
assert peg_home.endswith('bin/pegasus-plan')
peg_home = peg_home.replace('bin/pegasus-plan', '')
site.add_profiles(Namespace.ENV, key="PEGASUS_HOME", value=peg_home)
sitecat.add_sites(site)
# NOTE: We should now be able to add a nonfs site. I'll leave this for a
# future patch/as demanded feature though. The setup would largely be
# the same as the OSG site, except without the OSG specific things.
# def add_condorpool_nonfs_site(sitecat, cp):
def add_osg_site(sitecat, cp):
"""Add osg site to site catalog"""
site = Site("osg", arch=Arch.X86_64, os_type=OS.LINUX)
add_site_pegasus_profile(site, cp)
site.add_profiles(Namespace.PEGASUS, key="style", value="condor")
site.add_profiles(Namespace.PEGASUS, key="data.configuration",
value="condorio")
site.add_profiles(Namespace.PEGASUS, key='transfer.bypass.input.staging',
value="true")
site.add_profiles(Namespace.CONDOR, key="should_transfer_files",
value="Yes")
site.add_profiles(Namespace.CONDOR, key="when_to_transfer_output",
value="ON_SUCCESS")
site.add_profiles(Namespace.CONDOR, key="success_exit_code",
value="0")
site.add_profiles(Namespace.CONDOR, key="+OpenScienceGrid",
value="True")
site.add_profiles(Namespace.CONDOR, key="getenv",
value="False")
site.add_profiles(Namespace.CONDOR, key="+InitializeModulesEnv",
value="False")
site.add_profiles(Namespace.CONDOR, key="+SingularityCleanEnv",
value="True")
site.add_profiles(Namespace.CONDOR, key="Requirements",
value="(HAS_SINGULARITY =?= TRUE) && "
"(HAS_LIGO_FRAMES =?= True) && "
"(IS_GLIDEIN =?= True)")
cvmfs_loc = '"/cvmfs/singularity.opensciencegrid.org/pycbc/pycbc-el8:v'
cvmfs_loc += last_release + '"'
site.add_profiles(Namespace.CONDOR, key="+SingularityImage",
value=cvmfs_loc)
# On OSG failure rate is high
site.add_profiles(Namespace.DAGMAN, key="retry", value="4")
site.add_profiles(Namespace.ENV, key="LAL_DATA_PATH",
value="/cvmfs/oasis.opensciencegrid.org/ligo/sw/pycbc/lalsuite-extra/current/share/lalsimulation")
# Add MKL location to LD_LIBRARY_PATH for OSG
site.add_profiles(Namespace.ENV, key="LD_LIBRARY_PATH",
value="/usr/local/lib:/.singularity.d/libs")
sitecat.add_sites(site)
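# Worked example of the container path constructed above (the release number is
# hypothetical): if pycbc.version.last_release were '2.3.0', osg jobs would set
#     +SingularityImage = "/cvmfs/singularity.opensciencegrid.org/pycbc/pycbc-el8:v2.3.0"
# i.e. the image tag always tracks the last released version of PyCBC.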
def add_site(sitecat, sitename, cp, out_dir=None):
"""Add site sitename to site catalog"""
# Allow local site scratch to be overridden for any site which uses it
sec = 'pegasus_profile-{}'.format(sitename)
opt = 'pycbc|site-scratch'
if cp.has_option(sec, opt):
out_dir = os.path.abspath(cp.get(sec, opt))
if cp.has_option(sec, 'pycbc|unique-scratch'):
scratchdir = tempfile.mkdtemp(prefix='pycbc-tmp_', dir=out_dir)
os.chmod(scratchdir, 0o755)
try:
os.symlink(scratchdir, '{}-site-scratch'.format(sitename))
except OSError:
pass
out_dir = scratchdir
elif out_dir is None:
out_dir = os.getcwd()
local_url = urljoin('file://', pathname2url(out_dir))
if sitename == 'local':
add_local_site(sitecat, cp, out_dir, local_url)
elif sitename == 'condorpool_symlink':
add_condorpool_symlink_site(sitecat, cp)
elif sitename == 'condorpool_copy':
add_condorpool_copy_site(sitecat, cp)
elif sitename == 'condorpool_shared':
add_condorpool_shared_site(sitecat, cp, out_dir, local_url)
elif sitename == 'osg':
add_osg_site(sitecat, cp)
else:
raise ValueError("Do not recognize site {}".format(sitename))
def make_catalog(cp, out_dir):
"""Make combined catalog of built-in known sites"""
catalog = SiteCatalog()
for site in KNOWN_SITES:
add_site(catalog, site, cp, out_dir=out_dir)
return catalog
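# Minimal usage sketch (the ini file name and run directory are assumed; the
# catalog is normally consumed by pegasus_workflow.py when planning the run):
#
#     from pycbc.workflow.configuration import WorkflowConfigParser
#     cp = WorkflowConfigParser(['my_workflow.ini'])
#     catalog = make_catalog(cp, out_dir='/path/to/rundir')
#     catalog.write()   # serialise the site catalog for pegasus-plan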
| 11,831
| 42.5
| 120
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/dq.py
|
# Copyright (C) 2020 Max Trevor and Derek Davis
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import os
import logging
from ligo import segments
from pycbc.workflow.core import (FileList, Executable, Node,
File, SegFile, make_analysis_dir)
from pycbc.workflow.datafind import setup_datafind_workflow
class PyCBCCalculateDQExecutable(Executable):
current_retention_level = Executable.ALL_TRIGGERS
def create_node(self, segment, frames):
start = int(segment[0])
end = int(segment[1])
node = Node(self)
node.add_input_list_opt('--frame-files', frames)
node.add_opt('--gps-start-time', start)
node.add_opt('--gps-end-time', end)
node.new_output_file_opt(segment, '.hdf', '--output-file')
return node
class PyCBCRerankDQExecutable(Executable):
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, workflow, ifo, dq_type, dq_files, binned_rate_file):
node = Node(self)
node.add_opt('--dq-type', dq_type)
node.add_opt('--ifo', ifo)
node.add_input_list_opt('--input-file', dq_files)
node.add_input_opt('--rate-file', binned_rate_file)
node.new_output_file_opt(workflow.analysis_time, '.hdf',
'--output-file')
return node
class PyCBCBinTriggerRatesDQExecutable(Executable):
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, workflow, ifo, dq_files, trig_file, bank_file):
node = Node(self)
node.add_opt('--ifo', ifo)
node.add_input_opt('--bank-file', bank_file)
node.add_input_opt('--trig-file', trig_file)
node.add_input_list_opt('--dq-file', dq_files)
node.new_output_file_opt(workflow.analysis_time, '.hdf',
'--output-file')
return node
class PyCBCCalculateDQFlagExecutable(Executable):
# current_retention_level = Executable.ALL_TRIGGERS
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, workflow, segment, dq_file, flag):
node = Node(self)
# Executable objects are initialized with ifo information
start = int(segment[0])
end = int(segment[1])
node.add_opt('--ifo', self.ifo_string)
node.add_opt('--flag', flag)
node.add_opt('--gps-start-time', start)
node.add_opt('--gps-end-time', end)
node.add_input_opt('--dq-segments', dq_file)
node.new_output_file_opt(workflow.analysis_time, '.hdf',
'--output-file')
return node
def setup_dq_reranking(workflow, dq_label, insps, bank,
segs, analyzable_file, dq_file,
output_dir=None, tags=None):
make_analysis_dir(output_dir)
output = FileList()
if tags:
dq_tags = tags + [dq_label]
else:
dq_tags = [dq_label]
dq_type = workflow.cp.get_opt_tags("workflow-data_quality",
'dq-type', [dq_label])
if dq_type == 'timeseries':
if dq_label not in workflow.cp.get_subsections('workflow-datafind'):
msg = """No workflow-datafind section with dq tag.
Tags must be used in workflow-datafind sections
if more than one source of data is used.
Strain data source must be tagged
workflow-datafind-hoft.
Consult the documentation for more info."""
raise ValueError(msg)
dq_ifos = workflow.cp.get_opt_tags("workflow-data_quality",
'ifos', [dq_label])
dq_ifos = dq_ifos.split(',')
dq_segs = {}
dq_segs_for_file = {}
for ifo in dq_ifos:
dq_segs[ifo] = segs[ifo]
dq_segs_for_file[ifo+':'+dq_label] = segs[ifo]
dq_segs_file = SegFile.from_segment_list_dict(dq_label,
dq_segs_for_file,
extension='.xml',
valid_segment=workflow.analysis_time,
directory=output_dir)
datafind_files, dq_file, dq_segs, dq_name = \
setup_datafind_workflow(workflow,
dq_segs, "datafind_dq",
seg_file=dq_segs_file,
tags=dq_tags)
for ifo in dq_ifos:
ifo_insp = [insp for insp in insps if (insp.ifo == ifo)]
assert len(ifo_insp)==1
ifo_insp = ifo_insp[0]
dq_files = FileList()
for seg in dq_segs[ifo]:
seg_frames = datafind_files.find_all_output_in_range(ifo, seg)
raw_exe = PyCBCCalculateDQExecutable(workflow.cp,
'calculate_dq', ifos=ifo,
out_dir=output_dir,
tags=dq_tags)
raw_node = raw_exe.create_node(seg, seg_frames)
workflow += raw_node
dq_files += raw_node.output_files
intermediate_exe = PyCBCBinTriggerRatesDQExecutable(workflow.cp,
'bin_trigger_rates_dq',
ifos=ifo,
out_dir=output_dir,
tags=dq_tags)
intermediate_node = intermediate_exe.create_node(workflow, ifo,
dq_files,
ifo_insp, bank)
workflow += intermediate_node
binned_rate_file = intermediate_node.output_file
new_exe = PyCBCRerankDQExecutable(workflow.cp,
'rerank_dq', ifos=ifo,
out_dir=output_dir,
tags=dq_tags)
new_node = new_exe.create_node(workflow, ifo, dq_label,
dq_files, binned_rate_file)
workflow += new_node
output += new_node.output_files
elif dq_type == 'flag':
flag_str = workflow.cp.get_opt_tags("workflow-data_quality",
'flag-name', dq_tags)
ifo = flag_str[:2]
ifo_insp = [insp for insp in insps if (insp.ifo == ifo)]
assert len(ifo_insp)==1
ifo_insp = ifo_insp[0]
flag_name = flag_str
logging.info("Creating job for flag %s", flag_name)
for seg in segs[ifo]:
raw_exe = PyCBCCalculateDQFlagExecutable(workflow.cp,
'calculate_dqflag', ifos=ifo,
out_dir=output_dir,
tags=dq_tags)
raw_node = raw_exe.create_node(workflow, seg, dq_file,
flag_name)
workflow += raw_node
dq_files = raw_node.output_files
intermediate_exe = PyCBCBinTriggerRatesDQExecutable(workflow.cp,
'bin_trigger_rates_dq',
ifos=ifo,
out_dir=output_dir,
tags=dq_tags)
intermediate_node = intermediate_exe.create_node(workflow, ifo,
dq_files,
ifo_insp, bank)
workflow += intermediate_node
binned_rate_file = intermediate_node.output_file
new_exe = PyCBCRerankDQExecutable(workflow.cp,
'rerank_dq', ifos=ifo,
out_dir=output_dir,
tags=dq_tags)
new_node = new_exe.create_node(workflow, ifo, dq_label,
dq_files, binned_rate_file)
workflow += new_node
output += new_node.output_files
else:
msg = """Incorrect DQ type specified.
Only valid DQ types are 'flag'
and 'timeseries'.
Consult the documentation for more info."""
raise ValueError(msg)
return output
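# Illustrative configuration sketch for the two branches above (the section
# label, flag name and ifo list are examples only). A flag-type source is
# driven by entries such as
#
#     [workflow-data_quality-flag_l1]
#     dq-type = flag
#     flag-name = L1:DCH-EXAMPLE_FLAG:1
#
# whereas a timeseries-type source sets dq-type = timeseries, lists the
# relevant detectors via an 'ifos' option, and requires a matching tagged
# [workflow-datafind-<label>] section so that setup_datafind_workflow can
# locate the auxiliary frames.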
| 9,556
| 45.848039
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/datafind.py
|
# Copyright (C) 2013 Ian Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module is responsible for querying a datafind server to determine the
availability of the data that the code is attempting to run on. It also
performs a number of tests and can act on these as described below. Full
documentation for this function can be found here:
https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/ahope/datafind.html
"""
import os, copy
import logging
from ligo import segments
from ligo.lw import utils, table
from glue import lal
from gwdatafind import find_urls as find_frame_urls
from pycbc.workflow.core import SegFile, File, FileList, make_analysis_dir
from pycbc.io.ligolw import LIGOLWContentHandler
def setup_datafind_workflow(workflow, scienceSegs, outputDir, seg_file=None,
tags=None):
"""
Setup datafind section of the workflow. This section is responsible for
generating, or setting up the workflow to generate, a list of files that
record the location of the frame files needed to perform the analysis.
There could be multiple options here, the datafind jobs could be done at
run time or could be put into a dag. The subsequent jobs will know
what was done here from the OutFileList containing the datafind jobs
(and the Dagman nodes if appropriate).
For now the only implemented option is to generate the datafind files at
runtime. This module can also check if the frameFiles actually exist, check
whether the obtained segments line up with the original ones and update the
science segments to reflect missing data files.
Parameters
----------
workflow: pycbc.workflow.core.Workflow
The workflow class that stores the jobs that will be run.
scienceSegs : Dictionary of ifo keyed ligo.segments.segmentlist instances
This contains the times that the workflow is expected to analyse.
outputDir : path
All output files written by datafind processes will be written to this
directory.
seg_file : SegFile, optional (default=None)
The file returned by get_science_segments containing the science
segments and the associated segment_summary. This will
be used for the segment_summary test and is required if, and only if,
performing that test.
tags : list of string, optional (default=None)
Use this to specify tags. This can be used if this module is being
called more than once to give call specific configuration (by setting
options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
This is also used to tag the Files returned by the class to uniqueify
the Files and uniqueify the actual filename.
FIXME: Filenames may not be unique with current codes!
Returns
--------
datafindOuts : OutGroupList
List of all the datafind output files for use later in the pipeline.
sci_avlble_file : SegFile
SegFile containing the analysable time after checks in the datafind
module are applied to the input segment list. For production runs this
is expected to be equal to the input segment list.
scienceSegs : Dictionary of ifo keyed ligo.segments.segmentlist instances
This contains the times that the workflow is expected to analyse. If
the updateSegmentTimes kwarg is given this will be updated to reflect
any instances of missing data.
sci_avlble_name : string
The name with which the analysable time is stored in the
sci_avlble_file.
"""
if tags is None:
tags = []
logging.info("Entering datafind module")
make_analysis_dir(outputDir)
cp = workflow.cp
# Parse for options in ini file
datafind_method = cp.get_opt_tags("workflow-datafind",
"datafind-method", tags)
if cp.has_option_tags("workflow-datafind",
"datafind-check-segment-gaps", tags):
checkSegmentGaps = cp.get_opt_tags("workflow-datafind",
"datafind-check-segment-gaps", tags)
else:
checkSegmentGaps = "no_test"
if cp.has_option_tags("workflow-datafind",
"datafind-check-frames-exist", tags):
checkFramesExist = cp.get_opt_tags("workflow-datafind",
"datafind-check-frames-exist", tags)
else:
checkFramesExist = "no_test"
if cp.has_option_tags("workflow-datafind",
"datafind-check-segment-summary", tags):
checkSegmentSummary = cp.get_opt_tags("workflow-datafind",
"datafind-check-segment-summary", tags)
else:
checkSegmentSummary = "no_test"
logging.info("Starting datafind with setup_datafind_runtime_generated")
if datafind_method == "AT_RUNTIME_MULTIPLE_CACHES":
datafindcaches, datafindouts = \
setup_datafind_runtime_cache_multi_calls_perifo(cp, scienceSegs,
outputDir, tags=tags)
elif datafind_method == "AT_RUNTIME_SINGLE_CACHES":
datafindcaches, datafindouts = \
setup_datafind_runtime_cache_single_call_perifo(cp, scienceSegs,
outputDir, tags=tags)
elif datafind_method == "AT_RUNTIME_MULTIPLE_FRAMES":
datafindcaches, datafindouts = \
setup_datafind_runtime_frames_multi_calls_perifo(cp, scienceSegs,
outputDir, tags=tags)
elif datafind_method == "AT_RUNTIME_SINGLE_FRAMES":
datafindcaches, datafindouts = \
setup_datafind_runtime_frames_single_call_perifo(cp, scienceSegs,
outputDir, tags=tags)
elif datafind_method == "AT_RUNTIME_FAKE_DATA":
pass
elif datafind_method == "FROM_PREGENERATED_LCF_FILES":
ifos = scienceSegs.keys()
datafindcaches, datafindouts = \
setup_datafind_from_pregenerated_lcf_files(cp, ifos,
outputDir, tags=tags)
else:
msg = """Entry datafind-method in [workflow-datafind] does not have "
expected value. Valid values are
AT_RUNTIME_MULTIPLE_FRAMES, AT_RUNTIME_SINGLE_FRAMES
AT_RUNTIME_MULTIPLE_CACHES, AT_RUNTIME_SINGLE_CACHES,
FROM_PREGENERATED_LCF_FILES, or AT_RUNTIME_FAKE_DATA.
Consult the documentation for more info."""
raise ValueError(msg)
using_backup_server = False
if datafind_method == "AT_RUNTIME_MULTIPLE_FRAMES" or \
datafind_method == "AT_RUNTIME_SINGLE_FRAMES":
if cp.has_option_tags("workflow-datafind",
"datafind-backup-datafind-server", tags):
using_backup_server = True
backup_server = cp.get_opt_tags("workflow-datafind",
"datafind-backup-datafind-server", tags)
cp_new = copy.deepcopy(cp)
cp_new.set("workflow-datafind",
"datafind-ligo-datafind-server", backup_server)
cp_new.set('datafind', 'urltype', 'gsiftp')
backup_datafindcaches, backup_datafindouts =\
setup_datafind_runtime_frames_single_call_perifo(cp_new,
scienceSegs, outputDir, tags=tags)
backup_datafindouts = datafind_keep_unique_backups(\
backup_datafindouts, datafindouts)
datafindcaches.extend(backup_datafindcaches)
datafindouts.extend(backup_datafindouts)
logging.info("setup_datafind_runtime_generated completed")
# If we don't have frame files covering all times we can update the science
# segments.
if checkSegmentGaps in ['warn','update_times','raise_error']:
logging.info("Checking science segments against datafind output....")
newScienceSegs = get_science_segs_from_datafind_outs(datafindcaches)
logging.info("New segments calculated from data find output.....")
missingData = False
for ifo in scienceSegs.keys():
# If no science segments in input then do nothing
if not scienceSegs[ifo]:
msg = "No science segments are present for ifo %s, " %(ifo)
msg += "the segment metadata indicates there is no analyzable"
msg += " strain data between the selected GPS start and end "
msg += "times."
logging.warning(msg)
continue
if ifo not in newScienceSegs:
msg = "No data frames were found corresponding to the science "
msg += "segments for ifo %s" %(ifo)
logging.error(msg)
missingData = True
if checkSegmentGaps == 'update_times':
scienceSegs[ifo] = segments.segmentlist()
continue
missing = scienceSegs[ifo] - newScienceSegs[ifo]
if abs(missing):
msg = "From ifo %s we are missing frames covering:" %(ifo)
msg += "\n%s" % "\n".join(map(str, missing))
missingData = True
logging.error(msg)
if checkSegmentGaps == 'update_times':
# Remove missing time, so that we can carry on if desired
logging.info("Updating science segments for ifo %s."
%(ifo))
scienceSegs[ifo] = scienceSegs[ifo] - missing
if checkSegmentGaps == 'raise_error' and missingData:
raise ValueError("Workflow cannot find needed data, exiting.")
logging.info("Done checking, any discrepancies are reported above.")
elif checkSegmentGaps == 'no_test':
pass
else:
errMsg = "checkSegmentGaps kwarg must take a value from 'no_test', "
errMsg += "'warn', 'update_times' or 'raise_error'."
raise ValueError(errMsg)
# Do all of the frame files that were returned actually exist?
if checkFramesExist in ['warn','update_times','raise_error']:
logging.info("Verifying that all frames exist on disk.")
missingFrSegs, missingFrames = \
get_missing_segs_from_frame_file_cache(datafindcaches)
missingFlag = False
for ifo in missingFrames.keys():
# If no data in the input then do nothing
if not scienceSegs[ifo]:
continue
# If using a backup server, does the frame exist remotely?
if using_backup_server:
# WARNING: This will be slow, but hopefully it will not occur
# for too many frames. This could be optimized if
# it becomes necessary.
new_list = []
for frame in missingFrames[ifo]:
for dfout in datafindouts:
dfout_pfns = list(dfout.pfns)
dfout_urls = [a.url for a in dfout_pfns]
if frame.url in dfout_urls:
pfn = dfout_pfns[dfout_urls.index(frame.url)]
dfout.removePFN(pfn)
if len(dfout.pfns) == 0:
new_list.append(frame)
else:
msg = "Frame %s not found locally. "\
%(frame.url,)
msg += "Replacing with remote url(s) %s." \
%(str([a.url for a in dfout.pfns]),)
logging.info(msg)
break
else:
new_list.append(frame)
missingFrames[ifo] = new_list
if missingFrames[ifo]:
msg = "From ifo %s we are missing the following frames:" %(ifo)
msg += '\n' + '\n'.join([a.url for a in missingFrames[ifo]])
missingFlag = True
logging.error(msg)
if checkFramesExist == 'update_times':
# Remove missing times, so that we can carry on if desired
logging.info("Updating science times for ifo %s." %(ifo))
scienceSegs[ifo] = scienceSegs[ifo] - missingFrSegs[ifo]
if checkFramesExist == 'raise_error' and missingFlag:
raise ValueError("Workflow cannot find all frames, exiting.")
logging.info("Finished checking frames.")
elif checkFramesExist == 'no_test':
pass
else:
errMsg = "checkFramesExist kwarg must take a value from 'no_test', "
errMsg += "'warn', 'update_times' or 'raise_error'."
raise ValueError(errMsg)
# Check if there are cases where frames exist, but no entry in the segment
# summary table are present.
if checkSegmentSummary in ['warn', 'raise_error']:
logging.info("Checking the segment summary table against frames.")
dfScienceSegs = get_science_segs_from_datafind_outs(datafindcaches)
missingFlag = False
# NOTE: Should this be overrideable in the config file?
sci_seg_name = "SCIENCE"
if seg_file is None:
err_msg = "You must provide the science segments SegFile object "
err_msg += "if using the datafind-check-segment-summary option."
raise ValueError(err_msg)
if seg_file.seg_summ_dict is None:
err_msg = "The provided science segments SegFile object must "
err_msg += "contain a valid segment_summary table if using the "
err_msg += "datafind-check-segment-summary option."
raise ValueError(err_msg)
seg_summary_times = seg_file.seg_summ_dict
for ifo in dfScienceSegs.keys():
curr_seg_summ_times = seg_summary_times[ifo + ":" + sci_seg_name]
missing = (dfScienceSegs[ifo] & seg_file.valid_segments)
missing.coalesce()
missing = missing - curr_seg_summ_times
missing.coalesce()
scienceButNotFrame = scienceSegs[ifo] - dfScienceSegs[ifo]
scienceButNotFrame.coalesce()
missing2 = scienceSegs[ifo] - scienceButNotFrame
missing2.coalesce()
missing2 = missing2 - curr_seg_summ_times
missing2.coalesce()
if abs(missing):
msg = "From ifo %s the following times have frames, " %(ifo)
msg += "but are not covered in the segment summary table."
msg += "\n%s" % "\n".join(map(str, missing))
logging.error(msg)
missingFlag = True
if abs(missing2):
msg = "From ifo %s the following times have frames, " %(ifo)
msg += "are science, and are not covered in the segment "
msg += "summary table."
msg += "\n%s" % "\n".join(map(str, missing2))
logging.error(msg)
missingFlag = True
if checkSegmentSummary == 'raise_error' and missingFlag:
errMsg = "Segment_summary discrepancy detected, exiting."
raise ValueError(errMsg)
elif checkSegmentSummary == 'no_test':
pass
else:
errMsg = "checkSegmentSummary kwarg must take a value from 'no_test', "
errMsg += "'warn', or 'raise_error'."
raise ValueError(errMsg)
# Now need to create the file for SCIENCE_AVAILABLE
sci_avlble_dict = segments.segmentlistdict()
# NOTE: Should this be overrideable in the config file?
sci_avlble_name = "SCIENCE_AVAILABLE"
for ifo in scienceSegs.keys():
sci_avlble_dict[ifo + ':' + sci_avlble_name] = scienceSegs[ifo]
sci_avlble_file = SegFile.from_segment_list_dict('SCIENCE_AVAILABLE',
sci_avlble_dict, ifo_list = scienceSegs.keys(),
valid_segment=workflow.analysis_time,
extension='.xml', tags=tags, directory=outputDir)
logging.info("Leaving datafind module")
if datafind_method == "AT_RUNTIME_FAKE_DATA":
datafindouts = None
else:
datafindouts = FileList(datafindouts)
return datafindouts, sci_avlble_file, scienceSegs, sci_avlble_name
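# Configuration sketch for the checks above (the chosen values are just one of
# the recognised combinations):
#
#     [workflow-datafind]
#     datafind-method = AT_RUNTIME_SINGLE_FRAMES
#     datafind-check-segment-gaps = update_times    ; no_test, warn, update_times, raise_error
#     datafind-check-frames-exist = raise_error     ; no_test, warn, update_times, raise_error
#     datafind-check-segment-summary = warn         ; no_test, warn, raise_error
#
# With these settings the science segments are trimmed to the times covered by
# the frames the datafind server reports, and the workflow aborts if any
# reported frame is missing from disk.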
def setup_datafind_runtime_cache_multi_calls_perifo(cp, scienceSegs,
outputDir, tags=None):
"""
This function uses the `gwdatafind` library to obtain the location of all
the frame files that will be needed to cover the analysis of the data
given in scienceSegs. This function will not check if the returned frames
cover the whole time requested, such sanity checks are done in the
pycbc.workflow.setup_datafind_workflow entry function. As opposed to
setup_datafind_runtime_single_call_perifo this call will make one call to the
datafind server for every science segment. This function will return a list
of output files that correspond to the cache .lcf files that are produced,
which list the locations of all frame files. This will cause problems with
pegasus, which expects to know about all input files (ie. the frame files
themselves.)
Parameters
-----------
cp : ConfigParser.ConfigParser instance
This contains a representation of the information stored within the
workflow configuration files
scienceSegs : Dictionary of ifo keyed ligo.segments.segmentlist instances
This contains the times that the workflow is expected to analyse.
outputDir : path
All output files written by datafind processes will be written to this
directory.
tags : list of strings, optional (default=None)
Use this to specify tags. This can be used if this module is being
called more than once to give call specific configuration (by setting
options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
This is also used to tag the Files returned by the class to uniqueify
the Files and uniqueify the actual filename.
FIXME: Filenames may not be unique with current codes!
Returns
--------
datafindcaches : list of glue.lal.Cache instances
The glue.lal.Cache representations of the various calls to the datafind
server and the returned frame files.
datafindOuts : pycbc.workflow.core.FileList
List of all the datafind output files for use later in the pipeline.
"""
if tags is None:
tags = []
# Now ready to loop over the input segments
datafindouts = []
datafindcaches = []
logging.info("Querying datafind server for all science segments.")
for ifo, scienceSegsIfo in scienceSegs.items():
observatory = ifo[0].upper()
frameType = cp.get_opt_tags("workflow-datafind",
"datafind-%s-frame-type" % (ifo.lower()), tags)
for seg in scienceSegsIfo:
msg = "Finding data between %d and %d " %(seg[0],seg[1])
msg += "for ifo %s" %(ifo)
logging.info(msg)
# WARNING: For now the workflow will expect times to be in integer seconds
startTime = int(seg[0])
endTime = int(seg[1])
# Sometimes the connection can drop, so retry once if the first attempt fails
try:
cache, cache_file = run_datafind_instance(
cp,
outputDir,
observatory,
frameType,
startTime,
endTime,
ifo,
tags=tags
)
except:
cache, cache_file = run_datafind_instance(
cp,
outputDir,
observatory,
frameType,
startTime,
endTime,
ifo,
tags=tags
)
datafindouts.append(cache_file)
datafindcaches.append(cache)
return datafindcaches, datafindouts
def setup_datafind_runtime_cache_single_call_perifo(cp, scienceSegs, outputDir,
tags=None):
"""
This function uses the `gwdatafind` library to obtain the location of all
the frame files that will be needed to cover the analysis of the data
given in scienceSegs. This function will not check if the returned frames
cover the whole time requested, such sanity checks are done in the
pycbc.workflow.setup_datafind_workflow entry function. As opposed to
setup_datafind_runtime_generated this call will only run one call to
datafind per ifo, spanning the whole time. This function will return a list
of output files that correspond to the cache .lcf files that are produced,
which list the locations of all frame files. This will cause problems with
pegasus, which expects to know about all input files (ie. the frame files
themselves.)
Parameters
-----------
cp : ConfigParser.ConfigParser instance
This contains a representation of the information stored within the
workflow configuration files
scienceSegs : Dictionary of ifo keyed ligo.segments.segmentlist instances
This contains the times that the workflow is expected to analyse.
outputDir : path
All output files written by datafind processes will be written to this
directory.
tags : list of strings, optional (default=None)
Use this to specify tags. This can be used if this module is being
called more than once to give call specific configuration (by setting
options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
This is also used to tag the Files returned by the class to uniqueify
the Files and uniqueify the actual filename.
FIXME: Filenames may not be unique with current codes!
Returns
--------
datafindcaches : list of glue.lal.Cache instances
The glue.lal.Cache representations of the various calls to the datafind
server and the returned frame files.
datafindOuts : pycbc.workflow.core.FileList
List of all the datafind output files for use later in the pipeline.
"""
if tags is None:
tags = []
# We want to ignore gaps as the detectors go up and down and calling this
# way will give gaps. See the setup_datafind_runtime_generated function
# for datafind calls that only query for data that will exist
cp.set("datafind","on_gaps","ignore")
# Now ready to loop over the input segments
datafindouts = []
datafindcaches = []
logging.info("Querying datafind server for all science segments.")
for ifo, scienceSegsIfo in scienceSegs.items():
observatory = ifo[0].upper()
checked_times = segments.segmentlist([])
frame_types = cp.get_opt_tags(
"workflow-datafind",
"datafind-%s-frame-type" % (ifo.lower()), tags
)
# Check if this is one type, or time varying
frame_types = frame_types.replace(' ', '').strip().split(',')
for ftype in frame_types:
# Check the times, default to full time initially
# This REQUIRES a coalesced segment list to work
start = int(scienceSegsIfo[0][0])
end = int(scienceSegsIfo[-1][1])
# Then check for limits. We're expecting something like:
# value[start:end], so need to extract value, start and end
if '[' in ftype:
# This gets start and end out
bopt = ftype.split('[')[1].split(']')[0]
newstart, newend = bopt.split(':')
# Then check if the times are within science time
start = max(int(newstart), start)
end = min(int(newend), end)
if end <= start:
continue
# This extracts value
ftype = ftype.split('[')[0]
curr_times = segments.segment(start, end)
# The times here must be distinct. We cannot have two different
# frame files at the same time from the same ifo.
if checked_times.intersects_segment(curr_times):
err_msg = "Different frame types cannot overlap in time."
raise ValueError(err_msg)
checked_times.append(curr_times)
# Ask datafind where the frames are
try:
cache, cache_file = run_datafind_instance(
cp,
outputDir,
observatory,
ftype,
start,
end,
ifo,
tags=tags
)
except:
cache, cache_file = run_datafind_instance(
cp,
outputDir,
observatory,
ftype,
start,
end,
ifo,
tags=tags
)
datafindouts.append(cache_file)
datafindcaches.append(cache)
return datafindcaches, datafindouts
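# A minimal sketch (not part of the original module) of the 'TYPE[start:end]'
# clipping logic used above, so the behaviour can be checked on assumed inputs:
# _example_clip_frame_type('H1_TYPE_A[100:200]', 50, 150) returns
# ('H1_TYPE_A', 100, 150), i.e. the query window is the intersection of the
# bracketed range with the science span.
def _example_clip_frame_type(ftype, sci_start, sci_end):
    start, end = sci_start, sci_end
    if '[' in ftype:
        # Pull the 'start:end' text out of the brackets and clip to science time
        bopt = ftype.split('[')[1].split(']')[0]
        newstart, newend = bopt.split(':')
        start = max(int(newstart), start)
        end = min(int(newend), end)
        ftype = ftype.split('[')[0]
    return ftype, start, end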
def setup_datafind_runtime_frames_single_call_perifo(cp, scienceSegs,
outputDir, tags=None):
"""
This function uses the `gwdatafind` library to obtain the location of all
the frame files that will be needed to cover the analysis of the data
given in scienceSegs. This function will not check if the returned frames
cover the whole time requested, such sanity checks are done in the
pycbc.workflow.setup_datafind_workflow entry function. As opposed to
setup_datafind_runtime_generated this call will only run one call to
datafind per ifo, spanning the whole time. This function will return a list
of files corresponding to the individual frames returned by the datafind
query. This will allow pegasus to more easily identify all the files used
as input, but may cause problems for codes that need to take frame cache
files as input.
Parameters
-----------
cp : ConfigParser.ConfigParser instance
This contains a representation of the information stored within the
workflow configuration files
scienceSegs : Dictionary of ifo keyed ligo.segments.segmentlist instances
This contains the times that the workflow is expected to analyse.
outputDir : path
All output files written by datafind processes will be written to this
directory.
tags : list of strings, optional (default=None)
Use this to specify tags. This can be used if this module is being
called more than once to give call specific configuration (by setting
options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
This is also used to tag the Files returned by the class to uniqueify
the Files and uniqueify the actual filename.
FIXME: Filenames may not be unique with current codes!
Returns
--------
datafindcaches : list of glue.lal.Cache instances
The glue.lal.Cache representations of the various calls to the datafind
server and the returned frame files.
datafindOuts : pycbc.workflow.core.FileList
List of all the datafind output files for use later in the pipeline.
"""
datafindcaches, _ = \
setup_datafind_runtime_cache_single_call_perifo(cp, scienceSegs,
outputDir, tags=tags)
datafindouts = convert_cachelist_to_filelist(datafindcaches)
return datafindcaches, datafindouts
def setup_datafind_runtime_frames_multi_calls_perifo(cp, scienceSegs,
outputDir, tags=None):
"""
This function uses the `gwdatafind` library to obtain the location of all
the frame files that will be needed to cover the analysis of the data
given in scienceSegs. This function will not check if the returned frames
cover the whole time requested, such sanity checks are done in the
pycbc.workflow.setup_datafind_workflow entry function. As opposed to
setup_datafind_runtime_single_call_perifo this call will make one call to the
datafind server for every science segment. This function will return a list
of files corresponding to the individual frames returned by the datafind
query. This will allow pegasus to more easily identify all the files used
as input, but may cause problems for codes that need to take frame cache
files as input.
Parameters
-----------
cp : ConfigParser.ConfigParser instance
This contains a representation of the information stored within the
workflow configuration files
scienceSegs : Dictionary of ifo keyed ligo.segments.segmentlist instances
This contains the times that the workflow is expected to analyse.
outputDir : path
All output files written by datafind processes will be written to this
directory.
tags : list of strings, optional (default=None)
Use this to specify tags. This can be used if this module is being
called more than once to give call specific configuration (by setting
options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
This is also used to tag the Files returned by the class to uniqueify
the Files and uniqueify the actual filename.
FIXME: Filenames may not be unique with current codes!
Returns
--------
datafindcaches : list of glue.lal.Cache instances
The glue.lal.Cache representations of the various calls to the datafind
server and the returned frame files.
datafindOuts : pycbc.workflow.core.FileList
List of all the datafind output files for use later in the pipeline.
"""
datafindcaches, _ = \
setup_datafind_runtime_cache_multi_calls_perifo(cp, scienceSegs,
outputDir, tags=tags)
datafindouts = convert_cachelist_to_filelist(datafindcaches)
return datafindcaches, datafindouts
def setup_datafind_from_pregenerated_lcf_files(cp, ifos, outputDir, tags=None):
"""
This function is used if you want to run with pregenerated lcf frame
cache files.
Parameters
-----------
cp : ConfigParser.ConfigParser instance
This contains a representation of the information stored within the
workflow configuration files
ifos : list of ifo strings
List of ifos to get pregenerated files for.
outputDir : path
All output files written by datafind processes will be written to this
directory. Currently this sub-module writes no output.
tags : list of strings, optional (default=None)
Use this to specify tags. This can be used if this module is being
called more than once to give call specific configuration (by setting
options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
This is also used to tag the Files returned by the class to uniqueify
the Files and uniqueify the actual filename.
Returns
--------
datafindcaches : list of glue.lal.Cache instances
The glue.lal.Cache representations of the various calls to the datafind
server and the returned frame files.
datafindOuts : pycbc.workflow.core.FileList
List of all the datafind output files for use later in the pipeline.
"""
if tags is None:
tags = []
datafindcaches = []
for ifo in ifos:
search_string = "datafind-pregenerated-cache-file-%s" %(ifo.lower(),)
frame_cache_file_name = cp.get_opt_tags("workflow-datafind",
search_string, tags=tags)
curr_cache = lal.Cache.fromfilenames([frame_cache_file_name],
coltype=lal.LIGOTimeGPS)
curr_cache.ifo = ifo
datafindcaches.append(curr_cache)
datafindouts = convert_cachelist_to_filelist(datafindcaches)
return datafindcaches, datafindouts
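# A minimal configuration sketch for the function above, assuming one
# pregenerated .lcf cache per ifo; the paths are placeholders and only the
# option-name pattern "datafind-pregenerated-cache-file-<ifo>" is taken from
# the code:
#
#   [workflow-datafind]
#   datafind-pregenerated-cache-file-h1 = /path/to/H1-frames.lcf
#   datafind-pregenerated-cache-file-l1 = /path/to/L1-frames.lcf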
def convert_cachelist_to_filelist(datafindcache_list):
"""
Take as input a list of glue.lal.Cache objects and return a pycbc FileList
containing all frames within those caches.
Parameters
-----------
datafindcache_list : list of glue.lal.Cache objects
The list of cache files to convert.
Returns
--------
datafind_filelist : FileList of frame File objects
The list of frame files.
"""
prev_file = None
prev_name = None
this_name = None
datafind_filelist = FileList([])
for cache in datafindcache_list:
# sort the cache into time sequential order
cache.sort()
curr_ifo = cache.ifo
for frame in cache:
# Pegasus doesn't like "localhost" in URLs.
frame.url = frame.url.replace('file://localhost','file://')
# Create one File() object for each unique frame file that we
# get back in the cache.
if prev_file:
prev_name = os.path.basename(prev_file.cache_entry.url)
this_name = os.path.basename(frame.url)
if (prev_file is None) or (prev_name != this_name):
currFile = File(curr_ifo, frame.description,
frame.segment, file_url=frame.url, use_tmp_subdirs=True)
datafind_filelist.append(currFile)
prev_file = currFile
# Populate the PFNs for the File() we just created
if frame.url.startswith('file://'):
currFile.add_pfn(frame.url, site='local')
if frame.url.startswith(
'file:///cvmfs/oasis.opensciencegrid.org/ligo/frames'):
# Datafind returned a URL valid on the osg as well
# so add the additional PFNs to allow OSG access.
currFile.add_pfn(frame.url, site='osg')
elif frame.url.startswith(
'file:///cvmfs/gwosc.osgstorage.org/'):
# Datafind returned a URL valid on the osg as well
# so add the additional PFNs to allow OSG access.
for s in ['osg', 'orangegrid', 'osgconnect']:
currFile.add_pfn(frame.url, site=s)
currFile.add_pfn(frame.url, site="{}-scratch".format(s))
else:
currFile.add_pfn(frame.url, site='notlocal')
return datafind_filelist
def get_science_segs_from_datafind_outs(datafindcaches):
"""
This function will calculate the science segments that are covered in
the OutGroupList containing the frame files returned by various
calls to the datafind server. This can then be used to check whether this
list covers what it is expected to cover.
Parameters
----------
datafindcaches : OutGroupList
List of all the datafind output files.
Returns
--------
newScienceSegs : Dictionary of ifo keyed ligo.segments.segmentlist instances
The times covered by the frames found in datafindOuts.
"""
newScienceSegs = {}
for cache in datafindcaches:
if len(cache) > 0:
groupSegs = segments.segmentlist(e.segment for e in cache).coalesce()
ifo = cache.ifo
if ifo not in newScienceSegs:
newScienceSegs[ifo] = groupSegs
else:
newScienceSegs[ifo].extend(groupSegs)
newScienceSegs[ifo].coalesce()
return newScienceSegs
def get_missing_segs_from_frame_file_cache(datafindcaches):
"""
This function will use os.path.isfile to determine if all the frame files
returned by the local datafind server actually exist on the disk. This can
then be used to update the science times if needed.
Parameters
-----------
datafindcaches : OutGroupList
List of all the datafind output files.
Returns
--------
missingFrameSegs : Dict. of ifo keyed ligo.segments.segmentlist instances
The times corresponding to missing frames found in datafindOuts.
missingFrames: Dict. of ifo keyed lal.Cache instances
The list of missing frames
"""
missingFrameSegs = {}
missingFrames = {}
for cache in datafindcaches:
if len(cache) > 0:
# Don't bother if these are not file:// urls, assume all urls in
# one cache file must be the same type
if not cache[0].scheme == 'file':
warn_msg = "We have %s entries in the " %(cache[0].scheme,)
warn_msg += "cache file. I do not check if these exist."
logging.info(warn_msg)
continue
_, currMissingFrames = cache.checkfilesexist(on_missing="warn")
missingSegs = segments.segmentlist(e.segment \
for e in currMissingFrames).coalesce()
ifo = cache.ifo
if ifo not in missingFrameSegs:
missingFrameSegs[ifo] = missingSegs
missingFrames[ifo] = lal.Cache(currMissingFrames)
else:
missingFrameSegs[ifo].extend(missingSegs)
# NOTE: This .coalesce probably isn't needed as the segments
# should be disjoint. If speed becomes an issue maybe remove it?
missingFrameSegs[ifo].coalesce()
missingFrames[ifo].extend(currMissingFrames)
return missingFrameSegs, missingFrames
def get_segment_summary_times(scienceFile, segmentName):
"""
This function will find the times for which the segment_summary is set
for the flag given by segmentName.
Parameters
-----------
scienceFile : SegFile
The segment file that we want to use to determine this.
segmentName : string
The DQ flag to search for times in the segment_summary table.
Returns
---------
summSegList : ligo.segments.segmentlist
The times that are covered in the segment summary table.
"""
# Parse the segmentName
segmentName = segmentName.split(':')
if not len(segmentName) in [2, 3]:
raise ValueError(f"Invalid channel name {segmentName}.")
ifo = segmentName[0]
channel = segmentName[1]
version = ''
if len(segmentName) == 3:
version = int(segmentName[2])
# Load the filename
xmldoc = utils.load_filename(
scienceFile.cache_entry.path,
compress='auto',
contenthandler=LIGOLWContentHandler
)
# Get the segment_def_id for the segmentName
segmentDefTable = table.Table.get_table(xmldoc, "segment_definer")
for entry in segmentDefTable:
if (entry.ifos == ifo) and (entry.name == channel):
if len(segmentName) == 2 or (entry.version==version):
segDefID = entry.segment_def_id
break
else:
raise ValueError("Cannot find channel %s in segment_definer table."\
%(segmentName))
# Get the segmentlist corresponding to this segmentName in segment_summary
segmentSummTable = table.Table.get_table(xmldoc, "segment_summary")
summSegList = segments.segmentlist([])
for entry in segmentSummTable:
if entry.segment_def_id == segDefID:
segment = segments.segment(entry.start_time, entry.end_time)
summSegList.append(segment)
summSegList.coalesce()
return summSegList
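# A minimal usage sketch for the function above; "sci_file" stands in for a
# SegFile produced earlier in the workflow and the flag name is only an
# illustrative example. The flag must be given as IFO:NAME or
# IFO:NAME:VERSION, matching the parsing above.
#
#   summ_segs = get_segment_summary_times(sci_file, 'H1:DMT-ANALYSIS_READY:1')
#   print(abs(summ_segs))  # total time covered by the segment_summary table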
def run_datafind_instance(cp, outputDir, observatory, frameType,
startTime, endTime, ifo, tags=None):
"""
This function will query the datafind server once to find frames between
the specified times for the specified frame type and observatory.
Parameters
----------
cp : ConfigParser instance
Source for any kwargs that should be sent to the datafind module
    outputDir : path
        Output cache files will be written here. We also write the
        commands for reproducing what is done in this function to this
        directory.
observatory : string
The observatory to query frames for. Ex. 'H', 'L' or 'V'. NB: not
'H1', 'L1', 'V1' which denote interferometers.
frameType : string
The frame type to query for.
startTime : int
Integer start time to query the datafind server for frames.
endTime : int
Integer end time to query the datafind server for frames.
ifo : string
The interferometer to use for naming output. Ex. 'H1', 'L1', 'V1'.
Maybe this could be merged with the observatory string, but this
could cause issues if running on old 'H2' and 'H1' data.
tags : list of string, optional (default=None)
Use this to specify tags. This can be used if this module is being
called more than once to give call specific configuration (by setting
options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
This is also used to tag the Files returned by the class to uniqueify
        the Files and uniqueify the actual filename.
FIXME: Filenames may not be unique with current codes!
Returns
--------
dfCache : glue.lal.Cache instance
The glue.lal.Cache representation of the call to the datafind
server and the returned frame files.
cacheFile : pycbc.workflow.core.File
Cache file listing all of the datafind output files for use later in the pipeline.
"""
if tags is None:
tags = []
# Determine if we should override the default datafind server
if cp.has_option_tags("workflow-datafind",
"datafind-ligo-datafind-server", tags):
datafind_server = cp.get_opt_tags(
"workflow-datafind",
"datafind-ligo-datafind-server",
tags
)
else:
datafind_server = None
seg = segments.segment([startTime, endTime])
# Take the datafind kwargs from config (usually urltype=file is
# given).
dfKwargs = {}
# By default ignore missing frames, this case is dealt with outside of here
dfKwargs['on_gaps'] = 'ignore'
if cp.has_section("datafind"):
for item, value in cp.items("datafind"):
dfKwargs[item] = value
for tag in tags:
if cp.has_section('datafind-%s' %(tag)):
for item, value in cp.items("datafind-%s" %(tag)):
dfKwargs[item] = value
# It is useful to print the corresponding command to the logs
# directory to check if this was expected.
log_datafind_command(observatory, frameType, startTime, endTime,
os.path.join(outputDir,'logs'), **dfKwargs)
logging.debug("Asking datafind server for frames.")
dfCache = lal.Cache.from_urls(
find_frame_urls(
observatory,
frameType,
startTime,
endTime,
host=datafind_server,
**dfKwargs
),
)
logging.debug("Frames returned")
# workflow format output file
cache_file = File(ifo, 'DATAFIND', seg, extension='lcf',
directory=outputDir, tags=tags)
cache_file.add_pfn(cache_file.cache_entry.path, site='local')
dfCache.ifo = ifo
# Dump output to file
fP = open(cache_file.storage_path, "w")
# FIXME: CANNOT use dfCache.tofile because it will print 815901601.00000
# as a gps time which is incompatible with the lal cache format
# (and the C codes) which demand an integer.
#dfCache.tofile(fP)
for entry in dfCache:
start = str(int(entry.segment[0]))
duration = str(int(abs(entry.segment)))
print("%s %s %s %s %s" \
% (entry.observatory, entry.description, start, duration, entry.url), file=fP)
entry.segment = segments.segment(int(entry.segment[0]), int(entry.segment[1]))
fP.close()
return dfCache, cache_file
def log_datafind_command(observatory, frameType, startTime, endTime,
outputDir, **dfKwargs):
"""
This command will print an equivalent gw_data_find command to disk that
can be used to debug why the internal datafind module is not working.
"""
# FIXME: This does not accurately reproduce the call as assuming the
# kwargs will be the same is wrong, so some things need to be converted
# "properly" to the command line equivalent.
gw_command = ['gw_data_find', '--observatory', observatory,
'--type', frameType,
'--gps-start-time', str(startTime),
'--gps-end-time', str(endTime)]
for name, value in dfKwargs.items():
if name == 'match':
gw_command.append("--match")
gw_command.append(str(value))
elif name == 'urltype':
gw_command.append("--url-type")
gw_command.append(str(value))
elif name == 'on_gaps':
pass
else:
errMsg = "Unknown datafind kwarg given: %s. " %(name)
errMsg+= "This argument is stripped in the logged .sh command."
            logging.warning(errMsg)
fileName = "%s-%s-%d-%d.sh" \
%(observatory, frameType, startTime, endTime-startTime)
filePath = os.path.join(outputDir, fileName)
fP = open(filePath, 'w')
fP.write(' '.join(gw_command))
fP.close()
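# Example of the command written by the function above (a sketch with
# placeholder frame type and GPS times; the real file is written as a single
# line):
#
#   gw_data_find --observatory H --type H1_EXAMPLE_FRAME_TYPE
#       --gps-start-time 1000000000 --gps-end-time 1000002048 --url-type file
#
# and would be saved as H-H1_EXAMPLE_FRAME_TYPE-1000000000-2048.sh in the
# given output directory.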
def datafind_keep_unique_backups(backup_outs, orig_outs):
"""This function will take a list of backup datafind files, presumably
obtained by querying a remote datafind server, e.g. CIT, and compares
these against a list of original datafind files, presumably obtained by
querying the local datafind server. Only the datafind files in the backup
list that do not appear in the original list are returned. This allows us
to use only files that are missing from the local cluster.
Parameters
-----------
backup_outs : FileList
List of datafind files from the remote datafind server.
orig_outs : FileList
List of datafind files from the local datafind server.
Returns
--------
FileList
List of datafind files in backup_outs and not in orig_outs.
"""
# NOTE: This function is not optimized and could be made considerably
# quicker if speed becomes in issue. With 4s frame files this might
# be slow, but for >1000s files I don't foresee any issue, so I keep
# this simple.
return_list = FileList([])
# We compare the LFNs to determine uniqueness
# Is there a way to associate two paths with one LFN??
orig_names = [f.name for f in orig_outs]
for file in backup_outs:
if file.name not in orig_names:
return_list.append(file)
else:
index_num = orig_names.index(file.name)
orig_out = orig_outs[index_num]
pfns = list(file.pfns)
# This shouldn't happen, but catch if it does
assert(len(pfns) == 1)
orig_out.add_pfn(pfns[0].url, site='notlocal')
return return_list
| 48,413
| 43.744917
| 92
|
py
|
pycbc
|
pycbc-master/pycbc/workflow/injection.py
|
# Copyright (C) 2015 Ian Harry, Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module is responsible for setting up the part of a pycbc workflow that
will generate the injection files to be used for assessing the workflow's
ability to detect predicted signals.
Full documentation for this module can be found here:
https://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/NOTYETCREATED.html
"""
import logging
import os.path
import configparser as ConfigParser
from pycbc.workflow.core import FileList, make_analysis_dir, Node
from pycbc.workflow.core import Executable, resolve_url_to_file
from pycbc.workflow.jobsetup import (LalappsInspinjExecutable,
PycbcCreateInjectionsExecutable, select_generic_executable)
def veto_injections(workflow, inj_file, veto_file, veto_name, out_dir, tags=None):
tags = [] if tags is None else tags
make_analysis_dir(out_dir)
node = Executable(workflow.cp, 'strip_injections', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_opt('--segment-name', veto_name)
node.add_input_opt('--veto-file', veto_file)
node.add_input_opt('--injection-file', inj_file)
node.add_opt('--ifos', ' '.join(workflow.ifos))
node.new_output_file_opt(workflow.analysis_time, '.xml', '--output-file')
workflow += node
return node.output_files[0]
class PyCBCOptimalSNRExecutable(Executable):
"""Compute optimal SNR for injections"""
current_retention_level = Executable.ALL_TRIGGERS
def create_node(self, workflow, inj_file, precalc_psd_files, group_str):
node = Node(self)
_, ext = os.path.splitext(inj_file.name)
node.add_input_opt('--input-file', inj_file)
node.add_opt('--injection-fraction-range', group_str)
node.add_input_list_opt('--time-varying-psds', precalc_psd_files)
node.new_output_file_opt(workflow.analysis_time, ext,
'--output-file')
return node
class PyCBCMergeHDFExecutable(Executable):
"""Merge HDF injection files executable class"""
current_retention_level = Executable.MERGED_TRIGGERS
def create_node(self, workflow, input_files):
node = Node(self)
node.add_input_list_opt('--injection-files', input_files)
node.new_output_file_opt(workflow.analysis_time, '.hdf',
'--output-file')
return node
def compute_inj_optimal_snr(workflow, inj_file, precalc_psd_files, out_dir,
tags=None):
"Set up a job for computing optimal SNRs of a sim_inspiral file."
if tags is None:
tags = []
try:
factor = int(workflow.cp.get_opt_tags('workflow-optimal-snr',
'parallelization-factor',
tags))
except Exception as e:
logging.warning(e)
factor = 1
if factor == 1:
# parallelization factor not given - default to single optimal snr job
opt_snr_exe = PyCBCOptimalSNRExecutable(workflow.cp, 'optimal_snr',
ifos=workflow.ifos,
out_dir=out_dir, tags=tags)
node = opt_snr_exe.create_node(workflow, inj_file,
precalc_psd_files, '0/1')
workflow += node
return node.output_files[0]
opt_snr_split_files = []
for i in range(factor):
group_str = '%s/%s' % (i, factor)
opt_snr_exe = PyCBCOptimalSNRExecutable(workflow.cp, 'optimal_snr',
ifos=workflow.ifos,
out_dir=out_dir,
tags=tags + [str(i)])
opt_snr_exe.update_current_retention_level(
Executable.INTERMEDIATE_PRODUCT)
node = opt_snr_exe.create_node(workflow, inj_file, precalc_psd_files,
group_str)
opt_snr_split_files += [node.output_files[0]]
workflow += node
hdfcombine_exe = PyCBCMergeHDFExecutable(
workflow.cp,
'optimal_snr_merge',
ifos=workflow.ifos,
out_dir=out_dir,
tags=tags
)
hdfcombine_node = hdfcombine_exe.create_node(
workflow,
opt_snr_split_files
)
workflow += hdfcombine_node
return hdfcombine_node.output_files[0]
def cut_distant_injections(workflow, inj_file, out_dir, tags=None):
"Set up a job for removing injections that are too distant to be seen"
if tags is None:
tags = []
node = Executable(workflow.cp, 'inj_cut', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--input', inj_file)
node.new_output_file_opt(workflow.analysis_time, '.xml', '--output-file')
workflow += node
return node.output_files[0]
def inj_to_hdf(workflow, inj_file, out_dir, tags=None):
""" Convert injection file to hdf format if not already one
"""
_, ext = os.path.splitext(inj_file.name)
if ext == '.hdf':
return inj_file
if tags is None:
tags = []
node = Executable(workflow.cp, 'inj2hdf', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--injection-file', inj_file)
node.new_output_file_opt(workflow.analysis_time, '.hdf', '--output-file')
workflow += node
return node.output_file
def setup_injection_workflow(workflow, output_dir=None,
inj_section_name='injections', tags=None):
"""
This function is the gateway for setting up injection-generation jobs in a
workflow. It should be possible for this function to support a number
    of different ways/codes that could be used for doing this. However, as this
    will presumably stay as a single call to a single code (which need not be
    inspinj) there are currently no subfunctions in this module.
Parameters
-----------
workflow : pycbc.workflow.core.Workflow
The Workflow instance that the coincidence jobs will be added to.
output_dir : path
The directory in which injection files will be stored.
inj_section_name : string (optional, default='injections')
The string that corresponds to the option describing the exe location
in the [executables] section of the .ini file and that corresponds to
the section (and sub-sections) giving the options that will be given to
the code at run time.
tags : list of strings (optional, default = [])
A list of the tagging strings that will be used for all jobs created
by this call to the workflow. This will be used in output names.
Returns
--------
inj_files : pycbc.workflow.core.FileList
The list of injection files created by this call.
inj_tags : list of strings
The tag corresponding to each injection file and used to uniquely
identify them. The FileList class contains functions to search
based on tags.
"""
if tags is None:
tags = []
logging.info("Entering injection module.")
make_analysis_dir(output_dir)
# Get full analysis segment for output file naming
full_segment = workflow.analysis_time
# Identify which injections to do by presence of sub-sections in
# the configuration file
inj_tags = []
inj_files = FileList([])
for section in workflow.cp.get_subsections(inj_section_name):
inj_tag = section.upper()
curr_tags = tags + [inj_tag]
# Parse for options in ini file
injection_method = workflow.cp.get_opt_tags("workflow-injections",
"injections-method",
curr_tags)
if injection_method in ["IN_WORKFLOW", "AT_RUNTIME"]:
exe = select_generic_executable(workflow, 'injections')
inj_job = exe(workflow.cp, inj_section_name,
out_dir=output_dir, ifos='HL',
tags=curr_tags)
if exe is PycbcCreateInjectionsExecutable:
config_url = workflow.cp.get('workflow-injections',
section+'-config-file')
config_file = resolve_url_to_file(config_url)
node, inj_file = inj_job.create_node(config_file)
else:
node = inj_job.create_node(full_segment)
if injection_method == "AT_RUNTIME":
workflow.execute_node(node)
else:
workflow.add_node(node)
inj_file = node.output_files[0]
inj_files.append(inj_file)
elif injection_method == "PREGENERATED":
file_attrs = {
'ifos': ['HL'],
'segs': full_segment,
'tags': curr_tags
}
injection_path = workflow.cp.get_opt_tags(
"workflow-injections",
"injections-pregenerated-file",
curr_tags
)
curr_file = resolve_url_to_file(injection_path, attrs=file_attrs)
inj_files.append(curr_file)
else:
err = "Injection method must be one of IN_WORKFLOW, "
err += "AT_RUNTIME or PREGENERATED. Got %s." % (injection_method)
raise ValueError(err)
inj_tags.append(inj_tag)
logging.info("Leaving injection module.")
return inj_files, inj_tags
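# A minimal configuration sketch for the function above; section contents and
# file paths are placeholders. Each subsection of [injections] defines one
# injection set, and its generation method is read from the
# [workflow-injections] section (optionally tagged per set):
#
#   [injections-bnsinj]
#   ; options passed to the injection executable for the BNSINJ set
#
#   [workflow-injections]
#   injections-method = IN_WORKFLOW
#
#   ; a pregenerated set would instead use
#   ;   injections-method = PREGENERATED
#   ;   injections-pregenerated-file = /path/to/injections.xml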
| 10,485
| 39.176245
| 82
|
py
|
pycbc
|
pycbc-master/pycbc/inference/geweke.py
|
# Copyright (C) 2017 Christopher M. Biwer
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" Functions for computing the Geweke convergence statistic.
"""
import numpy
def geweke(x, seg_length, seg_stride, end_idx, ref_start,
ref_end=None, seg_start=0):
""" Calculates Geweke conervergence statistic for a chain of data.
This function will advance along the chain and calculate the
statistic for each step.
Parameters
----------
x : numpy.array
A one-dimensional array of data.
seg_length : int
Number of samples to use for each Geweke calculation.
seg_stride : int
Number of samples to advance before next Geweke calculation.
end_idx : int
        Last index (exclusive) at which a moving segment may start.
ref_start : int
Index of beginning of end reference segment.
ref_end : int
Index of end of end reference segment. Default is None which
will go to the end of the data array.
seg_start : int
What index to start computing the statistic. Default is 0 which
will go to the beginning of the data array.
Returns
-------
    starts : numpy.array
        The start indices of the moving segments along the chain.
    ends : numpy.array
        The end indices of the moving segments along the chain.
    stats : numpy.array
        The Geweke convergence diagnostic statistic for each segment.
"""
# lists to hold statistic and end index
stats = []
ends = []
# get the beginning of all segments
starts = numpy.arange(seg_start, end_idx, seg_stride)
# get second segment of data at the end to compare
x_end = x[ref_start:ref_end]
# loop over all segments
for start in starts:
# find the end of the first segment
x_start_end = int(start + seg_length)
# get first segment
x_start = x[start:x_start_end]
# compute statistic
stats.append((x_start.mean() - x_end.mean()) / numpy.sqrt(
x_start.var() + x_end.var()))
# store end of first segment
ends.append(x_start_end)
return numpy.array(starts), numpy.array(ends), numpy.array(stats)
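# A minimal, self-contained usage sketch with arbitrary segment sizes; this
# helper is illustrative only and is not called anywhere in the module.
def _example_geweke_usage():
    """Run the Geweke statistic on a synthetic, stationary chain."""
    rng = numpy.random.RandomState(0)
    chain = rng.normal(size=10000)
    # compare 1000-sample moving segments, advanced in steps of 500, against
    # the final 2000 samples of the chain
    starts, ends, stats = geweke(chain, seg_length=1000, seg_stride=500,
                                 end_idx=5000, ref_start=8000)
    # for a well-mixed chain the statistic should stay close to zero
    return starts, ends, stats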
| 2,802
| 32.369048
| 75
|
py
|
pycbc
|
pycbc-master/pycbc/inference/gelman_rubin.py
|
# Copyright (C) 2017 Christopher M. Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
""" This modules provides functions for evaluating the Gelman-Rubin convergence
diagnostic statistic.
"""
import numpy
def walk(chains, start, end, step):
""" Calculates Gelman-Rubin conervergence statistic along chains of data.
This function will advance along the chains and calculate the
statistic for each step.
Parameters
----------
chains : iterable
An iterable of numpy.array instances that contain the samples
for each chain. Each chain has shape (nparameters, niterations).
start : float
Start index of blocks to calculate all statistics.
end : float
Last index of blocks to calculate statistics.
step : float
Step size to take for next block.
Returns
-------
starts : numpy.array
1-D array of start indexes of calculations.
ends : numpy.array
        1-D array of end indexes of calculations.
stats : numpy.array
Array with convergence statistic. It has
shape (nparameters, ncalculations).
"""
# get number of chains, parameters, and iterations
chains = numpy.array(chains)
_, nparameters, _ = chains.shape
# get end index of blocks
ends = numpy.arange(start, end, step)
stats = numpy.zeros((nparameters, len(ends)))
# get start index of blocks
starts = numpy.array(len(ends) * [start])
# loop over end indexes and calculate statistic
for i, e in enumerate(ends):
tmp = chains[:, :, 0:e]
stats[:, i] = gelman_rubin(tmp)
return starts, ends, stats
def gelman_rubin(chains, auto_burn_in=True):
""" Calculates the univariate Gelman-Rubin convergence statistic
which compares the evolution of multiple chains in a Markov-Chain Monte
Carlo process and computes their difference to determine their convergence.
The between-chain and within-chain variances are computed for each sampling
parameter, and a weighted combination of the two is used to determine the
    convergence. As the chains converge, the potential scale reduction factor
should go to 1.
Parameters
----------
chains : iterable
An iterable of numpy.array instances that contain the samples
for each chain. Each chain has shape (nparameters, niterations).
auto_burn_in : bool
If True, then only use later half of samples provided.
Returns
-------
psrf : numpy.array
A numpy.array of shape (nparameters) that has the point estimates of
the potential scale reduction factor.
"""
# remove first half of samples
# this will have shape (nchains, nparameters, niterations)
if auto_burn_in:
_, _, niterations = numpy.array(chains).shape
chains = numpy.array([chain[:, niterations // 2 + 1:]
for chain in chains])
# get number of chains, parameters, and iterations
chains = numpy.array(chains)
nchains, nparameters, niterations = chains.shape
# calculate the covariance matrix for each chain
# this will have shape (nchains, nparameters, nparameters)
chains_covs = numpy.array([numpy.cov(chain) for chain in chains])
if nparameters == 1:
chains_covs = chains_covs.reshape((nchains, 1, 1))
# calculate W the within-chain variance
# this will have shape (nparameters, nparameters)
w = numpy.zeros(chains_covs[0].shape)
for i, row in enumerate(chains_covs[0]):
for j, _ in enumerate(row):
w[i, j] = numpy.mean(chains_covs[:, i, j])
if nparameters == 1:
w = w.reshape((1, 1))
# calculate B the between-chain variance
# this will have shape (nparameters, nparameters)
means = numpy.zeros((nparameters, nchains))
for i, chain in enumerate(chains):
means[:, i] = numpy.mean(chain, axis=1).transpose()
b = niterations * numpy.cov(means)
if nparameters == 1:
b = b.reshape((1, 1))
# get diagonal elements of W and B
# these will have shape (nparameters)
w_diag = numpy.diag(w)
b_diag = numpy.diag(b)
# get variance for each chain
# this will have shape (nparameters, nchains)
var = numpy.zeros((nparameters, nchains))
for i, chain_cov in enumerate(chains_covs):
var[:, i] = numpy.diag(chain_cov)
# get mean of means
# this will have shape (nparameters)
mu_hat = numpy.mean(means, axis=1)
# get variance of variances
# this will have shape (nparameters)
s = numpy.var(var, axis=1)
# get V the combined variance of all chains
# this will have shape (nparameters)
v = ((niterations - 1.) * w_diag / niterations +
(1. + 1. / nchains) * b_diag / niterations)
# get factors in variance of V calculation
# this will have shape (nparameters)
k = 2 * b_diag**2 / (nchains - 1)
mid_term = numpy.cov(
var, means**2)[nparameters:2*nparameters, 0:nparameters].T
end_term = numpy.cov(
var, means)[nparameters:2*nparameters, 0:nparameters].T
wb = niterations / nchains * numpy.diag(mid_term - 2 * mu_hat * end_term)
# get variance of V
# this will have shape (nparameters)
var_v = (
(niterations - 1.) ** 2 * s +
(1. + 1. / nchains) ** 2 * k +
2. * (niterations - 1.) * (1. + 1. / nchains) * wb
) / niterations**2
# get degrees of freedom
# this will have shape (nparameters)
dof = (2. * v**2) / var_v
# more degrees of freedom factors
# this will have shape (nparameters)
df_adj = (dof + 3.) / (dof + 1.)
# estimate R
# this will have shape (nparameters)
r2_fixed = (niterations - 1.) / niterations
r2_random = (1. + 1. / nchains) * (1. / niterations) * (b_diag / w_diag)
r2_estimate = r2_fixed + r2_random
# calculate PSRF the potential scale reduction factor
# this will have shape (nparameters)
psrf = numpy.sqrt(r2_estimate * df_adj)
return psrf
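# A minimal, self-contained usage sketch with arbitrary chain sizes; this
# helper is illustrative only and is not called anywhere in the module.
def _example_gelman_rubin_usage():
    """Compute the PSRF for synthetic chains drawn from one distribution."""
    rng = numpy.random.RandomState(0)
    # four chains, two parameters, 1000 iterations each; since all chains
    # sample the same distribution, the PSRF should be close to 1
    chains = [rng.normal(size=(2, 1000)) for _ in range(4)]
    return gelman_rubin(chains)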
| 6,652
| 34.388298
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/burn_in.py
|
# Copyright (C) 2017 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This modules provides classes and functions for determining when Markov Chains
have burned in.
"""
import logging
from abc import ABCMeta, abstractmethod
import numpy
from scipy.stats import ks_2samp
from pycbc.io.record import get_vars_from_arg
# The value to use for a burn-in iteration if a chain is not burned in
NOT_BURNED_IN_ITER = -1
#
# =============================================================================
#
# Convenience functions
#
# =============================================================================
#
def ks_test(samples1, samples2, threshold=0.9):
"""Applies a KS test to determine if two sets of samples are the same.
The ks test is applied parameter-by-parameter. If the two-tailed p-value
returned by the test is greater than ``threshold``, the samples are
considered to be the same.
Parameters
----------
    samples1 : dict
        Dictionary mapping parameters to the first set of samples.
    samples2 : dict
        Dictionary mapping parameters to the second set of samples.
threshold : float
        The threshold to use for the p-value. Default is 0.9.
Returns
-------
dict :
Dictionary mapping parameter names to booleans indicating whether the
given parameter passes the KS test.
"""
is_the_same = {}
assert set(samples1.keys()) == set(samples2.keys()), (
"samples1 and 2 must have the same parameters")
# iterate over the parameters
for param in samples1:
s1 = samples1[param]
s2 = samples2[param]
_, p_value = ks_2samp(s1, s2)
is_the_same[param] = p_value > threshold
return is_the_same
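# A minimal, self-contained usage sketch; the parameter names and sample
# sizes are arbitrary and the helper is not called anywhere in the module.
def _example_ks_test_usage():
    """Compare two synthetic sample sets parameter-by-parameter."""
    rng = numpy.random.RandomState(0)
    samples1 = {'loglikelihood': rng.normal(size=500),
                'logprior': rng.normal(size=500)}
    samples2 = {'loglikelihood': rng.normal(size=500),
                'logprior': rng.normal(size=500)}
    # returns {param: bool}; True means the two marginal distributions are
    # consistent at the given p-value threshold
    return ks_test(samples1, samples2, threshold=0.9)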
def max_posterior(lnps_per_walker, dim):
"""Burn in based on samples being within dim/2 of maximum posterior.
Parameters
----------
lnps_per_walker : 2D array
Array of values that are proportional to the log posterior values. Must
have shape ``nwalkers x niterations``.
dim : int
The dimension of the parameter space.
Returns
-------
burn_in_idx : array of int
        The burn in indices of each walker. If a walker is not burned in, its
        index will be set to ``NOT_BURNED_IN_ITER``.
is_burned_in : array of bool
Whether or not a walker is burned in.
"""
if len(lnps_per_walker.shape) != 2:
raise ValueError("lnps_per_walker must have shape "
"nwalkers x niterations")
# find the value to compare against
max_p = lnps_per_walker.max()
criteria = max_p - dim/2.
nwalkers, _ = lnps_per_walker.shape
burn_in_idx = numpy.empty(nwalkers, dtype=int)
is_burned_in = numpy.empty(nwalkers, dtype=bool)
# find the first iteration in each chain where the logpost has exceeded
# max_p - dim/2
for ii in range(nwalkers):
chain = lnps_per_walker[ii, :]
passedidx = numpy.where(chain >= criteria)[0]
is_burned_in[ii] = passedidx.size > 0
if is_burned_in[ii]:
burn_in_idx[ii] = passedidx[0]
else:
burn_in_idx[ii] = NOT_BURNED_IN_ITER
return burn_in_idx, is_burned_in
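# A minimal, self-contained usage sketch with arbitrary chain sizes; this
# helper is illustrative only and is not called anywhere in the module.
def _example_max_posterior_usage():
    """Apply the max-posterior test to synthetic log posteriors."""
    rng = numpy.random.RandomState(0)
    nwalkers, niterations, dim = 10, 200, 4
    # log posteriors that rise towards a plateau, mimicking a typical burn in
    lnps = numpy.linspace(-50., 0., niterations) + rng.normal(
        scale=0.1, size=(nwalkers, niterations))
    # each walker burns in once its logpost first exceeds the maximum - dim/2
    return max_posterior(lnps, dim)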
def posterior_step(logposts, dim):
"""Finds the last time a chain made a jump > dim/2.
Parameters
----------
logposts : array
1D array of values that are proportional to the log posterior values.
dim : int
The dimension of the parameter space.
Returns
-------
int
The index of the last time the logpost made a jump > dim/2. If that
never happened, returns 0.
"""
if logposts.ndim > 1:
raise ValueError("logposts must be a 1D array")
criteria = dim/2.
dp = numpy.diff(logposts)
indices = numpy.where(dp >= criteria)[0]
if indices.size > 0:
idx = indices[-1] + 1
else:
idx = 0
return idx
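# A minimal, self-contained usage sketch; the chain values are arbitrary and
# the helper is not called anywhere in the module.
def _example_posterior_step_usage():
    """Find the last large jump in a synthetic chain of log posteriors."""
    logposts = numpy.concatenate([numpy.full(50, -100.),
                                  numpy.full(150, -10.)])
    # the jump of 90 between indices 49 and 50 exceeds dim/2 = 2, so the
    # function returns 50
    return posterior_step(logposts, dim=4)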
def nacl(nsamples, acls, nacls=5):
"""Burn in based on ACL.
This applies the following test to determine burn in:
1. The first half of the chain is ignored.
2. An ACL is calculated from the second half.
3. If ``nacls`` times the ACL is < the length of the chain / 2,
the chain is considered to be burned in at the half-way point.
Parameters
----------
nsamples : int
        The number of samples in the chain(s).
acls : dict
Dictionary of parameter -> ACL(s). The ACLs for each parameter may
be an integer or an array of integers (for multiple chains).
nacls : int, optional
The number of ACLs the chain(s) must have gone past the halfway point
in order to be considered burned in. Default is 5.
Returns
-------
dict
Dictionary of parameter -> boolean(s) indicating if the chain(s) pass
the test. If an array of values was provided for the acls, the values
will be arrays of booleans.
"""
kstart = int(nsamples / 2.)
return {param: (nacls * acl) < kstart for (param, acl) in acls.items()}
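# A minimal, self-contained usage sketch; the parameter names and ACLs are
# arbitrary and the helper is not called anywhere in the module.
def _example_nacl_usage():
    """Apply the nacl test to example autocorrelation lengths."""
    nsamples = 10000
    acls = {'mass1': 400, 'mass2': 1200}
    # with the default nacls=5, a parameter passes if 5 * ACL < nsamples / 2;
    # here 'mass1' passes (2000 < 5000) while 'mass2' fails (6000 >= 5000)
    return nacl(nsamples, acls)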
def evaluate_tests(burn_in_test, test_is_burned_in, test_burn_in_iter):
"""Evaluates burn in data from multiple tests.
The iteration to use for burn-in depends on the logic in the burn-in
test string. For example, if the test was 'max_posterior | nacl' and
max_posterior burned-in at iteration 5000 while nacl burned in at
iteration 6000, we'd want to use 5000 as the burn-in iteration.
However, if the test was 'max_posterior & nacl', we'd want to use
6000 as the burn-in iteration. This function handles all cases by
doing the following: first, take the collection of burn in iterations
from all the burn in tests that were applied. Next, cycle over the
iterations in increasing order, checking which tests have burned in
by that point. Then evaluate the burn-in string at that point to see
if it passes, and if so, what the iteration is. The first point that
the test passes is used as the burn-in iteration.
Parameters
----------
burn_in_test : str
The test to apply; e.g., ``'max_posterior & nacl'``.
test_is_burned_in : dict
Dictionary of test name -> boolean indicating whether a specific burn
in test has passed.
test_burn_in_iter : dict
Dictionary of test name -> int indicating when a specific test burned
in.
Returns
-------
is_burned_in : bool
Whether or not the data passes all burn in tests.
    burn_in_iteration : int
The iteration at which all the tests pass. If the tests did not all
pass (``is_burned_in`` is false), then returns
:py:data:`NOT_BURNED_IN_ITER`.
"""
burn_in_iters = numpy.unique(list(test_burn_in_iter.values()))
burn_in_iters.sort()
for ii in burn_in_iters:
test_results = {t: (test_is_burned_in[t] &
                            (0 <= test_burn_in_iter[t] <= ii))
for t in test_is_burned_in}
is_burned_in = eval(burn_in_test, {"__builtins__": None},
test_results)
if is_burned_in:
break
if not is_burned_in:
ii = NOT_BURNED_IN_ITER
return is_burned_in, ii
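# A minimal, self-contained usage sketch; the test names and iterations are
# arbitrary and the helper is not called anywhere in the module.
def _example_evaluate_tests_usage():
    """Combine two hypothetical burn-in test results with OR logic."""
    test_is_burned_in = {'max_posterior': True, 'nacl': True}
    test_burn_in_iter = {'max_posterior': 5000, 'nacl': 6000}
    # with the OR test the earlier iteration (5000) is used; with
    # 'max_posterior & nacl' the later one (6000) would be returned instead
    return evaluate_tests('max_posterior | nacl',
                          test_is_burned_in, test_burn_in_iter)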
#
# =============================================================================
#
# Burn in classes
#
# =============================================================================
#
class BaseBurnInTests(metaclass=ABCMeta):
"""Base class for burn in tests."""
available_tests = ('halfchain', 'min_iterations', 'max_posterior',
'posterior_step', 'nacl',
)
# pylint: disable=unnecessary-pass
def __init__(self, sampler, burn_in_test, **kwargs):
self.sampler = sampler
# determine the burn-in tests that are going to be done
self.do_tests = get_vars_from_arg(burn_in_test)
self.burn_in_test = burn_in_test
self.is_burned_in = False
self.burn_in_iteration = NOT_BURNED_IN_ITER
self.test_is_burned_in = {} # burn in status per test
self.test_burn_in_iteration = {} # burn in iter per test
self.test_aux_info = {} # any additional information the test stores
# Arguments specific to each test...
# for nacl:
self._nacls = int(kwargs.pop('nacls', 5))
# for max_posterior and posterior_step
self._ndim = int(kwargs.pop('ndim', len(sampler.variable_params)))
# for min iterations
self._min_iterations = int(kwargs.pop('min_iterations', 0))
@abstractmethod
def burn_in_index(self, filename):
"""The burn in index (retrieved from the iteration).
This is an abstract method because how this is evaluated depends on
if this is an ensemble MCMC or not.
"""
pass
def _getniters(self, filename):
"""Convenience function to get the number of iterations in the file.
If `niterations` hasn't been written to the file yet, just returns 0.
"""
with self.sampler.io(filename, 'r') as fp:
try:
niters = fp.niterations
except KeyError:
niters = 0
return niters
def _getnsamples(self, filename):
"""Convenience function to get the number of samples saved in the file.
If no samples have been written to the file yet, just returns 0.
"""
with self.sampler.io(filename, 'r') as fp:
try:
group = fp[fp.samples_group]
# we'll just use the first parameter
params = list(group.keys())
nsamples = group[params[0]].shape[-1]
except (KeyError, IndexError):
nsamples = 0
return nsamples
def _index2iter(self, filename, index):
"""Converts the index in some samples at which burn in occurs to the
iteration of the sampler that corresponds to.
"""
with self.sampler.io(filename, 'r') as fp:
thin_interval = fp.thinned_by
return index * thin_interval
def _iter2index(self, filename, iteration):
"""Converts an iteration to the index it corresponds to.
"""
with self.sampler.io(filename, 'r') as fp:
thin_interval = fp.thinned_by
return iteration // thin_interval
def _getlogposts(self, filename):
"""Convenience function for retrieving log posteriors.
Parameters
----------
filename : str
The file to read.
Returns
-------
array
The log posterior values. They are not flattened, so have dimension
nwalkers x niterations.
"""
with self.sampler.io(filename, 'r') as fp:
samples = fp.read_raw_samples(
['loglikelihood', 'logprior'], thin_start=0, thin_interval=1,
flatten=False)
logposts = samples['loglikelihood'] + samples['logprior']
return logposts
def _getacls(self, filename, start_index):
"""Convenience function for calculating acls for the given filename.
"""
return self.sampler.compute_acl(filename, start_index=start_index)
def _getaux(self, test):
"""Convenience function for getting auxilary information.
Parameters
----------
test : str
            The name of the test to retrieve auxiliary information about.
Returns
-------
dict
The ``test_aux_info[test]`` dictionary. If a dictionary does
not exist yet for the given test, an empty dictionary will be
created and saved to ``test_aux_info[test]``.
"""
try:
aux = self.test_aux_info[test]
except KeyError:
aux = self.test_aux_info[test] = {}
return aux
def halfchain(self, filename):
"""Just uses half the chain as the burn-in iteration.
"""
niters = self._getniters(filename)
# this test cannot determine when something will burn in
# only when it was not burned in in the past
self.test_is_burned_in['halfchain'] = True
self.test_burn_in_iteration['halfchain'] = niters//2
def min_iterations(self, filename):
"""Just checks that the sampler has been run for the minimum number
of iterations.
"""
niters = self._getniters(filename)
is_burned_in = self._min_iterations < niters
if is_burned_in:
burn_in_iter = self._min_iterations
else:
burn_in_iter = NOT_BURNED_IN_ITER
self.test_is_burned_in['min_iterations'] = is_burned_in
self.test_burn_in_iteration['min_iterations'] = burn_in_iter
@abstractmethod
def max_posterior(self, filename):
"""Carries out the max posterior test and stores the results."""
pass
@abstractmethod
def posterior_step(self, filename):
"""Carries out the posterior step test and stores the results."""
pass
@abstractmethod
def nacl(self, filename):
"""Carries out the nacl test and stores the results."""
pass
@abstractmethod
def evaluate(self, filename):
"""Performs all tests and evaluates the results to determine if and
when all tests pass.
"""
pass
def write(self, fp, path=None):
"""Writes burn-in info to an open HDF file.
Parameters
----------
fp : pycbc.inference.io.base.BaseInferenceFile
Open HDF file to write the data to. The HDF file should be an
instance of a pycbc BaseInferenceFile.
path : str, optional
Path in the HDF file to write the data to. Default is (None) is
to write to the path given by the file's ``sampler_group``
attribute.
"""
if path is None:
path = fp.sampler_group
fp.write_data('burn_in_test', self.burn_in_test, path)
fp.write_data('is_burned_in', self.is_burned_in, path)
fp.write_data('burn_in_iteration', self.burn_in_iteration, path)
testgroup = 'burn_in_tests'
# write individual test data
for tst in self.do_tests:
subpath = '/'.join([path, testgroup, tst])
fp.write_data('is_burned_in', self.test_is_burned_in[tst], subpath)
fp.write_data('burn_in_iteration',
self.test_burn_in_iteration[tst],
subpath)
# write auxiliary info
if tst in self.test_aux_info:
for name, data in self.test_aux_info[tst].items():
fp.write_data(name, data, subpath)
@staticmethod
def _extra_tests_from_config(cp, section, tag):
"""For loading class-specific tests."""
# pylint: disable=unused-argument
return {}
@classmethod
def from_config(cls, cp, sampler):
"""Loads burn in from section [sampler-burn_in]."""
section = 'sampler'
tag = 'burn_in'
burn_in_test = cp.get_opt_tag(section, 'burn-in-test', tag)
kwargs = {}
if cp.has_option_tag(section, 'nacl', tag):
kwargs['nacl'] = int(cp.get_opt_tag(section, 'nacl', tag))
if cp.has_option_tag(section, 'ndim', tag):
kwargs['ndim'] = int(
cp.get_opt_tag(section, 'ndim', tag))
if cp.has_option_tag(section, 'min-iterations', tag):
kwargs['min_iterations'] = int(
cp.get_opt_tag(section, 'min-iterations', tag))
# load any class specific tests
kwargs.update(cls._extra_tests_from_config(cp, section, tag))
return cls(sampler, burn_in_test, **kwargs)
class MCMCBurnInTests(BaseBurnInTests):
"""Burn-in tests for collections of independent MCMC chains.
This differs from EnsembleMCMCBurnInTests in that chains are treated as
being independent of each other. The ``is_burned_in`` attribute will be
True if `any` chain passes the burn in tests (whereas in MCMCBurnInTests,
all chains must pass the burn in tests). In other words, independent
samples can be collected even if all of the chains are not burned in.
"""
def __init__(self, sampler, burn_in_test, **kwargs):
super(MCMCBurnInTests, self).__init__(sampler, burn_in_test, **kwargs)
try:
nchains = sampler.nchains
except AttributeError:
nchains = sampler.nwalkers
self.nchains = nchains
self.is_burned_in = numpy.zeros(self.nchains, dtype=bool)
self.burn_in_iteration = numpy.repeat(NOT_BURNED_IN_ITER, self.nchains)
def burn_in_index(self, filename):
"""The burn in index (retrieved from the iteration)."""
burn_in_index = self._iter2index(filename, self.burn_in_iteration)
# don't set if it isn't burned in
burn_in_index[~self.is_burned_in] = NOT_BURNED_IN_ITER
return burn_in_index
def max_posterior(self, filename):
"""Applies max posterior test."""
logposts = self._getlogposts(filename)
burn_in_idx, is_burned_in = max_posterior(logposts, self._ndim)
# convert index to iterations
burn_in_iter = self._index2iter(filename, burn_in_idx)
burn_in_iter[~is_burned_in] = NOT_BURNED_IN_ITER
# save
test = 'max_posterior'
self.test_is_burned_in[test] = is_burned_in
self.test_burn_in_iteration[test] = burn_in_iter
def posterior_step(self, filename):
"""Applies the posterior-step test."""
logposts = self._getlogposts(filename)
burn_in_idx = numpy.array([posterior_step(logps, self._ndim)
for logps in logposts])
# this test cannot determine when something will burn in
# only when it was not burned in in the past
test = 'posterior_step'
if test not in self.test_is_burned_in:
self.test_is_burned_in[test] = numpy.ones(self.nchains, dtype=bool)
# convert index to iterations
self.test_burn_in_iteration[test] = self._index2iter(filename,
burn_in_idx)
def nacl(self, filename):
"""Applies the :py:func:`nacl` test."""
nsamples = self._getnsamples(filename)
acls = self._getacls(filename, start_index=nsamples//2)
is_burned_in = nacl(nsamples, acls, self._nacls)
# stack the burn in results into an nparams x nchains array
burn_in_per_chain = numpy.stack(list(is_burned_in.values())).all(
axis=0)
# store
test = 'nacl'
self.test_is_burned_in[test] = burn_in_per_chain
try:
burn_in_iter = self.test_burn_in_iteration[test]
except KeyError:
# hasn't been stored yet
burn_in_iter = numpy.repeat(NOT_BURNED_IN_ITER, self.nchains)
self.test_burn_in_iteration[test] = burn_in_iter
burn_in_iter[burn_in_per_chain] = self._index2iter(filename,
nsamples//2)
# add the status for each parameter as additional information
self.test_aux_info[test] = is_burned_in
def evaluate(self, filename):
"""Runs all of the burn-in tests."""
# evaluate all the tests
for tst in self.do_tests:
logging.info("Evaluating %s burn-in test", tst)
getattr(self, tst)(filename)
# evaluate each chain at a time
for ci in range(self.nchains):
# some tests (like halfchain) just store a single bool for all
# chains
tibi = {t: r[ci] if isinstance(r, numpy.ndarray) else r
for t, r in self.test_is_burned_in.items()}
tbi = {t: r[ci] if isinstance(r, numpy.ndarray) else r
for t, r in self.test_burn_in_iteration.items()}
is_burned_in, burn_in_iter = evaluate_tests(self.burn_in_test,
tibi, tbi)
self.is_burned_in[ci] = is_burned_in
self.burn_in_iteration[ci] = burn_in_iter
logging.info("Number of chains burned in: %i of %i",
self.is_burned_in.sum(), self.nchains)
def write(self, fp, path=None):
"""Writes burn-in info to an open HDF file.
Parameters
----------
fp : pycbc.inference.io.base.BaseInferenceFile
Open HDF file to write the data to. The HDF file should be an
instance of a pycbc BaseInferenceFile.
path : str, optional
Path in the HDF file to write the data to. Default is (None) is
to write to the path given by the file's ``sampler_group``
attribute.
"""
if path is None:
path = fp.sampler_group
super(MCMCBurnInTests, self).write(fp, path)
# add number of chains burned in as additional metadata
fp.write_data('nchains_burned_in', self.is_burned_in.sum(), path)
class MultiTemperedMCMCBurnInTests(MCMCBurnInTests):
"""Adds support for multiple temperatures to
:py:class:`MCMCBurnInTests`.
"""
def _getacls(self, filename, start_index):
"""Convenience function for calculating acls for the given filename.
        This function is used by the ``nacl`` burn-in test. That test
        expects the returned ``acls`` dict to report one ACL per chain for
        each parameter. Since multi-tempered samplers return an
        ntemps x nchains array of ACLs for each parameter, this takes the
        max over the temperatures before returning.
Since we calculate the acls, this will also store it to the sampler.
Parameters
----------
filename : str
Name of the file to retrieve samples from.
start_index : int
Index to start calculating ACLs.
Returns
-------
dict :
Dictionary of parameter names -> array giving ACL for each chain.
"""
acls = super(MultiTemperedMCMCBurnInTests, self)._getacls(
filename, start_index)
# acls will have shape ntemps x nchains, flatten to nchains
return {param: vals.max(axis=0) for (param, vals) in acls.items()}
def _getlogposts(self, filename):
"""Convenience function for retrieving log posteriors.
This just gets the coldest temperature chain, and returns arrays with
shape nwalkers x niterations, so the parent class can run the same
``posterior_step`` function.
"""
return _multitemper_getlogposts(self.sampler, filename)
class EnsembleMCMCBurnInTests(BaseBurnInTests):
"""Provides methods for estimating burn-in of an ensemble MCMC."""
available_tests = ('halfchain', 'min_iterations', 'max_posterior',
'posterior_step', 'nacl', 'ks_test',
)
def __init__(self, sampler, burn_in_test, **kwargs):
super(EnsembleMCMCBurnInTests, self).__init__(
sampler, burn_in_test, **kwargs)
# for kstest
self._ksthreshold = float(kwargs.pop('ks_threshold', 0.9))
def burn_in_index(self, filename):
"""The burn in index (retrieved from the iteration)."""
if self.is_burned_in:
index = self._iter2index(filename, self.burn_in_iteration)
else:
index = NOT_BURNED_IN_ITER
return index
def max_posterior(self, filename):
"""Applies max posterior test to self."""
logposts = self._getlogposts(filename)
burn_in_idx, is_burned_in = max_posterior(logposts, self._ndim)
all_burned_in = is_burned_in.all()
if all_burned_in:
burn_in_iter = self._index2iter(filename, burn_in_idx.max())
else:
burn_in_iter = NOT_BURNED_IN_ITER
# store
test = 'max_posterior'
self.test_is_burned_in[test] = all_burned_in
self.test_burn_in_iteration[test] = burn_in_iter
aux = self._getaux(test)
# additional info
aux['iteration_per_walker'] = self._index2iter(filename, burn_in_idx)
aux['status_per_walker'] = is_burned_in
def posterior_step(self, filename):
"""Applies the posterior-step test."""
logposts = self._getlogposts(filename)
burn_in_idx = numpy.array([posterior_step(logps, self._ndim)
for logps in logposts])
burn_in_iters = self._index2iter(filename, burn_in_idx)
# this test cannot determine when something will burn in
# only when it was not burned in in the past
test = 'posterior_step'
self.test_is_burned_in[test] = True
self.test_burn_in_iteration[test] = burn_in_iters.max()
# store the iteration per walker as additional info
aux = self._getaux(test)
aux['iteration_per_walker'] = burn_in_iters
def nacl(self, filename):
"""Applies the :py:func:`nacl` test."""
nsamples = self._getnsamples(filename)
acls = self._getacls(filename, start_index=nsamples//2)
is_burned_in = nacl(nsamples, acls, self._nacls)
all_burned_in = all(is_burned_in.values())
if all_burned_in:
burn_in_iter = self._index2iter(filename, nsamples//2)
else:
burn_in_iter = NOT_BURNED_IN_ITER
# store
test = 'nacl'
self.test_is_burned_in[test] = all_burned_in
self.test_burn_in_iteration[test] = burn_in_iter
# store the status per parameter as additional info
aux = self._getaux(test)
aux['status_per_parameter'] = is_burned_in
def ks_test(self, filename):
"""Applies ks burn-in test."""
nsamples = self._getnsamples(filename)
with self.sampler.io(filename, 'r') as fp:
# get the samples from the mid point
samples1 = fp.read_raw_samples(
['loglikelihood', 'logprior'], iteration=int(nsamples/2.))
# get the last samples
samples2 = fp.read_raw_samples(
['loglikelihood', 'logprior'], iteration=-1)
# do the test
# is_the_same is a dictionary of params --> bool indicating whether or
# not the 1D marginal is the same at the half way point
is_the_same = ks_test(samples1, samples2, threshold=self._ksthreshold)
is_burned_in = all(is_the_same.values())
if is_burned_in:
burn_in_iter = self._index2iter(filename, int(nsamples//2))
else:
burn_in_iter = NOT_BURNED_IN_ITER
# store
test = 'ks_test'
self.test_is_burned_in[test] = is_burned_in
self.test_burn_in_iteration[test] = burn_in_iter
# store the test per parameter as additional info
aux = self._getaux(test)
aux['status_per_parameter'] = is_the_same
def evaluate(self, filename):
"""Runs all of the burn-in tests."""
# evaluate all the tests
for tst in self.do_tests:
logging.info("Evaluating %s burn-in test", tst)
getattr(self, tst)(filename)
is_burned_in, burn_in_iter = evaluate_tests(
self.burn_in_test, self.test_is_burned_in,
self.test_burn_in_iteration)
self.is_burned_in = is_burned_in
self.burn_in_iteration = burn_in_iter
logging.info("Is burned in: %r", self.is_burned_in)
if self.is_burned_in:
logging.info("Burn-in iteration: %i",
int(self.burn_in_iteration))
@staticmethod
def _extra_tests_from_config(cp, section, tag):
"""Loads the ks test settings from the config file."""
kwargs = {}
if cp.has_option_tag(section, 'ks-threshold', tag):
kwargs['ks_threshold'] = float(
cp.get_opt_tag(section, 'ks-threshold', tag))
return kwargs
class EnsembleMultiTemperedMCMCBurnInTests(EnsembleMCMCBurnInTests):
"""Adds support for multiple temperatures to
:py:class:`EnsembleMCMCBurnInTests`.
"""
def _getacls(self, filename, start_index):
"""Convenience function for calculating acls for the given filename.
        This function is used by the ``nacl`` burn-in test. That function
expects the returned ``acls`` dict to just report a single ACL for
each parameter. Since multi-tempered samplers return an array of ACLs
for each parameter instead, this takes the max over the array before
returning.
Since we calculate the acls, this will also store it to the sampler.
"""
acls = super(EnsembleMultiTemperedMCMCBurnInTests, self)._getacls(
filename, start_index)
# return the max for each parameter
return {param: vals.max() for (param, vals) in acls.items()}
def _getlogposts(self, filename):
"""Convenience function for retrieving log posteriors.
This just gets the coldest temperature chain, and returns arrays with
shape nwalkers x niterations, so the parent class can run the same
``posterior_step`` function.
"""
return _multitemper_getlogposts(self.sampler, filename)
def _multitemper_getlogposts(sampler, filename):
"""Retrieve log posteriors for multi tempered samplers."""
with sampler.io(filename, 'r') as fp:
samples = fp.read_raw_samples(
['loglikelihood', 'logprior'], thin_start=0, thin_interval=1,
temps=0, flatten=False)
# reshape to drop the first dimension
for (stat, arr) in samples.items():
_, nwalkers, niterations = arr.shape
samples[stat] = arr.reshape((nwalkers, niterations))
logposts = samples['loglikelihood'] + samples['logprior']
return logposts
| 30,844
| 37.604506
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/__init__.py
|
# pylint: disable=unused-import
from . import (models, sampler, io)
from . import (burn_in, entropy, gelman_rubin, geweke, option_utils)
| 137
| 33.5
| 68
|
py
|
pycbc
|
pycbc-master/pycbc/inference/option_utils.py
|
# Copyright (C) 2016 Collin Capano, Duncan Brown
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module contains standard options used for inference-related programs.
"""
import argparse
from pycbc import waveform
# -----------------------------------------------------------------------------
#
# Utilities for plotting results
#
# -----------------------------------------------------------------------------
class ParseLabelArg(argparse.Action):
"""Argparse action that will parse arguments that can accept labels.
This assumes that the values set on the command line for its assigned
argument are strings formatted like ``PARAM[:LABEL]``. When the arguments
are parsed, the ``LABEL`` bit is stripped off and added to a dictionary
mapping ``PARAM -> LABEL``. This dictionary is stored to the parsed
namespace called ``{dest}_labels``, where ``{dest}`` is the argument's
``dest`` setting (by default, this is the same as the option string).
Likewise, the argument's ``dest`` in the parsed namespace is updated so
that it is just ``PARAM``.
If no ``LABEL`` is provided, then ``PARAM`` will be used for ``LABEL``.
This action can work on arguments that have ``nargs != 0`` and ``type`` set
to ``str``.
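    Examples
    --------
    A minimal, illustrative example (the ``--foo`` option name is
    hypothetical):
    >>> import argparse
    >>> parser = argparse.ArgumentParser()
    >>> parser.add_argument('--foo', type=str, nargs='+', action=ParseLabelArg)
    >>> opts = parser.parse_args(['--foo', 'x:XLABEL', 'y'])
    >>> opts.foo
    ['x', 'y']
    >>> opts.foo_labels['x'], opts.foo_labels['y']
    ('XLABEL', 'y')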
"""
def __init__(self, type=str, nargs=None,
**kwargs): # pylint: disable=redefined-builtin
# check that type is string
if type != str:
raise ValueError("the type for this action must be a string")
if nargs == 0:
raise ValueError("nargs must not be 0 for this action")
super(ParseLabelArg, self).__init__(type=type, nargs=nargs,
**kwargs)
def __call__(self, parser, namespace, values, option_string=None):
singlearg = isinstance(values, str)
if singlearg:
values = [values]
params = []
labels = {}
for param in values:
psplit = param.split(':')
if len(psplit) == 2:
param, label = psplit
else:
label = param
labels[param] = label
params.append(param)
# update the namespace
if singlearg:
params = params[0]
setattr(namespace, self.dest, params)
setattr(namespace, '{}_labels'.format(self.dest), labels)
class ParseParametersArg(ParseLabelArg):
"""Argparse action that will parse parameters and labels from an opton.
Does the same as ``ParseLabelArg``, with the additional functionality that
if ``LABEL`` is a known parameter in ``pycbc.waveform.parameters``, then
the label attribute there will be used in the labels dictionary.
Otherwise, ``LABEL`` will be used.
Examples
--------
Create a parser and add two arguments that use this action (note that the
first argument accepts multiple inputs while the second only accepts a
single input):
>>> import argparse
>>> parser = argparse.ArgumentParser()
>>> parser.add_argument('--parameters', type=str, nargs="+",
action=ParseParametersArg)
>>> parser.add_argument('--z-arg', type=str, action=ParseParametersArg)
Parse a command line that uses these options:
>>> import shlex
>>> cli = "--parameters 'mass1+mass2:mtotal' ra ni --z-arg foo:bar"
>>> opts = parser.parse_args(shlex.split(cli))
>>> opts.parameters
['mass1+mass2', 'ra', 'ni']
>>> opts.parameters_labels
{'mass1+mass2': '$M~(\\mathrm{M}_\\odot)$', 'ni': 'ni', 'ra': '$\\alpha$'}
>>> opts.z_arg
'foo'
>>> opts.z_arg_labels
{'foo': 'bar'}
    In the above, the label given for the first argument to ``--parameters``
    was ``mtotal``. Since this is a recognized parameter in
    ``pycbc.waveform.parameters``, the label dictionary contains the latex
    string associated with the ``mtotal``
parameter. A label was not provided for the second argument, and so ``ra``
was used. Since ``ra`` is also a recognized parameter, its associated latex
string was used in the labels dictionary. Since ``ni`` and ``bar`` (the
label for ``z-arg``) are not recognized parameters, they were just used
as-is in the labels dictionaries.
"""
def __call__(self, parser, namespace, values, option_string=None):
super(ParseParametersArg, self).__call__(parser, namespace, values,
option_string=option_string)
# try to replace the labels with a label from waveform.parameters
labels = getattr(namespace, '{}_labels'.format(self.dest))
for param, label in labels.items():
try:
label = getattr(waveform.parameters, label).label
labels[param] = label
except AttributeError:
pass
def add_injsamples_map_opt(parser):
"""Adds option to parser to specify a mapping between injection parameters
    and sample parameters.
"""
parser.add_argument('--injection-samples-map', nargs='+',
metavar='INJECTION_PARAM:SAMPLES_PARAM',
help='Rename/apply functions to the injection '
'parameters and name them the same as one of the '
'parameters in samples. This can be used if the '
'injection parameters are not the same as the '
'samples parameters. INJECTION_PARAM may be a '
'function of the injection parameters; '
                             'SAMPLES_PARAM must be a name of one of the '
'parameters in the samples group.')
def add_plot_posterior_option_group(parser):
"""Adds the options needed to configure plots of posterior results.
Parameters
----------
parser : object
ArgumentParser instance.
"""
pgroup = parser.add_argument_group("Options for what plots to create and "
"their formats.")
pgroup.add_argument('--plot-marginal', action='store_true', default=False,
help="Plot 1D marginalized distributions on the "
"diagonal axes.")
pgroup.add_argument('--marginal-percentiles', nargs='+', default=None,
type=float,
help="Percentiles to draw lines at on the 1D "
"histograms.")
pgroup.add_argument('--no-marginal-lines', action='store_true',
default=False,
help="Do not add vertical lines in the 1D marginal "
"plots showing the marginal percentiles.")
pgroup.add_argument('--no-marginal-titles', action='store_true',
default=False,
help="Do not add titles giving the 1D credible range "
"over the 1D marginal plots.")
pgroup.add_argument("--plot-scatter", action='store_true', default=False,
help="Plot each sample point as a scatter plot.")
pgroup.add_argument("--plot-density", action="store_true", default=False,
help="Plot the posterior density as a color map.")
pgroup.add_argument("--plot-contours", action="store_true", default=False,
help="Draw contours showing the 50th and 90th "
"percentile confidence regions.")
pgroup.add_argument('--contour-percentiles', nargs='+', default=None,
type=float,
help="Percentiles to draw contours if different "
"than 50th and 90th.")
# add mins, maxs options
pgroup.add_argument('--mins', nargs='+', metavar='PARAM:VAL', default=[],
help="Specify minimum parameter values to plot. This "
"should be done by specifying the parameter name "
"followed by the value. Parameter names must be "
"the same as the PARAM argument in --parameters "
"(or, if no parameters are provided, the same as "
"the parameter name specified in the variable "
"args in the input file. If none provided, "
"the smallest parameter value in the posterior "
"will be used.")
pgroup.add_argument('--maxs', nargs='+', metavar='PARAM:VAL', default=[],
help="Same as mins, but for the maximum values to "
"plot.")
# add expected parameters options
pgroup.add_argument('--expected-parameters', nargs='+',
metavar='PARAM:VAL',
default=[],
help="Specify expected parameter values to plot. If "
"provided, a cross will be plotted in each axis "
"that an expected parameter is provided. "
"Parameter names must be "
"the same as the PARAM argument in --parameters "
"(or, if no parameters are provided, the same as "
"the parameter name specified in the variable "
"args in the input file.")
pgroup.add_argument('--expected-parameters-color', default='r',
help="What to color the expected-parameters cross. "
"Default is red.")
pgroup.add_argument('--plot-injection-parameters', action='store_true',
default=False,
help="Get the expected parameters from the injection "
"in the input file. There must be only a single "
"injection in the file to work. Any values "
"specified by expected-parameters will override "
"the values obtained for the injection.")
pgroup.add_argument('--pick-injection-by-time', action='store_true',
default=False,
help="In the case of multiple injections, pick one"
" for plotting based on its proximity in time.")
add_injsamples_map_opt(pgroup)
return pgroup
def plot_ranges_from_cli(opts):
"""Parses the mins and maxs arguments from the `plot_posterior` option
group.
Parameters
----------
    opts : argparse.Namespace
The parsed arguments from the command line.
Returns
-------
mins : dict
Dictionary of parameter name -> specified mins. Only parameters that
were specified in the --mins option will be included; if no parameters
were provided, will return an empty dictionary.
maxs : dict
Dictionary of parameter name -> specified maxs. Only parameters that
        were specified in the --maxs option will be included; if no parameters
were provided, will return an empty dictionary.
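    Examples
    --------
    An illustrative sketch, assuming ``opts`` came from a parser that used
    :py:func:`add_plot_posterior_option_group` and a command line such as
    ``--mins mass1:10 --maxs mass1:80``:
    >>> mins, maxs = plot_ranges_from_cli(opts)
    >>> mins
    {'mass1': 10.0}
    >>> maxs
    {'mass1': 80.0}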
"""
mins = {}
for x in opts.mins:
x = x.split(':')
if len(x) != 2:
raise ValueError("option --mins not specified correctly; see help")
mins[x[0]] = float(x[1])
maxs = {}
for x in opts.maxs:
x = x.split(':')
if len(x) != 2:
raise ValueError("option --maxs not specified correctly; see help")
maxs[x[0]] = float(x[1])
return mins, maxs
def expected_parameters_from_cli(opts):
"""Parses the --expected-parameters arguments from the `plot_posterior`
option group.
Parameters
----------
    opts : argparse.Namespace
The parsed arguments from the command line.
Returns
-------
dict
Dictionary of parameter name -> expected value. Only parameters that
were specified in the --expected-parameters option will be included; if
no parameters were provided, will return an empty dictionary.
"""
expected = {}
for x in opts.expected_parameters:
x = x.split(':')
if len(x) != 2:
raise ValueError("option --expected-paramters not specified "
"correctly; see help")
expected[x[0]] = float(x[1])
return expected
def add_scatter_option_group(parser):
"""Adds the options needed to configure scatter plots.
Parameters
----------
parser : object
ArgumentParser instance.
"""
scatter_group = parser.add_argument_group("Options for configuring the "
"scatter plot.")
scatter_group.add_argument(
'--z-arg', type=str, default=None, action=ParseParametersArg,
help='What to color the scatter points by. Syntax is the same as the '
'parameters option.')
scatter_group.add_argument(
"--vmin", type=float, help="Minimum value for the colorbar.")
scatter_group.add_argument(
"--vmax", type=float, help="Maximum value for the colorbar.")
scatter_group.add_argument(
"--scatter-cmap", type=str, default='plasma',
help="Specify the colormap to use for points. Default is plasma.")
return scatter_group
def add_density_option_group(parser):
"""Adds the options needed to configure contours and density colour map.
Parameters
----------
parser : object
ArgumentParser instance.
"""
density_group = parser.add_argument_group("Options for configuring the "
"contours and density color map")
density_group.add_argument(
"--density-cmap", type=str, default='viridis',
help="Specify the colormap to use for the density. "
"Default is viridis.")
density_group.add_argument(
"--contour-color", type=str, default=None,
help="Specify the color to use for the contour lines. Default is "
"white for density plots and black for scatter plots.")
density_group.add_argument(
"--contour-linestyles", type=str, default=None, nargs="+",
help="Specify the linestyles to use for the contour lines. Defaut "
"is solid for all.")
density_group.add_argument(
"--no-contour-labels", action="store_true", default=False,
help="Don't put labels on the contours.")
density_group.add_argument(
'--use-kombine-kde', default=False, action="store_true",
help="Use kombine's clustered KDE for determining 2D marginal "
"contours and density instead of scipy's gaussian_kde (the "
"default). This is better at distinguishing bimodal "
"distributions, but is much slower than the default. For speed, "
"suggest setting --kde-args 'max_samples:20000' or smaller if "
"using this. Requires kombine to be installed.")
density_group.add_argument(
'--max-kde-samples', type=int, default=None,
help="Limit the number of samples used for KDE construction to the "
"given value. This can substantially speed up plot generation "
"(particularly when plotting multiple parameters). Suggested "
"values: 5000 to 10000.")
density_group.add_argument(
'--kde-args', metavar="ARG:VALUE", nargs='+', default=None,
help="Pass the given argrument, value pairs to the KDE function "
"(either scipy's or kombine's) when setting it up.")
return density_group
| 16,324
| 44.096685
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/entropy.py
|
""" The module contains functions for calculating the
Kullback-Leibler divergence.
"""
import numpy
from scipy import stats
def check_hist_params(samples, hist_min, hist_max, hist_bins):
""" Checks that the bound values given for the histogram are consistent,
returning the range if they are or raising an error if they are not.
Also checks that if hist_bins is a str, it corresponds to a method
available in numpy.histogram
Parameters
----------
samples : numpy.array
Set of samples to get the min/max if only one of the bounds is given.
hist_min : numpy.float64
Minimum value for the histogram.
hist_max : numpy.float64
Maximum value for the histogram.
hist_bins: int or str
If int, number of equal-width bins to use in numpy.histogram. If str,
it should be one of the methods to calculate the optimal bin width
available in numpy.histogram: ['auto', 'fd', 'doane', 'scott', 'stone',
'rice', 'sturges', 'sqrt']. Default is 'fd' (Freedman Diaconis
Estimator). This option will be ignored if `kde=True`.
Returns
-------
hist_range : tuple or None
The bounds (hist_min, hist_max) or None.
hist_bins : int or str
Number of bins or method for optimal width bin calculation.
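    Examples
    --------
    A small illustrative call; when only one bound is given, the other is
    taken from the samples:
    >>> import numpy
    >>> samples = numpy.array([0., 1., 2., 3.])
    >>> hist_range, hist_bins = check_hist_params(samples, 1., None, None)
    >>> hist_bins
    'fd'
    >>> float(hist_range[0]), float(hist_range[1])
    (1.0, 3.0)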
"""
hist_methods = ['auto', 'fd', 'doane', 'scott', 'stone', 'rice',
'sturges', 'sqrt']
if not hist_bins:
hist_bins = 'fd'
elif isinstance(hist_bins, str) and hist_bins not in hist_methods:
raise ValueError('Method for calculating bins width must be one of'
' {}'.format(hist_methods))
# No bounds given, return None
if not hist_min and not hist_max:
return None, hist_bins
# One of the bounds is missing
if hist_min and not hist_max:
hist_max = samples.max()
elif hist_max and not hist_min:
hist_min = samples.min()
# Both bounds given
elif hist_min and hist_max and hist_min >= hist_max:
raise ValueError('hist_min must be lower than hist_max.')
hist_range = (hist_min, hist_max)
return hist_range, hist_bins
def compute_pdf(samples, method, bins, hist_min, hist_max):
""" Computes the probability density function for a set of samples.
Parameters
----------
samples : numpy.array
Set of samples to calculate the pdf.
method : str
Method to calculate the pdf. Options are 'kde' for the Kernel Density
Estimator, and 'hist' to use numpy.histogram
bins : str or int, optional
This option will be ignored if method is `kde`.
If int, number of equal-width bins to use when calculating probability
density function from a set of samples of the distribution. If str, it
should be one of the methods to calculate the optimal bin width
available in numpy.histogram: ['auto', 'fd', 'doane', 'scott', 'stone',
'rice', 'sturges', 'sqrt']. Default is 'fd' (Freedman Diaconis
Estimator).
hist_min : numpy.float64, optional
Minimum of the distributions' values to use. This will be ignored if
`kde=True`.
hist_max : numpy.float64, optional
Maximum of the distributions' values to use. This will be ignored if
`kde=True`.
Returns
-------
pdf : numpy.array
Discrete probability distribution calculated from samples.
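    Examples
    --------
    A minimal sketch using the histogram method on synthetic samples:
    >>> import numpy
    >>> samples = numpy.random.normal(size=1000)
    >>> pdf = compute_pdf(samples, 'hist', 'auto', None, None)
    >>> pdf.ndim
    1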
"""
if method == 'kde':
samples_kde = stats.gaussian_kde(samples)
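        # evaluate the KDE on a resampled set of points: at least 10000, or
        # as many points as were provided if the input set is larger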
npts = 10000 if len(samples) <= 10000 else len(samples)
draw = samples_kde.resample(npts)
pdf = samples_kde.evaluate(draw)
elif method == 'hist':
hist_range, hist_bins = check_hist_params(samples, hist_min,
hist_max, bins)
pdf, _ = numpy.histogram(samples, bins=hist_bins,
range=hist_range, density=True)
else:
raise ValueError('Method not recognized.')
return pdf
def entropy(pdf1, base=numpy.e):
""" Computes the information entropy for a single parameter
from one probability density function.
Parameters
----------
pdf1 : numpy.array
Probability density function.
base : {numpy.e, numpy.float64}, optional
The logarithmic base to use (choose base 2 for information measured
in bits, default is nats).
Returns
-------
numpy.float64
The information entropy value.
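    Examples
    --------
    The entropy of a uniform distribution over four outcomes is two bits:
    >>> import numpy
    >>> round(float(entropy(numpy.array([0.25, 0.25, 0.25, 0.25]), base=2)), 6)
    2.0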
"""
return stats.entropy(pdf1, base=base)
def kl(samples1, samples2, pdf1=False, pdf2=False, kde=False,
bins=None, hist_min=None, hist_max=None, base=numpy.e):
""" Computes the Kullback-Leibler divergence for a single parameter
from two distributions.
Parameters
----------
samples1 : numpy.array
Samples or probability density function (for the latter must also set
`pdf1=True`).
samples2 : numpy.array
Samples or probability density function (for the latter must also set
`pdf2=True`).
pdf1 : bool
        Set to `True` if `samples1` is a probability density function already.
pdf2 : bool
        Set to `True` if `samples2` is a probability density function already.
kde : bool
Set to `True` if at least one of `pdf1` or `pdf2` is `False` to
estimate the probability density function using kernel density
estimation (KDE).
bins : int or str, optional
If int, number of equal-width bins to use when calculating probability
density function from a set of samples of the distribution. If str, it
should be one of the methods to calculate the optimal bin width
available in numpy.histogram: ['auto', 'fd', 'doane', 'scott', 'stone',
'rice', 'sturges', 'sqrt']. Default is 'fd' (Freedman Diaconis
Estimator). This option will be ignored if `kde=True`.
hist_min : numpy.float64
Minimum of the distributions' values to use. This will be ignored if
`kde=True`.
hist_max : numpy.float64
Maximum of the distributions' values to use. This will be ignored if
`kde=True`.
base : numpy.float64
The logarithmic base to use (choose base 2 for information measured
in bits, default is nats).
Returns
-------
numpy.float64
The Kullback-Leibler divergence value.
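    Examples
    --------
    An illustrative sketch comparing two sets of synthetic samples using
    histogram-based density estimates over a common range:
    >>> import numpy
    >>> s1 = numpy.random.normal(0., 1., size=10000)
    >>> s2 = numpy.random.normal(0.5, 1., size=10000)
    >>> div = kl(s1, s2, bins=50, hist_min=-5., hist_max=5.)
    >>> bool(div >= 0.)
    True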
"""
if pdf1 and pdf2 and kde:
raise ValueError('KDE can only be used when at least one of pdf1 or '
'pdf2 is False.')
sample_groups = {'P': (samples1, pdf1), 'Q': (samples2, pdf2)}
pdfs = {}
for n in sample_groups:
samples, pdf = sample_groups[n]
if pdf:
pdfs[n] = samples
else:
method = 'kde' if kde else 'hist'
pdfs[n] = compute_pdf(samples, method, bins, hist_min, hist_max)
return stats.entropy(pdfs['P'], qk=pdfs['Q'], base=base)
def js(samples1, samples2, kde=False, bins=None, hist_min=None, hist_max=None,
base=numpy.e):
""" Computes the Jensen-Shannon divergence for a single parameter
from two distributions.
Parameters
----------
samples1 : numpy.array
Samples.
samples2 : numpy.array
Samples.
kde : bool
Set to `True` to estimate the probability density function using
kernel density estimation (KDE).
bins : int or str, optional
If int, number of equal-width bins to use when calculating probability
density function from a set of samples of the distribution. If str, it
should be one of the methods to calculate the optimal bin width
available in numpy.histogram: ['auto', 'fd', 'doane', 'scott', 'stone',
'rice', 'sturges', 'sqrt']. Default is 'fd' (Freedman Diaconis
Estimator). This option will be ignored if `kde=True`.
hist_min : numpy.float64
Minimum of the distributions' values to use. This will be ignored if
`kde=True`.
hist_max : numpy.float64
Maximum of the distributions' values to use. This will be ignored if
`kde=True`.
base : numpy.float64
The logarithmic base to use (choose base 2 for information measured
in bits, default is nats).
Returns
-------
numpy.float64
The Jensen-Shannon divergence value.
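    Examples
    --------
    An illustrative check: for identical inputs the divergence is zero (and
    in general it is bounded above by ``log(2)`` when using the natural log):
    >>> import numpy
    >>> s = numpy.random.normal(size=10000)
    >>> float(js(s, s, bins=50, hist_min=-5., hist_max=5.))
    0.0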
"""
sample_groups = {'P': samples1, 'Q': samples2}
pdfs = {}
for n in sample_groups:
samples = sample_groups[n]
method = 'kde' if kde else 'hist'
pdfs[n] = compute_pdf(samples, method, bins, hist_min, hist_max)
pdfs['M'] = (1./2) * (pdfs['P'] + pdfs['Q'])
js_div = 0
for pdf in (pdfs['P'], pdfs['Q']):
js_div += (1./2) * kl(pdf, pdfs['M'], pdf1=True, pdf2=True, base=base)
return js_div
| 8,796
| 35.201646
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/evidence.py
|
# Copyright (C) 2019 Steven Reyes
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides functions for estimating the marginal
likelihood or evidence of a model.
"""
import numpy
from scipy import integrate
def arithmetic_mean_estimator(log_likelihood):
"""Returns the log evidence via the prior arithmetic mean estimator (AME).
The logarithm form of AME is used. This is the most basic
evidence estimator, and often requires O(billions) of samples
from the prior.
Parameters
----------
log_likelihood : 1d array of floats
The log likelihood of the data sampled from the prior
distribution.
Returns
-------
float :
Estimation of the log of the evidence.
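    Examples
    --------
    A quick sanity check: if every prior sample has the same log likelihood,
    the estimator returns that value:
    >>> import numpy
    >>> float(arithmetic_mean_estimator(numpy.zeros(100)))
    0.0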
"""
num_samples = len(log_likelihood)
logl_max = numpy.max(log_likelihood)
log_evidence = 0.
for i, _ in enumerate(log_likelihood):
log_evidence += numpy.exp(log_likelihood[i] - logl_max)
log_evidence = numpy.log(log_evidence)
log_evidence += logl_max - numpy.log(num_samples)
return log_evidence
def harmonic_mean_estimator(log_likelihood):
"""Returns the log evidence via posterior harmonic mean estimator (HME).
    The logarithm form of HME is used. This method is not
    recommended for general use: it is very slow to converge,
    formally has infinite variance, and is very error prone.
Parameters
----------
log_likelihood : 1d array of floats
The log likelihood of the data sampled from the posterior
distribution.
Returns
-------
float :
Estimation of the log of the evidence.
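    Examples
    --------
    As for the arithmetic mean estimator, a constant log likelihood is
    returned unchanged:
    >>> import numpy
    >>> float(harmonic_mean_estimator(numpy.zeros(100)))
    0.0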
"""
num_samples = len(log_likelihood)
logl_max = numpy.max(-1.0*log_likelihood)
log_evidence = 0.
for i, _ in enumerate(log_likelihood):
log_evidence += numpy.exp(-1.0*log_likelihood[i] + logl_max)
log_evidence = -1.0*numpy.log(log_evidence)
log_evidence += logl_max
log_evidence += numpy.log(num_samples)
return log_evidence
def thermodynamic_integration(log_likelihood, betas,
method="simpsons"):
"""Returns the log evidence of the model via thermodynamic integration.
Also returns an estimated standard deviation for the log evidence.
Current options are integration through the trapezoid rule, a
first-order corrected trapezoid rule, and Simpson's rule.
Parameters
----------
log_likelihood : 3d array of shape (betas, walker, iteration)
The log likelihood for each temperature separated by
temperature, walker, and iteration.
betas : 1d array
The inverse temperatures used in the MCMC.
method : {"trapzoid", "trapezoid_corrected", "simpsons"},
optional.
The numerical integration method to use for the
thermodynamic integration. Choices include: "trapezoid",
"trapezoid_corrected", "simpsons", for the trapezoid rule,
the first-order correction to the trapezoid rule, and
Simpson's rule. [Default = "simpsons"]
Returns
-------
log_evidence : float
Estimation of the log of the evidence.
mcmc_std : float
The standard deviation of the log evidence estimate from
Monte-Carlo spread.
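    Examples
    --------
    A schematic call on synthetic data, just to illustrate the expected input
    shapes (here 4 temperatures, 10 walkers and 100 iterations):
    >>> import numpy
    >>> betas = numpy.geomspace(1e-3, 1., 4)
    >>> logl = numpy.random.normal(size=(4, 10, 100))
    >>> logz, std = thermodynamic_integration(logl, betas, method="simpsons")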
"""
# Check if the method of integration is in the list of choices
method_list = ["trapezoid", "trapezoid_corrected", "simpsons"]
if method not in method_list:
raise ValueError("Method %s not supported. Expected %s"
% (method, method_list))
# Read in the data and ensure ordering of data.
# Ascending order sort
order = numpy.argsort(betas)
betas = betas[order]
log_likelihood = log_likelihood[order]
# Assume log likelihood is given in shape of beta, walker,
# and iteration.
log_likelihood = numpy.reshape(log_likelihood,
(len(betas),
len(log_likelihood[0].flatten())))
average_logl = numpy.average(log_likelihood, axis=1)
if method in ("trapezoid", "trapezoid_corrected"):
log_evidence = numpy.trapz(average_logl, betas)
if method == "trapezoid_corrected":
# var_correction holds the derivative correction terms
# See Friel et al. 2014 for expression and derivation.
# https://link.springer.com/article/10.1007/s11222-013-9397-1
var_correction = 0
for i in range(len(betas) - 1):
delta_beta = betas[i+1] - betas[i]
pre_fac_var = (1. / 12.) * (delta_beta ** 2.0)
var_diff = numpy.var(log_likelihood[i+1])
var_diff -= numpy.var(log_likelihood[i])
var_correction -= pre_fac_var * var_diff
# Add the derivative correction term back to the log_evidence
# from the first if statement.
log_evidence += var_correction
elif method == "simpsons":
# beta -> 0 tends to contribute the least to the integral
# so we can sacrifice precision there, rather than near
# beta -> 1. Option even="last" puts trapezoid rule at
# first few points.
log_evidence = integrate.simps(average_logl, betas,
even="last")
# Estimate the Monte Carlo variance of the evidence calculation
# See (Evans, Annis, 2019.)
# https://www.sciencedirect.com/science/article/pii/S0022249617302651
ti_vec = numpy.zeros(len(log_likelihood[0]))
# Get log likelihood chains by sample and not by temperature.
logl_per_samp = []
for i, _ in enumerate(log_likelihood[0]):
logl_per_samp.append([log_likelihood[x][i] for x in range(len(betas))])
if method in ("trapezoid", "trapezoid_corrected"):
for i, _ in enumerate(log_likelihood[0]):
ti_vec[i] = numpy.trapz(logl_per_samp[i], betas)
elif method == "simpsons":
for i, _ in enumerate(log_likelihood[0]):
ti_vec[i] = integrate.simps(logl_per_samp[i], betas,
even="last")
# Standard error is sample std / sqrt(number of samples)
mcmc_std = numpy.std(ti_vec) / numpy.sqrt(float(len(log_likelihood[0])))
return log_evidence, mcmc_std
def stepping_stone_algorithm(log_likelihood, betas):
"""Returns the log evidence of the model via stepping stone algorithm.
Also returns an estimated standard deviation for the log evidence.
Parameters
----------
log_likelihood : 3d array of shape (betas, walker, iteration)
The log likelihood for each temperature separated by
temperature, walker, and iteration.
betas : 1d array
The inverse temperatures used in the MCMC.
Returns
-------
log_evidence : float
Estimation of the log of the evidence.
mcmc_std : float
The standard deviation of the log evidence estimate from
Monte-Carlo spread.
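    Examples
    --------
    A schematic call on synthetic data, analogous to the
    :py:func:`thermodynamic_integration` example (4 temperatures, 10 walkers,
    100 iterations):
    >>> import numpy
    >>> betas = numpy.geomspace(1e-3, 1., 4)
    >>> logl = numpy.random.normal(size=(4, 10, 100))
    >>> logz, std = stepping_stone_algorithm(logl, betas)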
"""
# Reverse order sort
order = numpy.argsort(betas)[::-1]
betas = betas[order]
log_likelihood = log_likelihood[order]
# Assume log likelihood is given in shape of beta,
# walker, iteration.
log_likelihood = numpy.reshape(log_likelihood,
(len(betas),
len(log_likelihood[0].flatten())))
log_rk_pb = numpy.zeros(len(betas) - 1)
for i in range(len(betas) - 1):
delta_beta = betas[i] - betas[i+1]
# Max log likelihood for beta [i+1]
max_logl_pb = numpy.max(log_likelihood[i+1])
val_1 = delta_beta * max_logl_pb
val_2 = delta_beta * (log_likelihood[i+1] - max_logl_pb)
val_2 = numpy.log(numpy.average(numpy.exp(val_2)))
log_rk_pb[i] = val_1 + val_2
log_rk = numpy.sum(log_rk_pb)
log_evidence = log_rk
# Calculate the Monte Carlo variation
mcmc_std = 0
for i in range(len(betas) - 1):
delta_beta = betas[i] - betas[i+1]
pre_fact = (delta_beta * log_likelihood[i+1]) - log_rk_pb[i]
pre_fact = numpy.exp(pre_fact) - 1.0
val = numpy.sum(pre_fact ** 2)
mcmc_std += val
mcmc_std /= float(len(log_likelihood[0])) ** 2.0
mcmc_std = numpy.sqrt(mcmc_std)
return log_evidence, mcmc_std
| 8,936
| 34.324111
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/sampler/base_cube.py
|
# Copyright (C) 2020 Sumit Kumar, Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
Common utilities for samplers that rely on transforming between a unit cube
and the prior space. This is typical of many nested sampling algorithms.
"""
import numpy
from .. import models
def call_global_loglikelihood(cube):
return models._global_instance.log_likelihood(cube)
def call_global_logprior(cube):
return models._global_instance.prior_transform(cube)
def setup_calls(model, loglikelihood_function=None, copy_prior=False):
""" Configure calls for MPI support
"""
model_call = CubeModel(model, loglikelihood_function,
copy_prior=copy_prior)
    # these are used to help parallelize over multiple cores / MPI
models._global_instance = model_call
log_likelihood_call = call_global_loglikelihood
prior_call = call_global_logprior
return log_likelihood_call, prior_call
class CubeModel(object):
""" Class for making PyCBC Inference 'model class'
Parameters
----------
model : inference.BaseModel instance
A model instance from pycbc.
"""
def __init__(self, model, loglikelihood_function=None, copy_prior=False):
if model.sampling_transforms is not None:
raise ValueError("Ultranest or dynesty do not support sampling transforms")
self.model = model
if loglikelihood_function is None:
loglikelihood_function = 'loglikelihood'
self.loglikelihood_function = loglikelihood_function
self.copy_prior = copy_prior
def log_likelihood(self, cube):
"""
        Returns the log likelihood of the model at the given cube point.
"""
params = dict(zip(self.model.sampling_params, cube))
self.model.update(**params)
if self.model.logprior == -numpy.inf:
return -numpy.inf
return getattr(self.model, self.loglikelihood_function)
def prior_transform(self, cube):
"""
        Prior transform function used by unit-cube based samplers such as
        ultranest and dynesty. It takes a point in the unit cube and maps it
        to the prior space via the inverse CDF of the prior distribution.
"""
if self.copy_prior:
cube = cube.copy()
# we preserve the type of cube to whatever we were given
dict_cube = dict(zip(self.model.variable_params, cube))
inv = self.model.prior_distribution.cdfinv(**dict_cube)
for i, param in enumerate(self.model.variable_params):
cube[i] = inv[param]
return cube
| 3,361
| 33.659794
| 87
|
py
|
pycbc
|
pycbc-master/pycbc/inference/sampler/base.py
|
# Copyright (C) 2016 Christopher M. Biwer, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
Defines the base sampler class to be inherited by all samplers.
"""
from abc import ABCMeta, abstractmethod, abstractproperty
import shutil
import logging
from six import add_metaclass
from pycbc import distributions
from pycbc.inference.io import validate_checkpoint_files
#
# =============================================================================
#
# Base Sampler definition
#
# =============================================================================
#
@add_metaclass(ABCMeta)
class BaseSampler(object):
"""Abstract base class for all inference samplers.
All sampler classes must inherit from this class and implement its abstract
methods.
Parameters
----------
model : Model
An instance of a model from ``pycbc.inference.models``.
"""
name = None
def __init__(self, model):
self.model = model
self.checkpoint_file = None
self.backup_file = None
self.checkpoint_valid = None
self.new_checkpoint = None
# @classmethod <--uncomment when we move to python 3.3
@abstractmethod
def from_config(cls, cp, model, output_file=None, nprocesses=1,
use_mpi=False):
"""This should initialize the sampler given a config file.
"""
pass
@property
def variable_params(self):
"""Returns the parameters varied in the model.
"""
return self.model.variable_params
@property
def sampling_params(self):
"""Returns the sampling params used by the model.
"""
return self.model.sampling_params
@property
def static_params(self):
"""Returns the model's fixed parameters.
"""
return self.model.static_params
@abstractproperty
def samples(self):
"""A dict mapping variable_params to arrays of samples currently
in memory. The dictionary may also contain sampling_params.
The sample arrays may have any shape, and may or may not be thinned.
"""
pass
@abstractproperty
def model_stats(self):
"""A dict mapping model's metadata fields to arrays of values for
each sample in ``raw_samples``.
The arrays may have any shape, and may or may not be thinned.
"""
pass
@abstractmethod
def run(self):
"""This function should run the sampler.
Any checkpointing should be done internally in this function.
"""
pass
@abstractproperty
def io(self):
"""A class that inherits from ``BaseInferenceFile`` to handle IO with
an hdf file.
This should be a class, not an instance of class, so that the sampler
can initialize it when needed.
"""
pass
@abstractmethod
def checkpoint(self):
"""The sampler must have a checkpoint method for dumping raw samples
and stats to the file type defined by ``io``.
"""
pass
@abstractmethod
def finalize(self):
"""Do any finalization to the samples file before exiting."""
pass
@abstractmethod
def resume_from_checkpoint(self):
"""Resume the sampler from the output file.
"""
pass
#
# =============================================================================
#
# Convenience functions
#
# =============================================================================
#
def setup_output(sampler, output_file, check_nsamples=True, validate=True):
r"""Sets up the sampler's checkpoint and output files.
The checkpoint file has the same name as the output file, but with
``.checkpoint`` appended to the name. A backup file will also be
created.
Parameters
----------
sampler : sampler instance
Sampler
output_file : str
Name of the output file.
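    Examples
    --------
    A schematic call, assuming ``sampler`` is an initialized sampler
    instance; for an output file named ``run.hdf`` this creates (or
    validates) ``run.hdf.checkpoint`` and ``run.hdf.bkup`` and records their
    names on the sampler:
    >>> setup_output(sampler, 'run.hdf')
    >>> sampler.checkpoint_file
    'run.hdf.checkpoint'
    >>> sampler.backup_file
    'run.hdf.bkup'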
"""
# check for backup file(s)
checkpoint_file = output_file + '.checkpoint'
backup_file = output_file + '.bkup'
# check if we have a good checkpoint and/or backup file
logging.info("Looking for checkpoint file")
checkpoint_valid = False
if validate:
checkpoint_valid = validate_checkpoint_files(checkpoint_file,
backup_file,
check_nsamples)
# Create a new file if the checkpoint doesn't exist, or if it is
# corrupted
sampler.new_checkpoint = False # keeps track if this is a new file or not
if not checkpoint_valid:
logging.info("Checkpoint not found or not valid")
create_new_output_file(sampler, checkpoint_file)
# now the checkpoint is valid
sampler.new_checkpoint = True
# copy to backup
shutil.copy(checkpoint_file, backup_file)
# write the command line, startup
for fn in [checkpoint_file, backup_file]:
with sampler.io(fn, "a") as fp:
fp.write_command_line()
fp.write_resume_point()
fp.write_run_start_time()
# store
sampler.checkpoint_file = checkpoint_file
sampler.backup_file = backup_file
def create_new_output_file(sampler, filename, **kwargs):
r"""Creates a new output file.
Parameters
----------
sampler : sampler instance
Sampler
filename : str
Name of the file to create.
\**kwargs :
All other keyword arguments are passed through to the file's
``write_metadata`` function.
"""
logging.info("Creating file {}".format(filename))
with sampler.io(filename, "w") as fp:
# create the samples group and sampler info group
fp.create_group(fp.samples_group)
fp.create_group(fp.sampler_group)
# save the sampler's metadata
fp.write_sampler_metadata(sampler)
def initial_dist_from_config(cp, variable_params, static_params=None):
r"""Loads a distribution for the sampler start from the given config file.
A distribution will only be loaded if the config file has a [initial-\*]
section(s).
Parameters
----------
cp : Config parser
The config parser to try to load from.
variable_params : list of str
The variable parameters for the distribution.
static_params : dict, optional
The static parameters used to place constraints on the
distribution.
Returns
-------
JointDistribution or None :
The initial distribution. If no [initial-\*] section found in the
config file, will just return None.
"""
if len(cp.get_subsections("initial")):
logging.info("Using a different distribution for the starting points "
"than the prior.")
initial_dists = distributions.read_distributions_from_config(
cp, section="initial")
constraints = distributions.read_constraints_from_config(
cp, constraint_section="initial_constraint",
static_args=static_params)
init_dist = distributions.JointDistribution(
variable_params, *initial_dists,
**{"constraints": constraints})
else:
init_dist = None
return init_dist
| 8,201
| 30.425287
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/sampler/epsie.py
|
# Copyright (C) 2019 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module provides classes for interacting with epsie samplers.
"""
import numpy
import epsie
from epsie.samplers import ParallelTemperedSampler
# we'll use emcee_pt's default beta ladder for temperature levels
from emcee.ptsampler import default_beta_ladder
from pycbc.pool import choose_pool
from .base import (BaseSampler, setup_output)
from .base_mcmc import (BaseMCMC, get_optional_arg_from_config,
nsamples_in_chain)
from .base_multitemper import (MultiTemperedSupport, compute_acf, compute_acl,
acl_from_raw_acls)
from ..burn_in import MultiTemperedMCMCBurnInTests
from ..jump import epsie_proposals_from_config
from ..io import EpsieFile
from .. import models
class EpsieSampler(MultiTemperedSupport, BaseMCMC, BaseSampler):
"""Constructs an MCMC sampler using epsie's parallel-tempered sampler.
Parameters
----------
model : model
A model from ``pycbc.inference.models``.
nchains : int
Number of chains to use in the sampler.
ntemps : int, optional
Number of temperatures to use in the sampler. A geometrically-spaced
temperature ladder with the gievn number of levels will be constructed
based on the number of parameters. If not provided, must provide
``betas``.
betas : array, optional
An array of inverse temperature values to be used in for the
temperature ladder. If not provided, must provide ``ntemps``.
proposals : list, optional
List of proposals to use. Any parameters that do not have a proposal
        provided will use the ``default_proposal``. **Note:** proposals should
be specified for the sampling parameters, not the
variable parameters.
default_proposal : an epsie.Proposal class, optional
The default proposal to use for parameters not in ``proposals``.
Default is :py:class:`epsie.proposals.Normal`.
default_proposal_args : dict, optional
Dictionary of arguments to pass to the default proposal.
swap_interval : int, optional
The number of iterations between temperature swaps. Default is 1.
seed : int, optional
Seed for epsie's random number generator. If None provided, will create
one.
checkpoint_interval : int, optional
Specify the number of iterations to do between checkpoints. If not
        provided, no checkpointing will be done.
checkpoint_signal : str, optional
Set the signal to use when checkpointing. For example, 'USR2'.
loglikelihood_function : str, optional
Set the function to call from the model for the ``loglikelihood``.
Default is ``loglikelihood``.
nprocesses : int, optional
The number of parallel processes to use. Default is 1
        (no parallelization).
use_mpi : bool, optional
Use MPI for parallelization. Default (False) will use python's
multiprocessing.
"""
name = "epsie"
_io = EpsieFile
burn_in_class = MultiTemperedMCMCBurnInTests
def __init__(self, model, nchains, ntemps=None, betas=None,
proposals=None, default_proposal=None,
default_proposal_args=None, seed=None,
swap_interval=1,
checkpoint_interval=None, checkpoint_signal=None,
loglikelihood_function=None,
nprocesses=1, use_mpi=False):
# create the betas if not provided
if betas is None:
betas = default_beta_ladder(len(model.variable_params),
ntemps=ntemps)
self.model = model
# create a wrapper for calling the model
model_call = _EpsieCallModel(model, loglikelihood_function)
        # these are used to help parallelize over multiple cores / MPI
models._global_instance = model_call
model_call = models._call_global_model
# Set up the pool
pool = choose_pool(mpi=use_mpi, processes=nprocesses)
# initialize the sampler
self._sampler = ParallelTemperedSampler(
model.sampling_params, model_call, nchains, betas=betas,
swap_interval=swap_interval,
proposals=proposals, default_proposal=default_proposal,
default_proposal_args=default_proposal_args,
seed=seed, pool=pool)
# set other parameters
self.nchains = nchains
self._ntemps = ntemps
self._checkpoint_interval = checkpoint_interval
self._checkpoint_signal = checkpoint_signal
@property
def io(self):
return self._io
@property
def base_shape(self):
return (self.ntemps, self.nchains,)
@property
def betas(self):
"""The inverse temperatures being used."""
return self._sampler.betas
@property
def seed(self):
"""The seed used for epsie's random bit generator.
This is not the same as the seed used for the prior distributions.
"""
return self._sampler.seed
@property
def swap_interval(self):
"""Number of iterations between temperature swaps."""
return self._sampler.swap_interval
@staticmethod
def compute_acf(filename, **kwargs):
r"""Computes the autocorrelation function.
Calls :py:func:`base_multitemper.compute_acf`; see that
function for details.
Parameters
----------
filename : str
Name of a samples file to compute ACFs for.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_multitemper.compute_acf`.
Returns
-------
dict :
Dictionary of arrays giving the ACFs for each parameter. The arrays
will have shape ``ntemps x nchains x niterations``.
"""
return compute_acf(filename, **kwargs)
@staticmethod
def compute_acl(filename, **kwargs):
r"""Computes the autocorrelation length.
Calls :py:func:`base_multitemper.compute_acl`; see that
function for details.
Parameters
-----------
filename : str
Name of a samples file to compute ACLs for.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_multitemper.compute_acl`.
Returns
-------
dict
A dictionary of ntemps-long arrays of the ACLs of each parameter.
"""
return compute_acl(filename, **kwargs)
@property
def acl(self): # pylint: disable=invalid-overridden-method
"""The autocorrelation lengths of the chains.
"""
return acl_from_raw_acls(self.raw_acls)
@property
def effective_nsamples(self): # pylint: disable=invalid-overridden-method
"""The effective number of samples post burn-in that the sampler has
acquired so far.
"""
act = self.act
if act is None:
act = numpy.inf
if self.burn_in is None:
start_iter = 0
else:
start_iter = self.burn_in.burn_in_iteration
nperchain = nsamples_in_chain(start_iter, act, self.niterations)
if self.burn_in is not None:
# ensure that any chain not burned in has zero samples
nperchain[~self.burn_in.is_burned_in] = 0
# and that any chain that is burned in has at least one sample
nperchain[self.burn_in.is_burned_in & (nperchain < 1)] = 1
return int(nperchain.sum())
@property
def samples(self):
"""A dict mapping ``variable_params`` to arrays of samples currently
in memory.
The arrays have shape ``ntemps x nchains x niterations``.
The dictionary also contains sampling parameters.
"""
samples = epsie.array2dict(self._sampler.positions)
# apply boundary conditions
samples = self.model.prior_distribution.apply_boundary_conditions(
**samples)
# apply transforms to go to model's variable params space
if self.model.sampling_transforms is not None:
samples = self.model.sampling_transforms.apply(
samples, inverse=True)
return samples
@property
def model_stats(self):
"""A dict mapping the model's ``default_stats`` to arrays of values.
The arrays have shape ``ntemps x nchains x niterations``.
"""
return epsie.array2dict(self._sampler.blobs)
def clear_samples(self):
"""Clears the chain and blobs from memory.
"""
        # store the iteration that the clear is occurring on
self._lastclear = self.niterations
self._itercounter = 0
# now clear the sampler
self._sampler.clear()
def set_state_from_file(self, filename):
"""Sets the state of the sampler back to the instance saved in a file.
"""
with self.io(filename, 'r') as fp:
# get the numpy state
numpy_rstate_group = '/'.join([fp.sampler_group,
'numpy_random_state'])
rstate = fp.read_random_state(group=numpy_rstate_group)
# set the sampler state for epsie
self._sampler.set_state_from_checkpoint(fp, path=fp.sampler_group)
# set the global numpy random state for pycbc
numpy.random.set_state(rstate)
def set_p0(self, samples_file=None, prior=None):
p0 = super(EpsieSampler, self).set_p0(samples_file=samples_file,
prior=prior)
self._sampler.start_position = p0
@property
def pos(self):
"""A dictionary of the current chain positions."""
# we override BaseMCMC's pos property because this can be directly
# retrieved from epsie
return self._sampler.current_positions
def run_mcmc(self, niterations):
"""Advance the chains for a number of iterations.
Parameters
----------
niterations : int
Number of samples to get from sampler.
"""
self._sampler.run(niterations)
def write_results(self, filename):
"""Writes samples, model stats, acceptance ratios, and random state
to the given file.
Parameters
-----------
filename : str
The file to write to. The file is opened using the ``io`` class
            in an append state.
"""
with self.io(filename, 'a') as fp:
# write samples
fp.write_samples(self.samples,
parameters=self.model.variable_params,
last_iteration=self.niterations)
# write stats
fp.write_samples(self.model_stats, last_iteration=self.niterations)
            # write acceptance ratio
acceptance = self._sampler.acceptance
fp.write_acceptance_ratio(acceptance['acceptance_ratio'],
last_iteration=self.niterations)
# write temperature data
if self.ntemps > 1:
temp_ar = self._sampler.temperature_acceptance
temp_swaps = self._sampler.temperature_swaps
fp.write_temperature_data(temp_swaps, temp_ar,
self.swap_interval,
last_iteration=self.niterations)
# write numpy's global state (for the distributions)
numpy_rstate_group = '/'.join([fp.sampler_group,
'numpy_random_state'])
fp.write_random_state(group=numpy_rstate_group)
# write the sampler's state
self._sampler.checkpoint(fp, path=fp.sampler_group)
def finalize(self):
pass
@classmethod
def from_config(cls, cp, model, output_file=None, nprocesses=1,
use_mpi=False):
"""Loads the sampler from the given config file.
The following options are retrieved in the ``[sampler]`` section:
* ``name`` :
            (required) must match the sampler's name
* ``nchains`` :
(required) the number of chains to use
* ``ntemps`` :
The number of temperatures to use. Either this, or
``inverse-temperatures-file`` must be provided (but not both).
* ``inverse-temperatures-file`` :
Path to an hdf file containing the inverse temperatures ("betas")
to use. The betas will be retrieved from the file's
``.attrs['betas']``. Either this or ``ntemps`` must be provided
(but not both).
* ``niterations`` :
The number of iterations to run the sampler for. Either this or
``effective-nsamples`` must be provided (but not both).
* ``effective-nsamples`` :
Run the sampler until the given number of effective samples are
obtained. A ``checkpoint-interval`` must also be provided in this
case. Either this or ``niterations`` must be provided (but not
both).
* ``thin-interval`` :
Thin the samples by the given value before saving to disk. May
provide this, or ``max-samples-per-chain``, but not both. If
            neither option is provided, will save all samples.
* ``max-samples-per-chain`` :
Thin the samples such that the number of samples per chain per
temperature that are saved to disk never exceeds the given value.
May provide this, or ``thin-interval``, but not both. If neither
            option is provided, will save all samples.
* ``checkpoint-interval`` :
Sets the checkpoint interval to use. Must be provided if using
``effective-nsamples``.
* ``checkpoint-signal`` :
Set the checkpoint signal, e.g., "USR2". Optional.
* ``seed`` :
The seed to use for epsie's random number generator. If not
provided, epsie will create one.
* ``logl-function`` :
The attribute of the model to use for the loglikelihood. If
not provided, will default to ``loglikelihood``.
* ``swap-interval`` :
The number of iterations between temperature swaps. Default is 1.
Jump proposals must be provided for every sampling
parameter. These are retrieved from subsections
``[jump_proposal-{params}]``, where params is a
:py:const:`pycbc.VARARGS_DELIM` separated list of parameters the
proposal should be used for. See
:py:func:`inference.jump.epsie_proposals_from_config` for
details.
.. note::
Jump proposals should be specified for **sampling parameters**,
not **variable parameters**.
Settings for burn-in tests are read from ``[sampler-burn_in]``. In
particular, the ``burn-in-test`` option is used to set the burn in
tests to perform. See
:py:func:`MultiTemperedMCMCBurnInTests.from_config` for details. If no
``burn-in-test`` is provided, no burn in tests will be carried out.
Parameters
----------
cp : WorkflowConfigParser instance
Config file object to parse.
model : pycbc.inference.model.BaseModel instance
The model to use.
output_file : str, optional
The name of the output file to checkpoint and write results to.
nprocesses : int, optional
The number of parallel processes to use. Default is 1.
use_mpi : bool, optional
Use MPI for parallelization. Default is False.
Returns
-------
EpsiePTSampler :
The sampler instance.
"""
section = "sampler"
# check name
assert cp.get(section, "name") == cls.name, (
"name in section [sampler] must match mine")
nchains = int(cp.get(section, "nchains"))
seed = get_optional_arg_from_config(cp, section, 'seed', dtype=int)
ntemps, betas = cls.betas_from_config(cp, section)
# get the swap interval
swap_interval = get_optional_arg_from_config(cp, section,
'swap-interval',
dtype=int)
if swap_interval is None:
swap_interval = 1
# get the checkpoint interval, if it's specified
checkpoint_interval = cls.checkpoint_from_config(cp, section)
checkpoint_signal = cls.ckpt_signal_from_config(cp, section)
# get the loglikelihood function
logl = get_optional_arg_from_config(cp, section, 'logl-function')
# get the proposals
proposals = epsie_proposals_from_config(cp)
# check that all of the sampling parameters have a specified
# proposal
sampling_params = set(model.sampling_params)
proposal_params = set(param for prop in proposals
for param in prop.parameters)
missing = sampling_params - proposal_params
if missing:
raise ValueError("Missing jump proposals for sampling parameters "
"{}".format(', '.join(missing)))
# initialize
obj = cls(model, nchains,
ntemps=ntemps, betas=betas, proposals=proposals,
swap_interval=swap_interval, seed=seed,
checkpoint_interval=checkpoint_interval,
checkpoint_signal=checkpoint_signal,
loglikelihood_function=logl,
nprocesses=nprocesses, use_mpi=use_mpi)
# set target
obj.set_target_from_config(cp, section)
# add burn-in if it's specified
obj.set_burn_in_from_config(cp)
# set prethin options
obj.set_thin_interval_from_config(cp, section)
# Set up the output file
setup_output(obj, output_file)
if obj.new_checkpoint:
obj.set_start_from_config(cp)
else:
obj.resume_from_checkpoint()
return obj
class _EpsieCallModel(object):
"""Model wrapper for epsie.
    Allows the model to be called like a function. The call returns the log
    likelihood, the log prior, and the model's default stats.
"""
def __init__(self, model, loglikelihood_function=None):
self.model = model
if loglikelihood_function is None:
loglikelihood_function = 'loglikelihood'
self.loglikelihood_function = loglikelihood_function
def __call__(self, **kwargs):
"""Calls update, then calls the loglikelihood and logprior."""
self.model.update(**kwargs)
logp = self.model.logprior
if logp == -numpy.inf:
# don't try to call the log likelihood if the prior rules it out
logl = numpy.nan
else:
logl = getattr(self.model, self.loglikelihood_function)
return logl, logp, self.model.current_stats
| 19,831
| 38.983871
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/sampler/cpnest.py
|
# Copyright (C) 2019 Collin Capano, Sumit Kumar
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides classes and functions for using the cpnest sampler
package for parameter estimation.
"""
import logging
import os
import array
import cpnest
import cpnest.model as cpm
from pycbc.inference.io import (CPNestFile, validate_checkpoint_files)
from .base import (BaseSampler, setup_output)
from .base_mcmc import get_optional_arg_from_config
#
# =============================================================================
#
# Samplers
#
# =============================================================================
#
class CPNestSampler(BaseSampler):
"""This class is used to construct an CPNest sampler from the cpnest
package by John Veitch.
Parameters
----------
model : model
A model from ``pycbc.inference.models``.
nlive : int
Number of live points to use in sampler.
    maxmcmc : int, optional
        Maximum number of MCMC steps cpnest may take when evolving a live
        point.
    nthreads : int, optional
        Number of parallel threads for cpnest to use.
    verbose : int, optional
        Verbosity level to pass to cpnest.
"""
name = "cpnest"
_io = CPNestFile
def __init__(self, model, nlive, maxmcmc=1000, nthreads=1, verbose=1,
loglikelihood_function=None):
self.model = model
self.nlive = nlive
self.maxmcmc = maxmcmc
self.nthreads = nthreads
self.verbose = verbose
# create a wrapper for calling the model
self.model_call = CPNestModel(model, loglikelihood_function)
self._sampler = None
self._nested_samples = None
self._posterior_samples = None
self._logz = None
self._dlogz = None
self.checkpoint_file = None
def run(self):
out_dir = os.path.dirname(os.path.abspath(self.checkpoint_file))
if self._sampler is None:
            self._sampler = cpnest.CPNest(self.model_call,
                                          verbose=self.verbose,
                                          output=out_dir,
                                          nthreads=self.nthreads,
                                          nlive=self.nlive,
                                          maxmcmc=self.maxmcmc, resume=True)
res = self._sampler.run()
@property
def io(self):
return self._io
@property
def niterations(self):
return len(tuple(self.samples.values())[0])
@classmethod
def from_config(cls, cp, model, output_file=None, nprocesses=1,
use_mpi=False):
"""
Loads the sampler from the given config file.
"""
section = "sampler"
# check name
assert cp.get(section, "name") == cls.name, (
"name in section [sampler] must match mine")
# get the number of live points to use
nlive = int(cp.get(section, "nlive"))
maxmcmc = int(cp.get(section, "maxmcmc"))
nthreads = int(cp.get(section, "nthreads"))
verbose = int(cp.get(section, "verbose"))
loglikelihood_function = \
get_optional_arg_from_config(cp, section, 'loglikelihood-function')
obj = cls(model, nlive=nlive, maxmcmc=maxmcmc, nthreads=nthreads,
verbose=verbose,
loglikelihood_function=loglikelihood_function)
setup_output(obj, output_file, check_nsamples=False)
if not obj.new_checkpoint:
obj.resume_from_checkpoint()
return obj
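    # Illustrative sketch (not part of the original module): a minimal
    # ``[sampler]`` section that ``from_config`` above would accept. All four
    # numeric options are read without defaults, so each must be present; the
    # values shown here are hypothetical, not recommendations.
    #
    #   [sampler]
    #   name = cpnest
    #   nlive = 1000
    #   maxmcmc = 1000
    #   nthreads = 2
    #   verbose = 1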
def checkpoint(self):
pass
def finalize(self):
logz = self._sampler.NS.logZ
dlogz = 0.1 #######FIXME!!!!!###############
logging.info("log Z, dlog Z: {}, {}".format(logz, dlogz))
for fn in [self.checkpoint_file]:
with self.io(fn, "a") as fp:
fp.write_logevidence(logz, dlogz)
logging.info("Writing samples to files")
for fn in [self.checkpoint_file, self.backup_file]:
self.write_results(fn)
logging.info("Validating checkpoint and backup files")
checkpoint_valid = validate_checkpoint_files(
self.checkpoint_file, self.backup_file, check_nsamples=False)
if not checkpoint_valid:
raise IOError("error writing to checkpoint file")
@property
def model_stats(self):
logl = self._sampler.posterior_samples['logL']
logp = self._sampler.posterior_samples['logPrior']
return {'loglikelihood': logl, 'logprior': logp}
@property
def samples(self):
samples_dict = {p: self._sampler.posterior_samples[p] for p in
self.posterior_samples.dtype.names}
return samples_dict
def set_initial_conditions(self, initial_distribution=None,
samples_file=None):
"""Sets up the starting point for the sampler.
Should also set the sampler's random state.
"""
pass
def resume_from_checkpoint(self):
pass
def write_results(self, filename):
"""Writes samples, model stats, acceptance fraction, and random state
to the given file.
Parameters
-----------
filename : str
The file to write to. The file is opened using the ``io`` class
            in an append state.
"""
with self.io(filename, 'a') as fp:
# write samples
fp.write_samples(self.samples, self.model.variable_params)
# write stats
fp.write_samples(self.model_stats)
# write log evidence
fp.write_logevidence(self._sampler.NS.logZ, 0.1)
@property
def nested_samples(self):
return self._sampler.nested_samples
@property
def posterior_samples(self):
return self._sampler.posterior_samples
@property
def logz(self):
return self._logz
@property
def dlogz(self):
return self._dlogz
class CPNestModel(cpm.Model):
"""
Class for making PyCBC Inference 'model class'
compatible with CPNest 'model class'
Parameters
----------
model : inference.BaseModel instance
A model instance from pycbc.
"""
def __init__(self, model, loglikelihood_function=None):
if model.sampling_transforms is not None:
raise ValueError("CPNest does not support sampling transforms")
self.model = model
self.names = list(model.sampling_params)
        # set up loglikelihood_function
if loglikelihood_function is None:
loglikelihood_function = 'loglikelihood'
self.loglikelihood_function = loglikelihood_function
bounds = {}
for dist in model.prior_distribution.distributions:
bounds.update(dist.bounds)
self.bounds = [bounds[params] for params in self.names]
def new_point(self):
point = self.model.prior_rvs()
return cpm.LivePoint(list(self.model.sampling_params),
array.array('d', [point[p] for p in self.model.sampling_params]))
    def log_prior(self, xx):
self.model.update(**xx)
return self.model.logprior
def log_likelihood(self, xx):
"""
Modify the log likelihood which will be passed to CPNest 'model class'
"""
self.model.update(**xx)
return getattr(self.model, self.loglikelihood_function)
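# Illustrative sketch (not part of the original module): how the wrapper class
# above is typically driven. ``my_model`` is a hypothetical
# pycbc.inference.models instance with no sampling transforms; the keyword
# values passed to cpnest.CPNest mirror those used by CPNestSampler.run above.
#
#   import cpnest
#   wrapped = CPNestModel(my_model)
#   nest = cpnest.CPNest(wrapped, nlive=500, nthreads=1, output='cpnest_out')
#   nest.run()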
| 8,274
| 33.053498
| 94
|
py
|
pycbc
|
pycbc-master/pycbc/inference/sampler/base_multitemper.py
|
# Copyright (C) 2018 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Provides constructor classes provide support for parallel tempered MCMC
samplers."""
import logging
import numpy
import h5py
from pycbc.filter import autocorrelation
from pycbc.inference.io import loadfile
class MultiTemperedSupport(object):
"""Provides methods for supporting multi-tempered samplers.
"""
_ntemps = None
@property
def ntemps(self):
"""The number of temeratures that are set."""
return self._ntemps
@staticmethod
def betas_from_config(cp, section):
"""Loads number of temperatures or betas from a config file.
This looks in the given section for:
* ``ntemps`` :
The number of temperatures to use. Either this, or
``inverse-temperatures-file`` must be provided (but not both).
* ``inverse-temperatures-file`` :
Path to an hdf file containing the inverse temperatures ("betas")
to use. The betas will be retrieved from the file's
``.attrs['betas']``. Either this or ``ntemps`` must be provided
(but not both).
Parameters
----------
cp : WorkflowConfigParser instance
Config file object to parse.
section : str
The name of the section to look in.
Returns
-------
ntemps : int or None
The number of temperatures to use, if it was provided.
betas : array
The array of betas to use, if a inverse-temperatures-file was
provided.
"""
if cp.has_option(section, "ntemps") and \
cp.has_option(section, "inverse-temperatures-file"):
raise ValueError("Must specify either ntemps or "
"inverse-temperatures-file, not both.")
if cp.has_option(section, "inverse-temperatures-file"):
# get the path of the file containing inverse temperatures values.
inverse_temperatures_file = cp.get(section,
"inverse-temperatures-file")
betas = read_betas_from_hdf(inverse_temperatures_file)
ntemps = betas.shape[0]
else:
# get the number of temperatures
betas = None
ntemps = int(cp.get(section, "ntemps"))
return ntemps, betas
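    # Illustrative sketch (not part of the original module): the two mutually
    # exclusive ways ``betas_from_config`` can be configured. The section
    # name, file name, and values are hypothetical.
    #
    #   [sampler]
    #   ntemps = 4
    #
    # or, instead of ``ntemps``:
    #
    #   [sampler]
    #   inverse-temperatures-file = betas.hdf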
def read_betas_from_hdf(filename):
"""Loads inverse temperatures from the given file.
"""
# get the path of the file containing inverse temperatures values.
with h5py.File(filename, "r") as fp:
try:
betas = numpy.array(fp.attrs['betas'])
            # betas must be in descending order
betas = numpy.sort(betas)[::-1]
except KeyError:
raise AttributeError("No attribute called betas")
return betas
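# Illustrative sketch (not part of the original module): creating an
# inverse-temperatures file that ``read_betas_from_hdf`` can load. The file
# name and beta values are hypothetical; the betas are sorted into descending
# order on read regardless of how they are stored.
#
#   import numpy, h5py
#   with h5py.File('betas.hdf', 'w') as fp:
#       fp.attrs['betas'] = numpy.geomspace(1.0, 1.0e-4, num=8)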
#
# =============================================================================
#
# Functions for computing autocorrelation lengths
#
# =============================================================================
#
def compute_acf(filename, start_index=None, end_index=None,
chains=None, parameters=None, temps=None):
"""Computes the autocorrleation function for independent MCMC chains with
parallel tempering.
Parameters
-----------
filename : str
Name of a samples file to compute ACFs for.
start_index : int, optional
The start index to compute the acl from. If None (the default),
will try to use the burn in iteration for each chain;
otherwise, will start at the first sample.
end_index : {None, int}
The end index to compute the acl to. If None, will go to the end
of the current iteration.
chains : optional, int or array
Calculate the ACF for only the given chains. If None (the
default) ACFs for all chains will be estimated.
parameters : optional, str or array
Calculate the ACF for only the given parameters. If None (the
default) will calculate the ACF for all of the model params.
temps : optional, (list of) int or 'all'
The temperature index (or list of indices) to retrieve. If None
(the default), the ACF will only be computed for the coldest (= 0)
        temperature chain. To compute an ACF for all temperatures pass 'all',
or a list of all of the temperatures.
Returns
-------
dict :
Dictionary parameter name -> ACF arrays. The arrays have shape
``ntemps x nchains x niterations``.
"""
acfs = {}
with loadfile(filename, 'r') as fp:
if parameters is None:
parameters = fp.variable_params
if isinstance(parameters, str):
parameters = [parameters]
temps = _get_temps_idx(fp, temps)
if chains is None:
chains = numpy.arange(fp.nchains)
for param in parameters:
subacfs = []
for tk in temps:
subsubacfs = []
for ci in chains:
samples = fp.read_raw_samples(
param, thin_start=start_index, thin_interval=1,
thin_end=end_index, chains=ci, temps=tk)[param]
thisacf = autocorrelation.calculate_acf(samples).numpy()
subsubacfs.append(thisacf)
# stack the chains
subacfs.append(subsubacfs)
# stack the temperatures
acfs[param] = numpy.stack(subacfs)
return acfs
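# Illustrative sketch (not part of the original module): typical use of
# ``compute_acf``. The file name and parameter name are hypothetical; each
# returned array has shape ntemps x nchains x niterations.
#
#   acfs = compute_acf('samples.hdf', temps='all')
#   acf_mchirp = acfs['mchirp']  # assumes 'mchirp' is a variable parameter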
def compute_acl(filename, start_index=None, end_index=None,
min_nsamples=10):
"""Computes the autocorrleation length for independent MCMC chains with
parallel tempering.
ACLs are calculated separately for each chain.
Parameters
-----------
filename : str
Name of a samples file to compute ACLs for.
start_index : {None, int}
The start index to compute the acl from. If None, will try to use
the number of burn-in iterations in the file; otherwise, will start
at the first sample.
end_index : {None, int}
The end index to compute the acl to. If None, will go to the end
of the current iteration.
min_nsamples : int, optional
Require a minimum number of samples to compute an ACL. If the
number of samples per walker is less than this, will just set to
``inf``. Default is 10.
Returns
-------
dict
A dictionary of ntemps x nchains arrays of the ACLs of each
parameter.
"""
# following is a convenience function to calculate the acl for each chain
# defined here so that we can use map for this below
def _getacl(si):
# si: the samples loaded for a specific chain; may have nans in it
si = si[~numpy.isnan(si)]
if len(si) < min_nsamples:
acl = numpy.inf
else:
acl = autocorrelation.calculate_acl(si)
if acl <= 0:
acl = numpy.inf
return acl
acls = {}
with loadfile(filename, 'r') as fp:
tidx = numpy.arange(fp.ntemps)
for param in fp.variable_params:
these_acls = numpy.zeros((fp.ntemps, fp.nchains))
for tk in tidx:
samples = fp.read_raw_samples(
param, thin_start=start_index, thin_interval=1,
thin_end=end_index, temps=tk, flatten=False)[param]
# flatten out the temperature
samples = samples[0, ...]
# samples now has shape nchains x maxiters
if samples.shape[-1] < min_nsamples:
these_acls[tk, :] = numpy.inf
else:
these_acls[tk, :] = list(map(_getacl, samples))
acls[param] = these_acls
# report the mean ACL: take the max over the temps and parameters
act = acl_from_raw_acls(acls)*fp.thinned_by
finite = act[numpy.isfinite(act)]
logging.info("ACTs: min %s, mean (of finite) %s, max %s",
str(act.min()),
str(finite.mean() if finite.size > 0 else numpy.inf),
str(act.max()))
return acls
def acl_from_raw_acls(acls):
"""Calculates the ACL for one or more chains from a dictionary of ACLs.
This is for parallel tempered MCMCs in which the chains are independent
of each other.
The ACL for each chain is maximized over the temperatures and parameters.
Parameters
----------
acls : dict
Dictionary of parameter names -> ntemps x nchains arrays of ACLs (the
thing returned by :py:func:`compute_acl`).
Returns
-------
array
The ACL of each chain.
"""
return numpy.array(list(acls.values())).max(axis=0).max(axis=0)
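# Illustrative sketch (not part of the original module): the reduction done by
# ``acl_from_raw_acls``. Stacking the per-parameter arrays gives shape
# (nparams, ntemps, nchains); the two ``max(axis=0)`` calls then maximize over
# parameters and temperatures, leaving one ACL per chain.
#
#   import numpy
#   acls = {'p1': numpy.array([[2., 8.], [4., 3.]]),
#           'p2': numpy.array([[5., 1.], [6., 2.]])}
#   acl_from_raw_acls(acls)  # -> array([6., 8.])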
def ensemble_compute_acf(filename, start_index=None, end_index=None,
per_walker=False, walkers=None, parameters=None,
temps=None):
"""Computes the autocorrleation function for a parallel tempered, ensemble
MCMC.
By default, parameter values are averaged over all walkers at each
iteration. The ACF is then calculated over the averaged chain for each
temperature. An ACF per-walker will be returned instead if
``per_walker=True``.
Parameters
----------
filename : str
Name of a samples file to compute ACFs for.
start_index : int, optional
The start index to compute the acl from. If None (the default), will
try to use the number of burn-in iterations in the file; otherwise,
will start at the first sample.
end_index : int, optional
The end index to compute the acl to. If None (the default), will go to
the end of the current iteration.
per_walker : bool, optional
Return the ACF for each walker separately. Default is False.
walkers : int or array, optional
Calculate the ACF using only the given walkers. If None (the
default) all walkers will be used.
parameters : str or array, optional
Calculate the ACF for only the given parameters. If None (the
default) will calculate the ACF for all of the model params.
temps : (list of) int or 'all', optional
The temperature index (or list of indices) to retrieve. If None
(the default), the ACF will only be computed for the coldest (= 0)
        temperature chain. To compute an ACF for all temperatures pass 'all',
or a list of all of the temperatures.
Returns
-------
dict :
Dictionary of arrays giving the ACFs for each parameter. If
``per-walker`` is True, the arrays will have shape
``ntemps x nwalkers x niterations``. Otherwise, the returned array
will have shape ``ntemps x niterations``.
"""
acfs = {}
with loadfile(filename, 'r') as fp:
if parameters is None:
parameters = fp.variable_params
if isinstance(parameters, str):
parameters = [parameters]
temps = _get_temps_idx(fp, temps)
for param in parameters:
subacfs = []
for tk in temps:
if per_walker:
# just call myself with a single walker
if walkers is None:
walkers = numpy.arange(fp.nwalkers)
arrays = [ensemble_compute_acf(filename,
start_index=start_index,
end_index=end_index,
per_walker=False,
walkers=ii,
parameters=param,
temps=tk)[param][0, :]
for ii in walkers]
# we'll stack all of the walker arrays to make a single
# nwalkers x niterations array; when these are stacked
# below, we'll get a ntemps x nwalkers x niterations
# array
subacfs.append(numpy.vstack(arrays))
else:
samples = fp.read_raw_samples(
param, thin_start=start_index,
thin_interval=1, thin_end=end_index,
walkers=walkers, temps=tk, flatten=False)[param]
# contract the walker dimension using the mean, and
# flatten the (length 1) temp dimension
samples = samples.mean(axis=1)[0, :]
thisacf = autocorrelation.calculate_acf(
samples).numpy()
subacfs.append(thisacf)
# stack the temperatures
acfs[param] = numpy.stack(subacfs)
return acfs
def ensemble_compute_acl(filename, start_index=None, end_index=None,
min_nsamples=10):
"""Computes the autocorrleation length for a parallel tempered, ensemble
MCMC.
Parameter values are averaged over all walkers at each iteration and
temperature. The ACL is then calculated over the averaged chain.
Parameters
-----------
filename : str
Name of a samples file to compute ACLs for.
start_index : int, optional
The start index to compute the acl from. If None (the default), will
try to use the number of burn-in iterations in the file; otherwise,
will start at the first sample.
end_index : int, optional
The end index to compute the acl to. If None, will go to the end
of the current iteration.
min_nsamples : int, optional
Require a minimum number of samples to compute an ACL. If the
number of samples per walker is less than this, will just set to
``inf``. Default is 10.
Returns
-------
dict
A dictionary of ntemps-long arrays of the ACLs of each parameter.
"""
acls = {}
with loadfile(filename, 'r') as fp:
if end_index is None:
end_index = fp.niterations
tidx = numpy.arange(fp.ntemps)
for param in fp.variable_params:
these_acls = numpy.zeros(fp.ntemps)
for tk in tidx:
samples = fp.read_raw_samples(
param, thin_start=start_index, thin_interval=1,
thin_end=end_index, temps=tk, flatten=False)[param]
# contract the walker dimension using the mean, and flatten
# the (length 1) temp dimension
samples = samples.mean(axis=1)[0, :]
if samples.size < min_nsamples:
acl = numpy.inf
else:
acl = autocorrelation.calculate_acl(samples)
if acl <= 0:
acl = numpy.inf
these_acls[tk] = acl
acls[param] = these_acls
maxacl = numpy.array(list(acls.values())).max()
logging.info("ACT: %s", str(maxacl*fp.thinned_by))
return acls
def _get_temps_idx(fp, temps):
"""Gets the indices of temperatures to load for computing ACF.
"""
if isinstance(temps, int):
temps = [temps]
elif temps == 'all':
temps = numpy.arange(fp.ntemps)
elif temps is None:
temps = [0]
return temps
| 16,327
| 38.062201
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/sampler/dynesty.py
|
# Copyright (C) 2019 Collin Capano, Sumit Kumar, Prayush Kumar
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides classes and functions for using the dynesty sampler
package for parameter estimation.
"""
import logging
import time
import numpy
import dynesty, dynesty.dynesty, dynesty.nestedsamplers
from pycbc.pool import choose_pool
from dynesty import utils as dyfunc
from pycbc.inference.io import (DynestyFile, validate_checkpoint_files,
loadfile)
from .base import (BaseSampler, setup_output)
from .base_mcmc import get_optional_arg_from_config
from .base_cube import setup_calls
from .. import models
#
# =============================================================================
#
# Samplers
#
# =============================================================================
#
class DynestySampler(BaseSampler):
"""This class is used to construct an Dynesty sampler from the dynesty
package.
Parameters
----------
model : model
A model from ``pycbc.inference.models``.
nlive : int
Number of live points to use in sampler.
pool : function with map, Optional
A provider of a map function that allows a function call to be run
over multiple sets of arguments and possibly maps them to
cores/nodes/etc.
"""
name = "dynesty"
_io = DynestyFile
def __init__(self, model, nlive, nprocesses=1,
checkpoint_time_interval=None, maxcall=None,
loglikelihood_function=None, use_mpi=False,
no_save_state=False,
run_kwds=None,
extra_kwds=None,
internal_kwds=None,
**kwargs):
self.model = model
self.no_save_state = no_save_state
log_likelihood_call, prior_call = setup_calls(
model,
loglikelihood_function=loglikelihood_function,
copy_prior=True)
# Set up the pool
self.pool = choose_pool(mpi=use_mpi, processes=nprocesses)
self.maxcall = maxcall
self.checkpoint_time_interval = checkpoint_time_interval
self.run_kwds = {} if run_kwds is None else run_kwds
self.extra_kwds = {} if extra_kwds is None else extra_kwds
self.internal_kwds = {} if internal_kwds is None else internal_kwds
self.nlive = nlive
self.names = model.sampling_params
self.ndim = len(model.sampling_params)
self.checkpoint_file = None
# Enable checkpointing if checkpoint_time_interval is set in config
# file in sampler section
if self.checkpoint_time_interval:
self.run_with_checkpoint = True
if self.maxcall is None:
self.maxcall = 5000 * self.pool.size
logging.info("Checkpointing enabled, will verify every %s calls"
" and try to checkpoint every %s seconds",
self.maxcall, self.checkpoint_time_interval)
else:
self.run_with_checkpoint = False
# Check for cyclic boundaries
periodic = []
cyclic = self.model.prior_distribution.cyclic
for i, param in enumerate(self.variable_params):
if param in cyclic:
logging.info('Param: %s will be cyclic', param)
periodic.append(i)
if len(periodic) == 0:
periodic = None
# Check for reflected boundaries. Dynesty only supports
# reflection on both min and max of boundary.
reflective = []
reflect = self.model.prior_distribution.well_reflected
for i, param in enumerate(self.variable_params):
if param in reflect:
logging.info("Param: %s will be well reflected", param)
reflective.append(i)
if len(reflective) == 0:
reflective = None
if 'sample' in extra_kwds:
if 'rwalk2' in extra_kwds['sample']:
dynesty.dynesty._SAMPLING["rwalk"] = sample_rwalk_mod
dynesty.nestedsamplers._SAMPLING["rwalk"] = sample_rwalk_mod
extra_kwds['sample'] = 'rwalk'
if self.nlive < 0:
# Interpret a negative input value for the number of live points
# (which is clearly an invalid input in all senses)
# as the desire to dynamically determine that number
self._sampler = dynesty.DynamicNestedSampler(log_likelihood_call,
prior_call, self.ndim,
pool=self.pool,
reflective=reflective,
periodic=periodic,
**extra_kwds)
self.run_with_checkpoint = False
logging.info("Checkpointing not currently supported with"
"DYNAMIC nested sampler")
else:
self._sampler = dynesty.NestedSampler(log_likelihood_call,
prior_call, self.ndim,
nlive=self.nlive,
reflective=reflective,
periodic=periodic,
pool=self.pool, **extra_kwds)
self._sampler.kwargs.update(internal_kwds)
# properties of the internal sampler which should not be pickled
self.no_pickle = ['loglikelihood',
'prior_transform',
'propose_point',
'update_proposal',
'_UPDATE', '_PROPOSE',
'evolve_point', 'use_pool', 'queue_size',
'use_pool_ptform', 'use_pool_logl',
'use_pool_evolve', 'use_pool_update',
'pool', 'M']
def run(self):
diff_niter = 1
if self.run_with_checkpoint is True:
n_checkpointing = 1
t0 = time.time()
it = self._sampler.it
logging.info('Starting from iteration: %s', it)
while diff_niter != 0:
self._sampler.run_nested(maxcall=self.maxcall, **self.run_kwds)
delta_t = time.time() - t0
diff_niter = self._sampler.it - it
logging.info("Checking if we should checkpoint: %.2f s", delta_t)
if delta_t >= self.checkpoint_time_interval:
logging.info('Checkpointing N={}'.format(n_checkpointing))
self.checkpoint()
n_checkpointing += 1
t0 = time.time()
it = self._sampler.it
else:
self._sampler.run_nested(**self.run_kwds)
@property
def io(self):
return self._io
@property
def niterations(self):
return len(tuple(self.samples.values())[0])
@classmethod
def from_config(cls, cp, model, output_file=None, nprocesses=1,
use_mpi=False, loglikelihood_function=None):
"""Loads the sampler from the given config file. Many options are
directly passed to the underlying dynesty sampler, see the official
dynesty documentation for more details on these.
The following options are retrieved in the ``[sampler]`` section:
* ``name = STR``:
Required. This must match the sampler's name.
* ``maxiter = INT``:
The maximum number of iterations to run.
* ``dlogz = FLOAT``:
The target dlogz stopping condition.
* ``logl_max = FLOAT``:
The maximum logl stopping condition.
* ``n_effective = INT``:
Target effective number of samples stopping condition
* ``sample = STR``:
The method to sample the space. Should be one of 'uniform',
'rwalk', 'rwalk2' (a modified version of rwalk), or 'slice'.
* ``walk = INT``:
Used for some of the walk methods. Sets the minimum number of
steps to take when evolving a point.
* ``maxmcmc = INT``:
Used for some of the walk methods. Sets the maximum number of steps
to take when evolving a point.
* ``nact = INT``:
            Used for some of the walk methods. Sets the number of autocorrelation
lengths before terminating evolution of a point.
* ``first_update_min_ncall = INT``:
The minimum number of calls before updating the bounding region
for the first time.
        * ``first_update_min_eff = FLOAT``:
            Don't update the bounding region until the efficiency drops
below this value.
* ``bound = STR``:
The method of bounding of the prior volume.
Should be one of 'single', 'balls', 'cubes', 'multi' or 'none'.
* ``update_interval = INT``:
Number of iterations between updating the bounding regions
* ``enlarge = FLOAT``:
            Factor to enlarge the bounding region.
* ``bootstrap = INT``:
The number of bootstrap iterations to determine the enlargement
factor.
* ``maxcall = INT``:
The maximum number of calls before checking if we should checkpoint
* ``checkpoint_time_interval``:
Sets the time in seconds between checkpointing.
* ``loglikelihood-function``:
The attribute of the model to use for the loglikelihood. If
not provided, will default to ``loglikelihood``.
Parameters
----------
cp : WorkflowConfigParser instance
Config file object to parse.
model : pycbc.inference.model.BaseModel instance
The model to use.
output_file : str, optional
The name of the output file to checkpoint and write results to.
nprocesses : int, optional
The number of parallel processes to use. Default is 1.
use_mpi : bool, optional
Use MPI for parallelization. Default is False.
Returns
-------
DynestySampler :
The sampler instance.
"""
section = "sampler"
# check name
assert cp.get(section, "name") == cls.name, (
"name in section [sampler] must match mine")
# get the number of live points to use
nlive = int(cp.get(section, "nlive"))
loglikelihood_function = \
get_optional_arg_from_config(cp, section, 'loglikelihood-function')
no_save_state = cp.has_option(section, 'no-save-state')
# optional run_nested arguments for dynesty
rargs = {'maxiter': int,
'dlogz': float,
'logl_max': float,
'n_effective': int,
}
# optional arguments for dynesty
cargs = {'bound': str,
'bootstrap': int,
'enlarge': float,
'update_interval': float,
'sample': str,
'first_update_min_ncall': int,
'first_update_min_eff': float,
'walks': int,
}
# optional arguments that must be set internally
internal_args = {
'maxmcmc': int,
'nact': int,
}
extra = {}
run_extra = {}
internal_extra = {}
for args, argt in [(extra, cargs),
(run_extra, rargs),
(internal_extra, internal_args),
]:
for karg in argt:
if cp.has_option(section, karg):
args[karg] = argt[karg](cp.get(section, karg))
        # This arg needs to be a dict
first_update = {}
if 'first_update_min_ncall' in extra:
first_update['min_ncall'] = extra.pop('first_update_min_ncall')
logging.info('First update: min_ncall:%s',
first_update['min_ncall'])
if 'first_update_min_eff' in extra:
first_update['min_eff'] = extra.pop('first_update_min_eff')
logging.info('First update: min_eff:%s', first_update['min_eff'])
extra['first_update'] = first_update
# populate options for checkpointing
checkpoint_time_interval = None
maxcall = None
if cp.has_option(section, 'checkpoint_time_interval'):
ck_time = float(cp.get(section, 'checkpoint_time_interval'))
checkpoint_time_interval = ck_time
if cp.has_option(section, 'maxcall'):
maxcall = int(cp.get(section, 'maxcall'))
obj = cls(model, nlive=nlive, nprocesses=nprocesses,
loglikelihood_function=loglikelihood_function,
checkpoint_time_interval=checkpoint_time_interval,
maxcall=maxcall,
no_save_state=no_save_state,
use_mpi=use_mpi, run_kwds=run_extra,
extra_kwds=extra,
internal_kwds=internal_extra,)
setup_output(obj, output_file, check_nsamples=False)
if not obj.new_checkpoint:
obj.resume_from_checkpoint()
return obj
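    # Illustrative sketch (not part of the original module): a minimal
    # ``[sampler]`` section using some of the options documented above. The
    # values are hypothetical, not recommended defaults.
    #
    #   [sampler]
    #   name = dynesty
    #   nlive = 2000
    #   dlogz = 0.1
    #   sample = rwalk
    #   walks = 100
    #   checkpoint_time_interval = 1800
    #   maxcall = 10000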
def checkpoint(self):
"""Checkpoint function for dynesty sampler
"""
# Dynesty has its own __getstate__ which deletes
# random state information and the pool
saved = {}
for key in self.no_pickle:
if hasattr(self._sampler, key):
saved[key] = getattr(self._sampler, key)
setattr(self._sampler, key, None)
for fn in [self.checkpoint_file, self.backup_file]:
with self.io(fn, "a") as fp:
# Write random state
fp.write_random_state()
# Write pickled data
fp.write_pickled_data_into_checkpoint_file(self._sampler)
self.write_results(fn)
# Restore properties that couldn't be pickled if we are continuing
for key in saved:
setattr(self._sampler, key, saved[key])
def resume_from_checkpoint(self):
try:
with loadfile(self.checkpoint_file, 'r') as fp:
sampler = fp.read_pickled_data_from_checkpoint_file()
for key in sampler.__dict__:
if key not in self.no_pickle:
value = getattr(sampler, key)
setattr(self._sampler, key, value)
self.set_state_from_file(self.checkpoint_file)
logging.info("Found valid checkpoint file: %s",
self.checkpoint_file)
except Exception as e:
print(e)
logging.info("Failed to load checkpoint file")
def set_state_from_file(self, filename):
"""Sets the state of the sampler back to the instance saved in a file.
"""
with self.io(filename, 'r') as fp:
state = fp.read_random_state()
            # Dynesty handles most randomness through rstate which is
# pickled along with the class instance
numpy.random.set_state(state)
def finalize(self):
"""Finalze and write it to the results file
"""
logz = self._sampler.results.logz[-1:][0]
dlogz = self._sampler.results.logzerr[-1:][0]
logging.info("log Z, dlog Z: {}, {}".format(logz, dlogz))
if self.no_save_state:
self.write_results(self.checkpoint_file)
else:
self.checkpoint()
logging.info("Validating checkpoint and backup files")
checkpoint_valid = validate_checkpoint_files(
self.checkpoint_file, self.backup_file, check_nsamples=False)
if not checkpoint_valid:
raise IOError("error writing to checkpoint file")
@property
def samples(self):
"""Returns raw nested samples
"""
results = self._sampler.results
samples = results.samples
nest_samp = {}
for i, param in enumerate(self.variable_params):
nest_samp[param] = samples[:, i]
nest_samp['logwt'] = results.logwt
nest_samp['loglikelihood'] = results.logl
return nest_samp
def set_initial_conditions(self, initial_distribution=None,
samples_file=None):
"""Sets up the starting point for the sampler.
Should also set the sampler's random state.
"""
pass
def write_results(self, filename):
"""Writes samples, model stats, acceptance fraction, and random state
to the given file.
Parameters
-----------
filename : str
The file to write to. The file is opened using the ``io`` class
in an an append state.
"""
with self.io(filename, 'a') as fp:
# Write nested samples
fp.write_raw_samples(self.samples)
# Write logz and dlogz
logz = self._sampler.results.logz[-1:][0]
dlogz = self._sampler.results.logzerr[-1:][0]
fp.write_logevidence(logz, dlogz)
@property
def model_stats(self):
pass
@property
def logz(self):
"""
return bayesian evidence estimated by
dynesty sampler
"""
return self._sampler.results.logz[-1:][0]
@property
def logz_err(self):
"""
return error in bayesian evidence estimated by
dynesty sampler
"""
return self._sampler.results.logzerr[-1:][0]
def sample_rwalk_mod(args):
""" Modified version of dynesty.sampling.sample_rwalk
Adapted from version used in bilby/dynesty
"""
try:
# dynesty <= 1.1
from dynesty.utils import unitcheck, reflect
# Unzipping.
(u, loglstar, axes, scale,
prior_transform, loglikelihood, kwargs) = args
except ImportError:
        # dynesty >= 1.2
from dynesty.utils import unitcheck, apply_reflect as reflect
(u, loglstar, axes, scale,
prior_transform, loglikelihood, _, kwargs) = args
rstate = numpy.random
# Bounds
nonbounded = kwargs.get('nonbounded', None)
periodic = kwargs.get('periodic', None)
reflective = kwargs.get('reflective', None)
# Setup.
n = len(u)
walks = kwargs.get('walks', 10 * n) # minimum number of steps
maxmcmc = kwargs.get('maxmcmc', 2000) # Maximum number of steps
nact = kwargs.get('nact', 5) # Number of ACT
old_act = kwargs.get('old_act', walks)
# Initialize internal variables
accept = 0
reject = 0
nfail = 0
act = numpy.inf
u_list = []
v_list = []
logl_list = []
ii = 0
while ii < nact * act:
ii += 1
# Propose a direction on the unit n-sphere.
drhat = rstate.randn(n)
drhat /= numpy.linalg.norm(drhat)
# Scale based on dimensionality.
dr = drhat * rstate.rand() ** (1.0 / n)
# Transform to proposal distribution.
du = numpy.dot(axes, dr)
u_prop = u + scale * du
# Wrap periodic parameters
if periodic is not None:
u_prop[periodic] = numpy.mod(u_prop[periodic], 1)
# Reflect
if reflective is not None:
u_prop[reflective] = reflect(u_prop[reflective])
# Check unit cube constraints.
if u.max() < 0:
break
if unitcheck(u_prop, nonbounded):
pass
else:
nfail += 1
# Only start appending to the chain once a single jump is made
if accept > 0:
u_list.append(u_list[-1])
v_list.append(v_list[-1])
logl_list.append(logl_list[-1])
continue
# Check proposed point.
v_prop = prior_transform(numpy.array(u_prop))
logl_prop = loglikelihood(numpy.array(v_prop))
if logl_prop > loglstar:
u = u_prop
v = v_prop
logl = logl_prop
accept += 1
u_list.append(u)
v_list.append(v)
logl_list.append(logl)
else:
reject += 1
# Only start appending to the chain once a single jump is made
if accept > 0:
u_list.append(u_list[-1])
v_list.append(v_list[-1])
logl_list.append(logl_list[-1])
# If we've taken the minimum number of steps, calculate the ACT
if accept + reject > walks:
act = estimate_nmcmc(
accept_ratio=accept / (accept + reject + nfail),
old_act=old_act, maxmcmc=maxmcmc)
# If we've taken too many likelihood evaluations then break
if accept + reject > maxmcmc:
logging.warning(
"Hit maximum number of walks {} with accept={}, reject={}, "
"and nfail={} try increasing maxmcmc"
.format(maxmcmc, accept, reject, nfail))
break
# If the act is finite, pick randomly from within the chain
if numpy.isfinite(act) and int(.5 * nact * act) < len(u_list):
idx = numpy.random.randint(int(.5 * nact * act), len(u_list))
u = u_list[idx]
v = v_list[idx]
logl = logl_list[idx]
else:
logging.debug("Unable to find a new point using walk: "
"returning a random point")
u = numpy.random.uniform(size=n)
v = prior_transform(u)
logl = loglikelihood(v)
blob = {'accept': accept, 'reject': reject, 'fail': nfail, 'scale': scale}
kwargs["old_act"] = act
ncall = accept + reject
return u, v, logl, ncall, blob
def estimate_nmcmc(accept_ratio, old_act, maxmcmc, safety=5, tau=None):
"""Estimate autocorrelation length of chain using acceptance fraction
    Using ACL = (2/acc) - 1 multiplied by a safety margin. Code adapted from
CPNest:
* https://github.com/johnveitch/cpnest/blob/master/cpnest/sampler.py
* https://github.com/farr/Ensemble.jl
Parameters
----------
accept_ratio: float [0, 1]
Ratio of the number of accepted points to the total number of points
old_act: int
The ACT of the last iteration
maxmcmc: int
The maximum length of the MCMC chain to use
safety: int
A safety factor applied in the calculation
tau: int (optional)
The ACT, if given, otherwise estimated.
"""
if tau is None:
tau = maxmcmc / safety
if accept_ratio == 0.0:
Nmcmc_exact = (1 + 1 / tau) * old_act
else:
Nmcmc_exact = (
(1. - 1. / tau) * old_act +
(safety / tau) * (2. / accept_ratio - 1.)
)
Nmcmc_exact = float(min(Nmcmc_exact, maxmcmc))
return max(safety, int(Nmcmc_exact))
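# Illustrative worked example (not part of the original module): with
# accept_ratio=0.2, old_act=100, maxmcmc=2000 and the default safety=5,
# tau = maxmcmc / safety = 400, so
# Nmcmc_exact = (1 - 1/400) * 100 + (5/400) * (2/0.2 - 1) ~= 99.86,
# and estimate_nmcmc(0.2, 100, 2000) returns max(5, int(99.86)) = 99.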
| 23,999
| 35.923077
| 81
|
py
|
pycbc
|
pycbc-master/pycbc/inference/sampler/emcee_pt.py
|
# Copyright (C) 2016 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides classes and functions for using the emcee_pt sampler
package for parameter estimation.
"""
import numpy
import emcee
import logging
from pycbc.pool import choose_pool
from .base import (BaseSampler, setup_output)
from .base_mcmc import (BaseMCMC, EnsembleSupport, raw_samples_to_dict,
get_optional_arg_from_config)
from .base_multitemper import (MultiTemperedSupport,
ensemble_compute_acf, ensemble_compute_acl)
from ..burn_in import EnsembleMultiTemperedMCMCBurnInTests
from pycbc.inference.io import EmceePTFile
from .. import models
if emcee.__version__ >= '3.0.0':
raise ImportError
class EmceePTSampler(MultiTemperedSupport, EnsembleSupport, BaseMCMC,
BaseSampler):
"""This class is used to construct a parallel-tempered MCMC sampler from
the emcee package's PTSampler.
Parameters
----------
model : model
A model from ``pycbc.inference.models``.
ntemps : int
        Number of temperatures to use in the sampler.
nwalkers : int
Number of walkers to use in sampler.
betas : array
An array of inverse temperature values to be used in emcee_pt's
temperature ladder. If not provided, ``emcee_pt`` will use the number
of temperatures and the number of dimensions of the parameter space to
construct the ladder with geometrically spaced temperatures.
loglikelihood_function : str, optional
Set the function to call from the model for the ``loglikelihood``.
Default is ``loglikelihood``.
nprocesses : int, optional
The number of parallel processes to use. Default is 1
        (no parallelization).
use_mpi : bool, optional
Use MPI for parallelization. Default (False) will use python's
multiprocessing.
"""
name = "emcee_pt"
_io = EmceePTFile
burn_in_class = EnsembleMultiTemperedMCMCBurnInTests
def __init__(self, model, ntemps, nwalkers, betas=None,
checkpoint_interval=None, checkpoint_signal=None,
loglikelihood_function=None,
nprocesses=1, use_mpi=False):
self.model = model
# create a wrapper for calling the model
if loglikelihood_function is None:
loglikelihood_function = 'loglikelihood'
# frustratingly, emcee_pt does not support blob data, so we have to
# turn it off
model_call = models.CallModel(model, loglikelihood_function,
return_all_stats=False)
        # these are used to help parallelize over multiple cores / MPI
models._global_instance = model_call
model_call = models._call_global_model
prior_call = models._call_global_model_logprior
self.pool = choose_pool(mpi=use_mpi, processes=nprocesses)
# construct the sampler: PTSampler needs the likelihood and prior
# functions separately
ndim = len(model.variable_params)
self._sampler = emcee.PTSampler(ntemps, nwalkers, ndim,
model_call, prior_call, pool=self.pool,
betas=betas)
self.nwalkers = nwalkers
self._ntemps = ntemps
self._checkpoint_interval = checkpoint_interval
self._checkpoint_signal = checkpoint_signal
@property
def io(self):
return self._io
@property
def base_shape(self):
return (self.ntemps, self.nwalkers,)
@property
def betas(self):
return self._sampler.betas
@staticmethod
def compute_acf(filename, **kwargs):
r"""Computes the autocorrelation function.
Calls :py:func:`base_multitemper.ensemble_compute_acf`; see that
function for details.
Parameters
----------
filename : str
Name of a samples file to compute ACFs for.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_multitemper.ensemble_compute_acf`.
Returns
-------
dict :
Dictionary of arrays giving the ACFs for each parameter. If
``per-walker=True`` is passed as a keyword argument, the arrays
will have shape ``ntemps x nwalkers x niterations``. Otherwise, the
returned array will have shape ``ntemps x niterations``.
"""
return ensemble_compute_acf(filename, **kwargs)
@staticmethod
def compute_acl(filename, **kwargs):
r"""Computes the autocorrelation length.
Calls :py:func:`base_multitemper.ensemble_compute_acl`; see that
function for details.
Parameters
-----------
filename : str
Name of a samples file to compute ACLs for.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_multitemper.ensemble_compute_acl`.
Returns
-------
dict
A dictionary of ntemps-long arrays of the ACLs of each parameter.
"""
return ensemble_compute_acl(filename, **kwargs)
@classmethod
def from_config(cls, cp, model, output_file=None, nprocesses=1,
use_mpi=False):
"""Loads the sampler from the given config file.
The following options are retrieved in the ``[sampler]`` section:
* ``name`` :
            Required. This must match the sampler's name.
* ``nwalkers`` :
Required. The number of walkers to use.
* ``ntemps`` :
The number of temperatures to use. Either this, or
``inverse-temperatures-file`` must be provided (but not both).
* ``inverse-temperatures-file`` :
Path to an hdf file containing the inverse temperatures ("betas")
to use. The betas will be retrieved from the file's
``.attrs['betas']``. Either this or ``ntemps`` must be provided
(but not both).
* ``niterations`` :
The number of iterations to run the sampler for. Either this or
``effective-nsamples`` must be provided (but not both).
* ``effective-nsamples`` :
Run the sampler until the given number of effective samples are
obtained. A ``checkpoint-interval`` must also be provided in this
case. Either this or ``niterations`` must be provided (but not
both).
* ``thin-interval`` :
Thin the samples by the given value before saving to disk. May
provide this, or ``max-samples-per-chain``, but not both. If
neither options are provided, will save all samples.
* ``max-samples-per-chain`` :
Thin the samples such that the number of samples per chain per
temperature that are saved to disk never exceeds the given value.
May provide this, or ``thin-interval``, but not both. If neither
options are provided, will save all samples.
* ``checkpoint-interval`` :
Sets the checkpoint interval to use. Must be provided if using
``effective-nsamples``.
* ``checkpoint-signal`` :
Set the checkpoint signal, e.g., "USR2". Optional.
* ``logl-function`` :
The attribute of the model to use for the loglikelihood. If
not provided, will default to ``loglikelihood``.
Settings for burn-in tests are read from ``[sampler-burn_in]``. In
particular, the ``burn-in-test`` option is used to set the burn in
tests to perform. See
:py:func:`MultiTemperedMCMCBurnInTests.from_config` for details. If no
``burn-in-test`` is provided, no burn in tests will be carried out.
Parameters
----------
cp : WorkflowConfigParser instance
Config file object to parse.
model : pycbc.inference.model.BaseModel instance
The model to use.
output_file : str, optional
The name of the output file to checkpoint and write results to.
nprocesses : int, optional
The number of parallel processes to use. Default is 1.
use_mpi : bool, optional
Use MPI for parallelization. Default is False.
Returns
-------
EmceePTSampler :
The sampler instance.
"""
section = "sampler"
# check name
assert cp.get(section, "name") == cls.name, (
"name in section [sampler] must match mine")
# get the number of walkers to use
nwalkers = int(cp.get(section, "nwalkers"))
# get the temps/betas
ntemps, betas = cls.betas_from_config(cp, section)
# get the checkpoint interval, if it's specified
checkpoint_interval = cls.checkpoint_from_config(cp, section)
checkpoint_signal = cls.ckpt_signal_from_config(cp, section)
# get the loglikelihood function
logl = get_optional_arg_from_config(cp, section, 'logl-function')
obj = cls(model, ntemps, nwalkers, betas=betas,
checkpoint_interval=checkpoint_interval,
checkpoint_signal=checkpoint_signal,
loglikelihood_function=logl, nprocesses=nprocesses,
use_mpi=use_mpi)
# set target
obj.set_target_from_config(cp, section)
# add burn-in if it's specified
obj.set_burn_in_from_config(cp)
# set prethin options
obj.set_thin_interval_from_config(cp, section)
# Set up the output file
setup_output(obj, output_file)
if not obj.new_checkpoint:
obj.resume_from_checkpoint()
else:
obj.set_start_from_config(cp)
return obj
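    # Illustrative sketch (not part of the original module): a minimal
    # configuration matching the options documented above. The values and the
    # choice of burn-in test are hypothetical.
    #
    #   [sampler]
    #   name = emcee_pt
    #   nwalkers = 200
    #   ntemps = 4
    #   effective-nsamples = 1000
    #   checkpoint-interval = 2000
    #
    #   [sampler-burn_in]
    #   burn-in-test = nacl & max_posterior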
@property
def samples(self):
"""A dict mapping ``variable_params`` to arrays of samples currently
in memory.
The arrays have shape ``ntemps x nwalkers x niterations``.
"""
        # emcee stores samples to its chain attribute as an
        # ntemps x nwalkers x niterations x ndim array
raw_samples = self._sampler.chain
return raw_samples_to_dict(self, raw_samples)
@property
def model_stats(self):
"""Returns the log likelihood ratio and log prior as a dict of arrays.
The returned array has shape ntemps x nwalkers x niterations.
Unfortunately, because ``emcee_pt`` does not have blob support, this
will only return the loglikelihood and logprior (with the logjacobian
set to zero) regardless of what stats the model can return.
.. warning::
Since the `logjacobian` is not saved by `emcee_pt`, the `logprior`
returned here is the log of the prior pdf in the sampling
coordinate frame rather than the variable params frame. This
differs from the variable params frame by the log of the Jacobian
of the transform from one frame to the other. If no sampling
transforms were used, then the `logprior` is the same.
"""
# likelihood has shape ntemps x nwalkers x niterations
logl = self._sampler.lnlikelihood
# get prior from posterior
logp = self._sampler.lnprobability - logl
logjacobian = numpy.zeros(logp.shape)
return {'loglikelihood': logl, 'logprior': logp,
'logjacobian': logjacobian}
def clear_samples(self):
"""Clears the chain and blobs from memory.
"""
        # store the iteration that the clear is occurring on
self._lastclear = self.niterations
self._itercounter = 0
# now clear the chain
self._sampler.reset()
def set_state_from_file(self, filename):
"""Sets the state of the sampler back to the instance saved in a file.
"""
with self.io(filename, 'r') as fp:
rstate = fp.read_random_state()
# set the numpy random state
numpy.random.set_state(rstate)
def run_mcmc(self, niterations):
"""Advance the ensemble for a number of samples.
Parameters
----------
niterations : int
Number of samples to get from sampler.
"""
pos = self._pos
if pos is None:
pos = self._p0
res = self._sampler.run_mcmc(pos, niterations)
p, _, _ = res[0], res[1], res[2]
# update the positions
self._pos = p
def write_results(self, filename):
"""Writes samples, model stats, acceptance fraction, and random state
to the given file.
Parameters
-----------
filename : str
The file to write to. The file is opened using the ``io`` class
in an an append state.
"""
with self.io(filename, 'a') as fp:
# write samples
fp.write_samples(self.samples,
parameters=self.model.variable_params,
last_iteration=self.niterations)
# write stats
fp.write_samples(self.model_stats, last_iteration=self.niterations)
            # write acceptance
fp.write_acceptance_fraction(self._sampler.acceptance_fraction)
# write random state
fp.write_random_state()
@classmethod
def calculate_logevidence(cls, filename, thin_start=None, thin_end=None,
thin_interval=None):
"""Calculates the log evidence from the given file using ``emcee_pt``'s
thermodynamic integration.
Parameters
----------
filename : str
Name of the file to read the samples from. Should be an
``EmceePTFile``.
thin_start : int
Index of the sample to begin returning stats. Default is to read
stats after burn in. To start from the beginning set thin_start
to 0.
thin_interval : int
Interval to accept every i-th sample. Default is to use the
`fp.acl`. If `fp.acl` is not set, then use all stats
(set thin_interval to 1).
thin_end : int
Index of the last sample to read. If not given then
`fp.niterations` is used.
Returns
-------
lnZ : float
The estimate of log of the evidence.
dlnZ : float
The error on the estimate.
"""
with cls._io(filename, 'r') as fp:
logls = fp.read_raw_samples(['loglikelihood'],
thin_start=thin_start,
thin_interval=thin_interval,
thin_end=thin_end,
temps='all', flatten=False)
logls = logls['loglikelihood']
# we need the betas that were used
betas = fp.betas
            # annoyingly, thermodynamic integration in PTSampler is an instance
# method, so we'll implement a dummy one
ntemps = fp.ntemps
nwalkers = fp.nwalkers
ndim = len(fp.variable_params)
dummy_sampler = emcee.PTSampler(ntemps, nwalkers, ndim, None,
None, betas=betas)
return dummy_sampler.thermodynamic_integration_log_evidence(
logls=logls, fburnin=0.)
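    # Illustrative sketch (not part of the original module): estimating the
    # log evidence from a finished run. The file name is hypothetical.
    #
    #   lnz, dlnz = EmceePTSampler.calculate_logevidence('samples.hdf')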
def _correctjacobian(self, samples):
"""Corrects the log jacobian values stored on disk.
Parameters
----------
samples : dict
Dictionary of the samples.
"""
# flatten samples for evaluating
orig_shape = list(samples.values())[0].shape
flattened_samples = {p: arr.ravel()
for p, arr in list(samples.items())}
# convert to a list of tuples so we can use map function
params = list(flattened_samples.keys())
size = flattened_samples[params[0]].size
logj = numpy.zeros(size)
for ii in range(size):
these_samples = {p: flattened_samples[p][ii] for p in params}
these_samples = self.model.sampling_transforms.apply(these_samples)
self.model.update(**these_samples)
logj[ii] = self.model.logjacobian
return logj.reshape(orig_shape)
def finalize(self):
"""Calculates the log evidence and writes to the checkpoint file.
If sampling transforms were used, this also corrects the jacobian
stored on disk.
The thin start/interval/end for calculating the log evidence are
retrieved from the checkpoint file's thinning attributes.
"""
if self.model.sampling_transforms is not None:
            # fix the logjacobian values stored on disk
logging.info("Correcting logjacobian values on disk")
with self.io(self.checkpoint_file, 'r') as fp:
samples = fp.read_raw_samples(self.variable_params,
thin_start=0,
thin_interval=1, thin_end=None,
temps='all', flatten=False)
logjacobian = self._correctjacobian(samples)
# write them back out
for fn in [self.checkpoint_file, self.backup_file]:
with self.io(fn, "a") as fp:
fp[fp.samples_group]['logjacobian'][()] = logjacobian
logging.info("Calculating log evidence")
# get the thinning settings
with self.io(self.checkpoint_file, 'r') as fp:
thin_start = fp.thin_start
thin_interval = fp.thin_interval
thin_end = fp.thin_end
# calculate
logz, dlogz = self.calculate_logevidence(
self.checkpoint_file, thin_start=thin_start, thin_end=thin_end,
thin_interval=thin_interval)
logging.info("log Z, dlog Z: {}, {}".format(logz, dlogz))
# write to both the checkpoint and backup
for fn in [self.checkpoint_file, self.backup_file]:
with self.io(fn, "a") as fp:
fp.write_logevidence(logz, dlogz)
| 18,909
| 39.319829
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/sampler/ptemcee.py
|
# Copyright (C) 2016 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides classes and functions for using the ptemcee sampler
package for parameter estimation.
"""
import shlex
import numpy
import ptemcee
import logging
from pycbc.pool import choose_pool
from .base import (BaseSampler, setup_output)
from .base_mcmc import (BaseMCMC, EnsembleSupport, raw_samples_to_dict,
get_optional_arg_from_config)
from .base_multitemper import (read_betas_from_hdf,
ensemble_compute_acf, ensemble_compute_acl)
from ..burn_in import EnsembleMultiTemperedMCMCBurnInTests
from pycbc.inference.io import PTEmceeFile
from .. import models
class PTEmceeSampler(EnsembleSupport, BaseMCMC, BaseSampler):
"""This class is used to construct the parallel-tempered ptemcee sampler.
Parameters
----------
model : model
A model from ``pycbc.inference.models``.
nwalkers : int
Number of walkers to use in sampler.
ntemps : int, optional
Specify the number of temps to use. Either this, ``Tmax``, or ``betas``
must be specified.
Tmax : float, optional
Specify the maximum temperature to use. This may be used with
``ntemps``; see :py:func:`ptemcee.make_ladder` for details. Either
this, ``ntemps``, or ``betas`` must be specified.
betas : list of float, optional
Specify the betas to use. Must be provided if ``ntemps`` and ``Tmax``
are not given. Will override ``ntemps`` and ``Tmax`` if provided.
adaptive : bool, optional
Whether or not to use adaptive temperature levels. Default is False.
adaptation_lag : int, optional
Only used if ``adaptive`` is True; see :py:mod:`ptemcee.Sampler` for
details. If not provided, will use ``ptemcee``'s default.
adaptation_time : int, optional
Only used if ``adaptive`` is True; see :py:mod:`ptemcee.Sampler` for
details. If not provided, will use ``ptemcee``'s default.
scale_factor : float, optional
Scale factor used for the stretch proposal; see
:py:mod:`ptemcee.Sampler` for details. If not provided, will use
``ptemcee``'s default.
loglikelihood_function : str, optional
Set the function to call from the model for the ``loglikelihood``.
Default is ``loglikelihood``.
nprocesses : int, optional
The number of parallel processes to use. Default is 1
        (no parallelization).
use_mpi : bool, optional
Use MPI for parallelization. Default (False) will use python's
multiprocessing.
"""
name = "ptemcee"
_io = PTEmceeFile
burn_in_class = EnsembleMultiTemperedMCMCBurnInTests
def __init__(self, model, nwalkers, ntemps=None, Tmax=None, betas=None,
adaptive=False, adaptation_lag=None, adaptation_time=None,
scale_factor=None,
loglikelihood_function=None,
checkpoint_interval=None, checkpoint_signal=None,
nprocesses=1, use_mpi=False):
self.model = model
ndim = len(model.variable_params)
# create temperature ladder if needed
if ntemps is None and Tmax is None and betas is None:
raise ValueError("must provide either ntemps/Tmax or betas")
if betas is None:
betas = ptemcee.make_ladder(ndim, ntemps=ntemps, Tmax=Tmax)
# construct the keyword arguments to pass; if a kwarg is None, we
# won't pass it, resulting in ptemcee's defaults being used
kwargs = {}
kwargs['adaptive'] = adaptive
kwargs['betas'] = betas
if adaptation_lag is not None:
kwargs['adaptation_lag'] = adaptation_lag
if adaptation_time is not None:
kwargs['adaptation_time'] = adaptation_time
if scale_factor is not None:
kwargs['scale_factor'] = scale_factor
# create a wrapper for calling the model
if loglikelihood_function is None:
loglikelihood_function = 'loglikelihood'
# frustratingly, ptemcee does not support blob data, so we have to
# turn it off
model_call = models.CallModel(model, loglikelihood_function,
return_all_stats=False)
        # these are used to help parallelize over multiple cores / MPI
models._global_instance = model_call
model_call = models._call_global_model
prior_call = models._call_global_model_logprior
self.pool = choose_pool(mpi=use_mpi, processes=nprocesses)
# construct the sampler
self._sampler = ptemcee.Sampler(nwalkers=nwalkers, ndim=ndim,
logl=model_call, logp=prior_call,
mapper=self.pool.map, **kwargs)
self.nwalkers = nwalkers
self._ntemps = ntemps
self._checkpoint_interval = checkpoint_interval
self._checkpoint_signal = checkpoint_signal
# we'll initialize ensemble and chain to None
self._chain = None
self._ensemble = None
@property
def io(self):
return self._io
@property
def ntemps(self):
"""The number of temeratures that are set."""
return self._ntemps
@property
def base_shape(self):
return (self.ntemps, self.nwalkers,)
@property
def betas(self):
"""Returns the beta history currently in memory."""
# chain betas has shape niterations x ntemps; transpose to
# ntemps x niterations
return self._chain.betas.transpose()
@property
def starting_betas(self):
"""Returns the betas that were used at startup."""
# the initial betas that were used
return self._sampler.betas
@property
def adaptive(self):
"""Whether or not the betas are adapted."""
return self._sampler.adaptive
@property
def adaptation_lag(self):
"""The adaptation lag for the beta evolution."""
return self._sampler.adaptation_lag
@property
def adaptation_time(self):
"""The adaptation time for the beta evolution."""
return self._sampler.adaptation_time
@property
def scale_factor(self):
"""The scale factor used by ptemcee."""
return self._sampler.scale_factor
@property
def ensemble(self):
"""Returns the current ptemcee ensemble.
        The ensemble stores the current locations and temperatures of the
        walkers. If the ensemble hasn't been set up yet, will set one up
using p0 for the positions. If set_p0 hasn't been run yet, this will
result in a ValueError.
"""
if self._ensemble is None:
if self._p0 is None:
raise ValueError("initial positions not set; run set_p0")
# use the global numpy random state
rstate = numpy.random.mtrand._rand
# self._p0 has base_shape x ndim = ntemps x nwalkers x ndim (see
# BaseMCMC.set_p0). ptemcee's Ensemble expects
# ntemps x nwalkers x ndim... so we're good
self._ensemble = self._sampler.ensemble(self._p0, rstate)
return self._ensemble
@property
def _pos(self):
"""Uses the ensemble for the position."""
# BaseMCMC expects _pos to have shape ntemps x nwalkers x ndim,
# which is the same shape as ensemble.x
return self.ensemble.x
@property
def chain(self):
"""The current chain of samples in memory.
The chain is returned as a :py:mod:`ptemcee.chain.Chain` instance. If
no chain has been created yet (``_chain`` is None), then will create
a new chain using the current ``ensemble``.
"""
if self._chain is None:
# create a chain
self._chain = ptemcee.chain.Chain(self.ensemble)
return self._chain
def clear_samples(self):
"""Clears the chain and blobs from memory.
"""
        # store the iteration that the clear is occurring on
self._lastclear = self.niterations
self._itercounter = 0
# set _chain to None; this will both cause the current chain to
# get garbage collected, and will cause a new chain to be created
# the next time self.chain is called
self._chain = None
@property
def samples(self):
"""A dict mapping ``variable_params`` to arrays of samples currently
in memory.
The arrays have shape ``ntemps x nwalkers x niterations``.
"""
# chain.x has shape niterations x ntemps x nwalkers x ndim
# we'll transpose to ntemps x nwalkers x niterations x ndim
raw_samples = self._chain.x.transpose((1, 2, 0, 3))
return raw_samples_to_dict(self, raw_samples)
@property
def model_stats(self):
"""Returns the log likelihood ratio and log prior as a dict of arrays.
The returned array has shape ntemps x nwalkers x niterations.
Unfortunately, because ``ptemcee`` does not have blob support, this
will only return the loglikelihood and logprior (with the logjacobian
set to zero) regardless of what stats the model can return.
.. warning::
Since the ``logjacobian`` is not saved by ``ptemcee``, the
``logprior`` returned here is the log of the prior pdf in the
sampling coordinate frame rather than the variable params frame.
This differs from the variable params frame by the log of the
Jacobian of the transform from one frame to the other. If no
sampling transforms were used, then the ``logprior`` is the same.
"""
# log likelihood and prior have shape
# niterations x ntemps x nwalkers; we'll transpose to have shape
# ntemps x nwalkers x niterations
logl = self._chain.logl.transpose((1, 2, 0))
logp = self._chain.logP.transpose((1, 2, 0))
logjacobian = numpy.zeros(logp.shape)
return {'loglikelihood': logl, 'logprior': logp,
'logjacobian': logjacobian}
def set_state_from_file(self, filename):
"""Sets the state of the sampler back to the instance saved in a file.
"""
with self.io(filename, 'r') as fp:
rstate = fp.read_random_state()
# set the numpy random state
numpy.random.set_state(rstate)
# set the ensemble to its last state
ensemble = self.ensemble
for attr, val in fp.read_ensemble_attrs().items():
setattr(ensemble, attr, val)
ensemble.betas = fp.read_betas(iteration=-1)
ensemble.time = fp.niterations
def run_mcmc(self, niterations):
"""Advance the ensemble for a number of samples.
Parameters
----------
niterations : int
Number of samples to get from sampler.
"""
self.chain.run(niterations)
@classmethod
def calculate_logevidence(cls, filename, thin_start=None, thin_end=None,
thin_interval=None):
"""Calculates the log evidence from the given file.
This uses ``ptemcee``'s thermodynamic integration.
Parameters
----------
filename : str
Name of the file to read the samples from. Should be an
``PTEmceeFile``.
thin_start : int
Index of the sample to begin returning stats. Default is to read
stats after burn in. To start from the beginning set thin_start
to 0.
thin_interval : int
Interval to accept every i-th sample. Default is to use the
`fp.acl`. If `fp.acl` is not set, then use all stats
(set thin_interval to 1).
thin_end : int
Index of the last sample to read. If not given then
`fp.niterations` is used.
Returns
-------
lnZ : float
The estimate of log of the evidence.
dlnZ : float
The error on the estimate.
"""
with cls._io(filename, 'r') as fp:
logls = fp.read_raw_samples(['loglikelihood'],
thin_start=thin_start,
thin_interval=thin_interval,
thin_end=thin_end,
temps='all', flatten=False)
logls = logls['loglikelihood']
# we need the betas that were used
betas = fp.read_betas(thin_start=thin_start,
thin_interval=thin_interval,
thin_end=thin_end)
# we'll separate betas out by their unique temperatures
# there's probably a faster way to do this...
mean_logls = []
unique_betas = []
ntemps = betas.shape[0]
for ti in range(ntemps):
ubti, idx = numpy.unique(betas[ti, :], return_inverse=True)
unique_idx = numpy.unique(idx)
loglsti = logls[ti, :, :]
for ii in unique_idx:
# average over the walkers and iterations with the same
# betas
getiters = numpy.where(ii == idx)[0]
mean_logls.append(loglsti[:, getiters].mean())
unique_betas.append(ubti[ii])
return ptemcee.util.thermodynamic_integration_log_evidence(
numpy.array(unique_betas), numpy.array(mean_logls))
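# Illustrative use of the classmethod above (the file name is hypothetical,
# and the enclosing class is assumed to be ``PTEmceeSampler``):
#
#     lnz, dlnz = PTEmceeSampler.calculate_logevidence('samples.hdf')
#
# This averages the loglikelihood over walkers and iterations at each unique
# inverse temperature and passes the result to ptemcee's
# thermodynamic-integration estimator.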
@staticmethod
def compute_acf(filename, **kwargs):
r"""Computes the autocorrelation function.
Calls :py:func:`base_multitemper.ensemble_compute_acf`; see that
function for details.
Parameters
----------
filename : str
Name of a samples file to compute ACFs for.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_multitemper.ensemble_compute_acf`.
Returns
-------
dict :
Dictionary of arrays giving the ACFs for each parameter. If
``per-walker=True`` is passed as a keyword argument, the arrays
will have shape ``ntemps x nwalkers x niterations``. Otherwise, the
returned array will have shape ``ntemps x niterations``.
"""
return ensemble_compute_acf(filename, **kwargs)
@staticmethod
def compute_acl(filename, **kwargs):
r"""Computes the autocorrelation length.
Calls :py:func:`base_multitemper.ensemble_compute_acl`; see that
function for details.
Parameters
----------
filename : str
Name of a samples file to compute ACLs for.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_multitemper.ensemble_compute_acl`.
Returns
-------
dict
A dictionary of ntemps-long arrays of the ACLs of each parameter.
"""
return ensemble_compute_acl(filename, **kwargs)
@classmethod
def from_config(cls, cp, model, output_file=None, nprocesses=1,
use_mpi=False):
"""Loads the sampler from the given config file.
The following options are retrieved in the ``[sampler]`` section:
* ``name = STR`` :
Required. This must match the sampler's name.
* ``nwalkers = INT`` :
Required. The number of walkers to use.
* ``ntemps = INT`` :
The number of temperatures to use. This may be used in combination
with ``Tmax``. Either this, ``Tmax``, ``betas`` or ``betas-file``
must be provided.
* ``tmax = FLOAT`` :
The maximum temperature to use. This may be used in combination
with ``ntemps``, or alone.
* ``betas = FLOAT1 FLOAT2 [...]`` :
Space-separated list of (initial) inverse temperatures ("betas") to
use. This sets both the number of temperatures and the tmax. A
``ValueError`` will be raised if both this and ``ntemps`` or
``Tmax`` are provided.
* ``betas-file = STR`` :
Path to an hdf file containing the inverse temperatures ("betas")
to use. The betas will be retrieved from the file's
``.attrs['betas']``. A ``ValueError`` will be raised if both this
and ``betas`` are provided.
* ``adaptive =`` :
If provided, temperature adaptation will be turned on.
* ``adaptation-lag = INT`` :
The adaptation lag to use (see ptemcee for details).
* ``adaptation-time = INT`` :
The adaptation time to use (see ptemcee for details).
* ``scale-factor = FLOAT`` :
The scale factor to use for the emcee stretch.
* ``niterations = INT`` :
The number of iterations to run the sampler for. Either this or
``effective-nsamples`` must be provided (but not both).
* ``effective-nsamples = INT`` :
Run the sampler until the given number of effective samples are
obtained. A ``checkpoint-interval`` must also be provided in this
case. Either this or ``niterations`` must be provided (but not
both).
* ``thin-interval = INT`` :
Thin the samples by the given value before saving to disk. May
provide this, or ``max-samples-per-chain``, but not both. If
neither options are provided, will save all samples.
* ``max-samples-per-chain = INT`` :
Thin the samples such that the number of samples per chain per
temperature that are saved to disk never exceeds the given value.
May provide this, or ``thin-interval``, but not both. If neither
options are provided, will save all samples.
* ``checkpoint-interval = INT`` :
Sets the checkpoint interval to use. Must be provided if using
``effective-nsamples``.
* ``checkpoint-signal = STR`` :
Set the checkpoint signal, e.g., "USR2". Optional.
* ``logl-function = STR`` :
The attribute of the model to use for the loglikelihood. If
not provided, will default to ``loglikelihood``.
Settings for burn-in tests are read from ``[sampler-burn_in]``. In
particular, the ``burn-in-test`` option is used to set the burn in
tests to perform. See
:py:func:`EnsembleMultiTemperedMCMCBurnInTests.from_config` for
details. If no ``burn-in-test`` is provided, no burn in tests will be
carried out.
Parameters
----------
cp : WorkflowConfigParser instance
Config file object to parse.
model : pycbc.inference.model.BaseModel instance
The model to use.
output_file : str, optional
The name of the output file to checkpoint and write results to.
nprocesses : int, optional
The number of parallel processes to use. Default is 1.
use_mpi : bool, optional
Use MPI for parallelization. Default is False.
Returns
-------
PTEmceeSampler :
The sampler instance.
"""
section = "sampler"
# check name
assert cp.get(section, "name") == cls.name, (
"name in section [sampler] must match mine")
# get the number of walkers to use
nwalkers = int(cp.get(section, "nwalkers"))
# get the checkpoint interval, if it's specified
checkpoint_interval = cls.checkpoint_from_config(cp, section)
checkpoint_signal = cls.ckpt_signal_from_config(cp, section)
optargs = {}
# get the temperature level settings
ntemps = get_optional_arg_from_config(cp, section, 'ntemps', int)
if ntemps is not None:
optargs['ntemps'] = ntemps
tmax = get_optional_arg_from_config(cp, section, 'tmax', float)
if tmax is not None:
optargs['Tmax'] = tmax
betas = get_optional_arg_from_config(cp, section, 'betas')
if betas is not None:
# convert to a list sorted in descending order
betas = numpy.sort(list(map(float, shlex.split(betas))))[::-1]
optargs['betas'] = betas
betas_file = get_optional_arg_from_config(cp, section, 'betas-file')
if betas_file is not None:
optargs['betas'] = read_betas_from_hdf(betas_file)
# check for consistency
if betas is not None and betas_file is not None:
raise ValueError("provide either betas or betas-file, not both")
if 'betas' in optargs and (ntemps is not None or tmax is not None):
raise ValueError("provide either ntemps/tmax or betas/betas-file, "
"not both")
# adaptation parameters
adaptive = get_optional_arg_from_config(cp, section, 'adaptive')
if adaptive is not None:
optargs['adaptive'] = True
else:
optargs['adaptive'] = False
adaptation_lag = get_optional_arg_from_config(cp, section,
'adaptation-lag', int)
if adaptation_lag is not None:
optargs['adaptation_lag'] = adaptation_lag
adaptation_time = get_optional_arg_from_config(cp, section,
'adaptation-time', int)
if adaptation_time is not None:
optargs['adaptation_time'] = adaptation_time
scale_factor = get_optional_arg_from_config(cp, section,
'scale-factor', float)
if scale_factor is not None:
optargs['scale_factor'] = scale_factor
# get the loglikelihood function
logl = get_optional_arg_from_config(cp, section, 'logl-function')
obj = cls(model, nwalkers,
checkpoint_interval=checkpoint_interval,
checkpoint_signal=checkpoint_signal,
loglikelihood_function=logl, nprocesses=nprocesses,
use_mpi=use_mpi, **optargs)
# set target
obj.set_target_from_config(cp, section)
# add burn-in if it's specified
obj.set_burn_in_from_config(cp)
# set prethin options
obj.set_thin_interval_from_config(cp, section)
# Set up the output file
setup_output(obj, output_file)
if not obj.new_checkpoint:
obj.resume_from_checkpoint()
else:
obj.set_start_from_config(cp)
return obj
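# An illustrative ``[sampler]`` configuration matching the options documented
# in ``from_config`` above. The numerical values are hypothetical examples,
# not recommendations, and the sampler's ``name`` attribute is assumed to be
# ``ptemcee``:
#
#   [sampler]
#   name = ptemcee
#   nwalkers = 200
#   ntemps = 4
#   adaptive =
#   effective-nsamples = 1000
#   checkpoint-interval = 2000
#   max-samples-per-chain = 1000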
def write_results(self, filename):
"""Writes samples, model stats, acceptance fraction, and random state
to the given file.
Parameters
----------
filename : str
The file to write to. The file is opened using the ``io`` class
in an append state.
"""
with self.io(filename, 'a') as fp:
# write samples
fp.write_samples(self.samples,
parameters=self.model.variable_params,
last_iteration=self.niterations)
# write stats
fp.write_samples(self.model_stats, last_iteration=self.niterations)
# write random state
fp.write_random_state()
# write betas
fp.write_betas(self.betas, last_iteration=self.niterations)
# write random state
fp.write_random_state()
# write attributes of the ensemble
fp.write_ensemble_attrs(self.ensemble)
def _correctjacobian(self, samples):
"""Corrects the log jacobian values stored on disk.
Parameters
----------
samples : dict
Dictionary of the samples.
"""
# flatten samples for evaluating
orig_shape = list(samples.values())[0].shape
flattened_samples = {p: arr.ravel()
for p, arr in list(samples.items())}
# convert to a list of tuples so we can use map function
params = list(flattened_samples.keys())
size = flattened_samples[params[0]].size
logj = numpy.zeros(size)
for ii in range(size):
these_samples = {p: flattened_samples[p][ii] for p in params}
these_samples = self.model.sampling_transforms.apply(these_samples)
self.model.update(**these_samples)
logj[ii] = self.model.logjacobian
return logj.reshape(orig_shape)
def finalize(self):
"""Calculates the log evidence and writes to the checkpoint file.
If sampling transforms were used, this also corrects the jacobian
stored on disk.
The thin start/interval/end for calculating the log evidence are
retrieved from the checkpoint file's thinning attributes.
"""
if self.model.sampling_transforms is not None:
# fix the logjacobian values stored on disk
logging.info("Correcting logjacobian values on disk")
with self.io(self.checkpoint_file, 'r') as fp:
samples = fp.read_raw_samples(self.variable_params,
thin_start=0,
thin_interval=1, thin_end=None,
temps='all', flatten=False)
logjacobian = self._correctjacobian(samples)
# write them back out
for fn in [self.checkpoint_file, self.backup_file]:
with self.io(fn, "a") as fp:
fp[fp.samples_group]['logjacobian'][()] = logjacobian
logging.info("Calculating log evidence")
# get the thinning settings
with self.io(self.checkpoint_file, 'r') as fp:
thin_start = fp.thin_start
thin_interval = fp.thin_interval
thin_end = fp.thin_end
# calculate
logz, dlogz = self.calculate_logevidence(
self.checkpoint_file, thin_start=thin_start, thin_end=thin_end,
thin_interval=thin_interval)
logging.info("log Z, dlog Z: {}, {}".format(logz, dlogz))
# write to both the checkpoint and backup
for fn in [self.checkpoint_file, self.backup_file]:
with self.io(fn, "a") as fp:
fp.write_logevidence(logz, dlogz)
| 27,259
| 41.527301
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/sampler/ultranest.py
|
# Copyright (C) 2020 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This modules provides classes and functions for using the ultranest sampler
packages for parameter estimation.
"""
import sys
import logging
import numpy
from pycbc.inference.io.ultranest import UltranestFile
from pycbc.io.hdf import dump_state
from pycbc.pool import use_mpi
from .base import (BaseSampler, setup_output)
from .base_cube import setup_calls
#
# =============================================================================
#
# Samplers
#
# =============================================================================
#
class UltranestSampler(BaseSampler):
"""This class is used to construct an Ultranest sampler from the ultranest
package.
Parameters
----------
model : model
A model from ``pycbc.inference.models``.
log_dir : str
Folder where files should be stored for resuming (optional).
stepsampling : bool
If false, uses rejection sampling. If true, uses
hit-and-run sampler, which scales better with dimensionality.
"""
name = "ultranest"
_io = UltranestFile
def __init__(self, model, log_dir=None,
stepsampling=False,
enable_plots=False,
**kwargs):
super(UltranestSampler, self).__init__(model)
import ultranest
log_likelihood_call, prior_call = setup_calls(model, copy_prior=True)
# Check for cyclic boundaries
periodic = []
cyclic = self.model.prior_distribution.cyclic
for param in self.variable_params:
if param in cyclic:
logging.info('Param: %s will be cyclic', param)
periodic.append(True)
else:
periodic.append(False)
self._sampler = ultranest.ReactiveNestedSampler(
list(self.model.variable_params),
log_likelihood_call,
prior_call, log_dir=log_dir,
wrapped_params=periodic,
resume=True)
if stepsampling:
import ultranest.stepsampler
self._sampler.stepsampler = ultranest.stepsampler.RegionBallSliceSampler(
nsteps=100, adaptive_nsteps='move-distance',
region_filter=True)
self.enable_plots = enable_plots
self.nlive = 0
self.ndim = len(self.model.variable_params)
self.result = None
self.kwargs = kwargs # Keywords for the run method of ultranest
do_mpi, _, rank = use_mpi()
self.main = (not do_mpi) or (rank == 0)
def run(self):
self.result = self._sampler.run(**self.kwargs)
if not self.main:
sys.exit(0)
self._sampler.print_results()
if self.enable_plots:
self._sampler.plot()
@property
def io(self):
return self._io
@property
def niterations(self):
return self.result['niter']
@classmethod
def from_config(cls, cp, model, output_file=None, **kwds):
"""
Loads the sampler from the given config file.
"""
skeys = {}
opts = {'update_interval_iter_fraction': float,
'update_interval_ncall': int,
'log_interval': int,
'show_status': bool,
'dlogz': float,
'dKL': float,
'frac_remain': float,
'Lepsilon': float,
'min_ess': int,
'max_iters': int,
'max_ncalls': int,
'log_dir': str,
'stepsampling': bool,
'enable_plots': bool,
'max_num_improvement_loops': int,
'min_num_live_points': int,
'cluster_num_live_points': int}
for opt_name in opts:
if cp.has_option('sampler', opt_name):
value = cp.get('sampler', opt_name)
skeys[opt_name] = opts[opt_name](value)
inst = cls(model, **skeys)
do_mpi, _, rank = use_mpi()
if not do_mpi or (rank == 0):
setup_output(inst, output_file)
return inst
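# An illustrative ``[sampler]`` configuration for this sampler; the option
# names come from the ``opts`` mapping above, while the values shown are
# hypothetical examples, not recommendations:
#
#   [sampler]
#   name = ultranest
#   min_num_live_points = 400
#   dlogz = 0.5
#   frac_remain = 0.5
#   stepsampling = True
#   log_dir = ultranest_logs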
def checkpoint(self):
pass
def resume_from_checkpoint(self):
pass
def finalize(self):
logging.info("Writing samples to files")
for fn in [self.checkpoint_file, self.backup_file]:
self.write_results(fn)
@property
def model_stats(self):
return {}
@property
def samples(self):
from ultranest.utils import resample_equal
# we'll do the resampling ourselves so we can pick up
# additional parameters
try: # Remove me on next ultranest release
wsamples = self.result['weighted_samples']['v']
weights = self.result['weighted_samples']['w']
logl = self.result['weighted_samples']['L']
except KeyError:
wsamples = self.result['weighted_samples']['points']
weights = self.result['weighted_samples']['weights']
logl = self.result['weighted_samples']['logl']
wsamples = numpy.column_stack((wsamples, logl))
params = list(self.model.variable_params) + ['loglikelihood']
samples = resample_equal(wsamples, weights / weights.sum())
samples_dict = {p: samples[:, i] for i, p in enumerate(params)}
return samples_dict
def write_results(self, filename):
"""Writes samples, model stats, acceptance fraction, and random state
to the given file.
Parameters
----------
filename : str
The file to write to. The file is opened using the ``io`` class
in an append state.
"""
with self.io(filename, 'a') as fp:
# write samples
fp.write_samples(self.samples, self.samples.keys())
# write log evidence
fp.write_logevidence(self.logz, self.logz_err)
# write full ultranest formatted results
dump_state(self.result, fp,
path='sampler_info',
dsetname='presult')
@property
def logz(self):
"""Return bayesian evidence estimated by ultranest sampler.
"""
return self.result['logz']
@property
def logz_err(self):
"""Return error in bayesian evidence estimated by ultranest sampler.
"""
return self.result['logzerr']
| 7,359
| 31.711111
| 85
|
py
|
pycbc
|
pycbc-master/pycbc/inference/sampler/base_mcmc.py
|
# Copyright (C) 2016 Christopher M. Biwer, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Provides constructor classes and convenience functions for MCMC samplers."""
import os
import signal
import logging
from abc import (ABCMeta, abstractmethod, abstractproperty)
import configparser as ConfigParser
import numpy
from pycbc.filter import autocorrelation
from pycbc.inference.io import (validate_checkpoint_files, loadfile)
from pycbc.inference.io.base_mcmc import nsamples_in_chain
from .base import initial_dist_from_config
#
# =============================================================================
#
# Convenience functions
#
# =============================================================================
#
def raw_samples_to_dict(sampler, raw_samples):
"""Convenience function for converting ND array to a dict of samples.
The samples are assumed to have dimension
``[sampler.base_shape x] niterations x len(sampler.sampling_params)``.
Parameters
----------
sampler : sampler instance
An instance of an MCMC sampler.
raw_samples : array
The array of samples to convert.
Returns
-------
dict :
A dictionary mapping the raw samples to the variable params. If the
sampling params are not the same as the variable params, they will
also be included. Each array will have shape
``[sampler.base_shape x] niterations``.
"""
sampling_params = sampler.sampling_params
# convert to dictionary
samples = {param: raw_samples[..., ii] for
ii, param in enumerate(sampling_params)}
# apply boundary conditions
samples = sampler.model.prior_distribution.apply_boundary_conditions(
**samples)
# apply transforms to go to model's variable params space
if sampler.model.sampling_transforms is not None:
samples = sampler.model.sampling_transforms.apply(
samples, inverse=True)
return samples
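# Minimal usage sketch (the sampler and array names are hypothetical): for an
# ensemble sampler whose ``base_shape`` is ``(nwalkers,)`` and whose raw chain
# has shape ``nwalkers x niterations x ndim``,
#
#     samples = raw_samples_to_dict(sampler, raw_chain)
#
# returns a dict of ``nwalkers x niterations`` arrays keyed by the variable
# (and, if different, sampling) parameter names.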
def blob_data_to_dict(stat_names, blobs):
"""Converts list of "blobs" to a dictionary of model stats.
Samplers like ``emcee`` store the extra tuple returned by ``CallModel`` to
a list called blobs. This is a list of lists of tuples with shape
niterations x nwalkers x nstats, where nstats is the number of stats
returned by the model's ``default_stats``. This converts that list to a
dictionary of arrays keyed by the stat names.
Parameters
----------
stat_names : list of str
The list of the stat names.
blobs : list of list of tuples
The data to convert.
Returns
-------
dict :
A dictionary mapping the model's ``default_stats`` to arrays of values.
Each array will have shape ``nwalkers x niterations``.
"""
# get the dtypes of each of the stats; we'll just take this from the
# first iteration and walker
dtypes = [type(val) for val in blobs[0][0]]
assert len(stat_names) == len(dtypes), (
"number of stat names must match length of tuples in the blobs")
# convert to an array; to ensure that we get the dtypes correct, we'll
# cast to a structured array
raw_stats = numpy.array(blobs, dtype=list(zip(stat_names, dtypes)))
# transpose so that it has shape nwalkers x niterations
raw_stats = raw_stats.transpose()
# now return as a dictionary
return {stat: raw_stats[stat] for stat in stat_names}
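# Minimal sketch of the expected blob layout (the stat names and values are
# hypothetical). With two iterations of three walkers and two stats,
#
#     blobs = [[(-10.0, -0.5), (-9.0, -0.5), (-8.0, -0.5)],
#              [(-9.5, -0.5), (-8.5, -0.5), (-7.5, -0.5)]]
#     stats = blob_data_to_dict(['loglikelihood', 'logprior'], blobs)
#
# gives ``stats['loglikelihood']`` with shape ``nwalkers x niterations``,
# i.e. ``3 x 2`` here.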
def get_optional_arg_from_config(cp, section, arg, dtype=str):
"""Convenience function to retrieve an optional argument from a config
file.
Parameters
----------
cp : ConfigParser
Open config parser to retrieve the argument from.
section : str
Name of the section to retrieve from.
arg : str
Name of the argument to retrieve.
dtype : datatype, optional
Cast the retrieved value (if it exists) to the given datatype. Default
is ``str``.
Returns
-------
val : None or str
If the argument is present, the value. Otherwise, None.
"""
if cp.has_option(section, arg):
val = dtype(cp.get(section, arg))
else:
val = None
return val
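# Illustrative usage (the section and option names are hypothetical):
#
#     ntemps = get_optional_arg_from_config(cp, 'sampler', 'ntemps', int)
#
# returns the option cast to ``int`` if ``[sampler]`` contains ``ntemps``,
# and ``None`` otherwise.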
#
# =============================================================================
#
# BaseMCMC definition
#
# =============================================================================
#
class BaseMCMC(object, metaclass=ABCMeta):
"""Abstract base class that provides methods common to MCMCs.
This is not a sampler class itself. Sampler classes can inherit from this
along with ``BaseSampler``.
This class provides ``set_initial_conditions``, ``run``, and ``checkpoint``
methods, which are some of the abstract methods required by
``BaseSampler``.
This class introduces the following abstract properties and methods:
* base_shape
[`property`] Should give the shape of the samples arrays used by the
sampler, excluding the iterations dimension. Needed for writing
results.
* run_mcmc(niterations)
Should run the sampler for the given number of iterations. Called by
``run``.
* clear_samples()
Should clear samples from memory. Called by ``run``.
* set_state_from_file(filename)
Should set the random state of the sampler using the given filename.
Called by ``set_initial_conditions``.
* write_results(filename)
Writes results to the given filename. Called by ``checkpoint``.
* compute_acf(filename, \**kwargs)
[`classmethod`] Should compute the autocorrelation function using
the given filename. Also allows for other keyword arguments.
* compute_acl(filename, \**kwargs)
[`classmethod`] Should compute the autocorrelation length using
the given filename. Also allows for other keyword arguments.
"""
_lastclear = None # the iteration when samples were cleared from memory
_itercounter = None # the number of iterations since the last clear
_pos = None
_p0 = None
_nchains = None
_burn_in = None
_acls = None
_checkpoint_interval = None
_checkpoint_signal = None
_target_niterations = None
_target_eff_nsamples = None
_thin_interval = 1
_max_samples_per_chain = None
@abstractproperty
def base_shape(self):
"""What shape the sampler's samples arrays are in, excluding
the iterations dimension.
For example, if a sampler uses 20 chains and 3 temperatures, this
would be ``(3, 20)``. If a sampler only uses a single walker and no
temperatures this would be ``()``.
"""
pass
@property
def nchains(self):
"""The number of chains used."""
if self._nchains is None:
raise ValueError("number of chains not set")
return self._nchains
@nchains.setter
def nchains(self, value):
"""Sets the number of chains."""
# we'll actually store it to the nchains attribute
self._nchains = int(value)
@property
def niterations(self):
"""The current number of iterations."""
itercounter = self._itercounter
if itercounter is None:
itercounter = 0
lastclear = self._lastclear
if lastclear is None:
lastclear = 0
return itercounter + lastclear
@property
def checkpoint_interval(self):
"""The number of iterations to do between checkpoints."""
return self._checkpoint_interval
@property
def checkpoint_signal(self):
"""The signal to use when checkpointing."""
return self._checkpoint_signal
@property
def target_niterations(self):
"""The number of iterations the sampler should run for."""
return self._target_niterations
@property
def target_eff_nsamples(self):
"""The target number of effective samples the sampler should get."""
return self._target_eff_nsamples
@property
def thin_interval(self):
"""Returns the thin interval being used."""
return self._thin_interval
@thin_interval.setter
def thin_interval(self, interval):
"""Sets the thin interval to use.
If ``None`` provided, will default to 1.
"""
if interval is None:
interval = 1
if interval < 1:
raise ValueError("thin interval must be >= 1")
self._thin_interval = interval
@property
def thin_safety_factor(self):
"""The minimum value that ``max_samples_per_chain`` may be set to."""
return 100
@property
def max_samples_per_chain(self):
"""The maximum number of samplers per chain that is written to disk."""
return self._max_samples_per_chain
@max_samples_per_chain.setter
def max_samples_per_chain(self, n):
if n is not None:
n = int(n)
if n < self.thin_safety_factor:
raise ValueError("max samples per chain must be >= {}"
.format(self.thin_safety_factor))
# also check that this is consistent with the target number of
# effective samples
if self.target_eff_nsamples is not None:
target_samps_per_chain = int(numpy.ceil(
self.target_eff_nsamples / self.nchains))
if n <= target_samps_per_chain:
raise ValueError("max samples per chain must be > target "
"effective number of samples per walker "
"({})".format(target_samps_per_chain))
self._max_samples_per_chain = n
def get_thin_interval(self):
"""Gets the thin interval to use.
If ``max_samples_per_chain`` is set, this will figure out what thin
interval is needed to satisfy that criteria. In that case, the thin
interval used must be a multiple of the currently used thin interval.
"""
if self.max_samples_per_chain is not None:
# the extra factor of 2 is to account for the fact that the thin
# interval will need to be at least twice as large as a previously
# used interval
thinfactor = 2*(self.niterations // self.max_samples_per_chain)
# make sure it's at least 1
thinfactor = max(thinfactor, 1)
# make the new interval is a multiple of the previous, to ensure
# that any samples currently on disk can be thinned accordingly
if thinfactor < self.thin_interval:
thin_interval = self.thin_interval
else:
thin_interval = (thinfactor // self.thin_interval) * \
self.thin_interval
else:
thin_interval = self.thin_interval
return thin_interval
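# Worked example with hypothetical numbers: if ``max_samples_per_chain`` is
# 1000, ``niterations`` is 3000 and the current ``thin_interval`` is 1, then
# ``thinfactor = 2 * (3000 // 1000) = 6`` and ``get_thin_interval`` returns 6;
# if the current interval were already 4, the result is rounded down to the
# nearest multiple of 4, i.e. ``(6 // 4) * 4 = 4``.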
def set_target(self, niterations=None, eff_nsamples=None):
"""Sets the target niterations/nsamples for the sampler.
One or the other must be provided, not both.
"""
if niterations is None and eff_nsamples is None:
raise ValueError("Must provide a target niterations or "
"eff_nsamples")
if niterations is not None and eff_nsamples is not None:
raise ValueError("Must provide a target niterations or "
"eff_nsamples, not both")
self._target_niterations = int(niterations) \
if niterations is not None else None
self._target_eff_nsamples = int(eff_nsamples) \
if eff_nsamples is not None else None
@abstractmethod
def clear_samples(self):
"""A method to clear samples from memory."""
pass
@property
def pos(self):
"""A dictionary of the current walker positions.
If the sampler hasn't been run yet, returns p0.
"""
pos = self._pos
if pos is None:
return self.p0
# convert to dict
pos = {param: self._pos[..., k]
for (k, param) in enumerate(self.sampling_params)}
return pos
@property
def p0(self):
"""A dictionary of the initial position of the chains.
This is set by using ``set_p0``. If not set yet, a ``ValueError`` is
raised when the attribute is accessed.
"""
if self._p0 is None:
raise ValueError("initial positions not set; run set_p0")
# convert to dict
p0 = {param: self._p0[..., k]
for (k, param) in enumerate(self.sampling_params)}
return p0
def set_p0(self, samples_file=None, prior=None):
"""Sets the initial position of the chains.
Parameters
----------
samples_file : InferenceFile, optional
If provided, use the last iteration in the given file for the
starting positions.
prior : JointDistribution, optional
Use the given prior to set the initial positions rather than
``model``'s prior.
Returns
-------
p0 : dict
A dictionary mapping sampling params to the starting positions.
"""
# if samples are given then use those as initial positions
if samples_file is not None:
with self.io(samples_file, 'r') as fp:
samples = fp.read_samples(self.variable_params,
iteration=-1, flatten=False)
# remove the (length 1) niterations dimension
samples = samples[..., 0]
# make sure we have the same shape
assert samples.shape == self.base_shape, (
"samples in file {} have shape {}, but I have shape {}".
format(samples_file, samples.shape, self.base_shape))
# transform to sampling parameter space
if self.model.sampling_transforms is not None:
samples = self.model.sampling_transforms.apply(samples)
# draw random samples if samples are not provided
else:
nsamples = numpy.prod(self.base_shape)
samples = self.model.prior_rvs(size=nsamples, prior=prior).reshape(
self.base_shape)
# store as ND array with shape [base_shape] x nparams
ndim = len(self.variable_params)
p0 = numpy.ones(list(self.base_shape)+[ndim])
for i, param in enumerate(self.sampling_params):
p0[..., i] = samples[param]
self._p0 = p0
return self.p0
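# Typical calls (the file name is hypothetical): ``sampler.set_p0()`` draws
# fresh starting positions from the model's prior, while
# ``sampler.set_p0(samples_file='previous_run.hdf')`` resumes from the last
# iteration stored in an earlier output file.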
@abstractmethod
def set_state_from_file(self, filename):
"""Sets the state of the sampler to the instance saved in a file.
"""
pass
def set_start_from_config(self, cp):
"""Sets the initial state of the sampler from config file
"""
if cp.has_option('sampler', 'start-file'):
start_file = cp.get('sampler', 'start-file')
logging.info("Using file %s for initial positions", start_file)
init_prior = None
else:
start_file = None
init_prior = initial_dist_from_config(
cp, self.variable_params, self.static_params)
self.set_p0(samples_file=start_file, prior=init_prior)
def resume_from_checkpoint(self):
"""Resume the sampler from the checkpoint file
"""
with self.io(self.checkpoint_file, "r") as fp:
self._lastclear = fp.niterations
self.set_p0(samples_file=self.checkpoint_file)
self.set_state_from_file(self.checkpoint_file)
def run(self):
"""Runs the sampler."""
if self.target_eff_nsamples and self.checkpoint_interval is None:
raise ValueError("A checkpoint interval must be set if "
"targetting an effective number of samples")
# get the starting number of samples:
# "nsamples" keeps track of the number of samples we've obtained (if
# target_eff_nsamples is not None, this is the effective number of
# samples; otherwise, this is the total number of samples).
# "_lastclear" is the number of iterations that the checkpoint file already
# contains (either due to sampler burn-in, or a previous checkpoint)
if self.new_checkpoint:
self._lastclear = 0
else:
with self.io(self.checkpoint_file, "r") as fp:
self._lastclear = fp.niterations
self.thin_interval = fp.thinned_by
if self.target_eff_nsamples is not None:
target_nsamples = self.target_eff_nsamples
with self.io(self.checkpoint_file, "r") as fp:
nsamples = fp.effective_nsamples
elif self.target_niterations is not None:
# the number of samples is the number of iterations times the
# number of chains
target_nsamples = self.nchains * self.target_niterations
nsamples = self._lastclear * self.nchains
else:
raise ValueError("must set either target_eff_nsamples or "
"target_niterations; see set_target")
self._itercounter = 0
# figure out the interval to use
iterinterval = self.checkpoint_interval
if iterinterval is None:
iterinterval = self.target_niterations
# run sampler until we have the desired number of samples
while nsamples < target_nsamples:
# adjust the interval if we would go past the number of iterations
if self.target_niterations is not None and (
self.niterations + iterinterval > self.target_niterations):
iterinterval = self.target_niterations - self.niterations
# run sampler and set initial values to None so that sampler
# picks up from where it left off next call
logging.info("Running sampler for {} to {} iterations".format(
self.niterations, self.niterations + iterinterval))
# run the underlying sampler for the desired interval
self.run_mcmc(iterinterval)
# update the itercounter
self._itercounter = self._itercounter + iterinterval
# dump the current results
self.checkpoint()
# update nsamples for next loop
if self.target_eff_nsamples is not None:
nsamples = self.effective_nsamples
logging.info("Have {} effective samples post burn in".format(
nsamples))
else:
nsamples += iterinterval * self.nchains
@property
def burn_in(self):
"""The class for doing burn-in tests (if specified)."""
return self._burn_in
def set_burn_in(self, burn_in):
"""Sets the object to use for doing burn-in tests."""
self._burn_in = burn_in
@abstractmethod
def effective_nsamples(self):
"""The effective number of samples post burn-in that the sampler has
acquired so far.
"""
pass
@abstractmethod
def run_mcmc(self, niterations):
"""Run the MCMC for the given number of iterations."""
pass
@abstractmethod
def write_results(self, filename):
"""Should write all samples currently in memory to the given file."""
pass
def checkpoint(self):
"""Dumps current samples to the checkpoint file."""
# thin and write new samples
# get the updated thin interval to use
thin_interval = self.get_thin_interval()
for fn in [self.checkpoint_file, self.backup_file]:
with self.io(fn, "a") as fp:
# write the current number of iterations
fp.write_niterations(self.niterations)
# thin samples on disk if it changed
if thin_interval > 1:
# if this is the first time writing, set the file's
# thinned_by
if fp.last_iteration() == 0:
fp.thinned_by = thin_interval
elif thin_interval < fp.thinned_by:
# whatever was done previously resulted in a larger
# thin interval, so we'll set it to the file's
thin_interval = fp.thinned_by
elif thin_interval > fp.thinned_by:
# we need to thin the samples on disk
logging.info("Thinning samples in %s by a factor "
"of %i", fn, int(thin_interval))
fp.thin(thin_interval)
fp_lastiter = fp.last_iteration()
logging.info("Writing samples to %s with thin interval %i", fn,
thin_interval)
self.write_results(fn)
# update the running thin interval
self.thin_interval = thin_interval
# see if we had anything to write after thinning; if not, don't try
# to compute anything
with self.io(self.checkpoint_file, "r") as fp:
nsamples_written = fp.last_iteration() - fp_lastiter
if nsamples_written == 0:
logging.info("No samples written due to thinning")
else:
# check for burn in, compute the acls
self.raw_acls = None
if self.burn_in is not None:
logging.info("Updating burn in")
self.burn_in.evaluate(self.checkpoint_file)
# write
for fn in [self.checkpoint_file, self.backup_file]:
with self.io(fn, "a") as fp:
self.burn_in.write(fp)
logging.info("Computing autocorrelation time")
self.raw_acls = self.compute_acl(self.checkpoint_file)
# write acts, effective number of samples
for fn in [self.checkpoint_file, self.backup_file]:
with self.io(fn, "a") as fp:
if self.raw_acls is not None:
fp.raw_acls = self.raw_acls
fp.acl = self.acl
# write effective number of samples
fp.write_effective_nsamples(self.effective_nsamples)
# write history
for fn in [self.checkpoint_file, self.backup_file]:
with self.io(fn, "a") as fp:
fp.update_checkpoint_history()
# check validity
logging.info("Validating checkpoint and backup files")
checkpoint_valid = validate_checkpoint_files(
self.checkpoint_file, self.backup_file)
if not checkpoint_valid:
raise IOError("error writing to checkpoint file")
elif self.checkpoint_signal:
# kill myself with the specified signal
logging.info("Exiting with SIG{}".format(self.checkpoint_signal))
kill_cmd="os.kill(os.getpid(), signal.SIG{})".format(
self.checkpoint_signal)
exec(kill_cmd)
# clear the in-memory chain to save memory
logging.info("Clearing samples from memory")
self.clear_samples()
@staticmethod
def checkpoint_from_config(cp, section):
"""Gets the checkpoint interval from the given config file.
This looks for 'checkpoint-interval' in the section.
Parameters
----------
cp : ConfigParser
Open config parser to retrieve the argument from.
section : str
Name of the section to retrieve from.
Return
------
int or None :
The checkpoint interval, if it is in the section; otherwise, None.
"""
return get_optional_arg_from_config(cp, section, 'checkpoint-interval',
dtype=int)
@staticmethod
def ckpt_signal_from_config(cp, section):
"""Gets the checkpoint signal from the given config file.
This looks for 'checkpoint-signal' in the section.
Parameters
----------
cp : ConfigParser
Open config parser to retrieve the argument from.
section : str
Name of the section to retrieve from.
Return
------
str or None :
The checkpoint signal, if it is in the section; otherwise, None.
"""
return get_optional_arg_from_config(cp, section, 'checkpoint-signal',
dtype=str)
def set_target_from_config(self, cp, section):
"""Sets the target using the given config file.
This looks for ``niterations`` to set the ``target_niterations``, and
``effective-nsamples`` to set the ``target_eff_nsamples``.
Parameters
----------
cp : ConfigParser
Open config parser to retrieve the argument from.
section : str
Name of the section to retrieve from.
"""
if cp.has_option(section, "niterations"):
niterations = int(cp.get(section, "niterations"))
else:
niterations = None
if cp.has_option(section, "effective-nsamples"):
nsamples = int(cp.get(section, "effective-nsamples"))
else:
nsamples = None
self.set_target(niterations=niterations, eff_nsamples=nsamples)
def set_burn_in_from_config(self, cp):
"""Sets the burn in class from the given config file.
If no burn-in section exists in the file, then this just set the
burn-in class to None.
"""
try:
bit = self.burn_in_class.from_config(cp, self)
except ConfigParser.Error:
bit = None
self.set_burn_in(bit)
def set_thin_interval_from_config(self, cp, section):
"""Sets thinning options from the given config file.
"""
if cp.has_option(section, "thin-interval"):
thin_interval = int(cp.get(section, "thin-interval"))
logging.info("Will thin samples using interval %i", thin_interval)
else:
thin_interval = None
if cp.has_option(section, "max-samples-per-chain"):
max_samps_per_chain = int(cp.get(section, "max-samples-per-chain"))
logging.info("Setting max samples per chain to %i",
max_samps_per_chain)
else:
max_samps_per_chain = None
# check for consistency
if thin_interval is not None and max_samps_per_chain is not None:
raise ValueError("provide either thin-interval or "
"max-samples-per-chain, not both")
# check that the thin interval is less than the checkpoint interval
if thin_interval is not None and self.checkpoint_interval is not None \
and thin_interval >= self.checkpoint_interval:
raise ValueError("thin interval must be less than the checkpoint "
"interval")
self.thin_interval = thin_interval
self.max_samples_per_chain = max_samps_per_chain
@property
def raw_acls(self):
"""Dictionary of parameter names -> autocorrelation lengths.
Depending on the sampler, the ACLs may be an integer, or an array of
values per chain and/or per temperature.
Returns ``None`` if no ACLs have been calculated.
"""
return self._acls
@raw_acls.setter
def raw_acls(self, acls):
"""Sets the raw acls."""
self._acls = acls
@abstractmethod
def acl(self):
"""The autocorrelation length.
This method should convert the raw ACLs into an integer or array that
can be used to extract independent samples from a chain.
"""
pass
@property
def raw_acts(self):
"""Dictionary of parameter names -> autocorrelation time(s).
Returns ``None`` if no ACLs have been calculated.
"""
acls = self.raw_acls
if acls is None:
return None
return {p: acl * self.thin_interval
for (p, acl) in acls.items()}
@property
def act(self):
"""The autocorrelation time(s).
The autocorrelation time is defined as the autocorrelation length times
the ``thin_interval``. It gives the number of iterations between
independent samples. Depending on the sampler, this may either be
a single integer or an array of values.
Returns ``None`` if no ACLs have been calculated.
"""
acl = self.acl
if acl is None:
return None
return acl * self.thin_interval
@abstractmethod
def compute_acf(cls, filename, **kwargs):
"""A method to compute the autocorrelation function of samples in the
given file."""
pass
@abstractmethod
def compute_acl(cls, filename, **kwargs):
"""A method to compute the autocorrelation length of samples in the
given file."""
pass
class EnsembleSupport(object):
"""Adds support for ensemble MCMC samplers."""
@property
def nwalkers(self):
"""The number of walkers used.
Alias of ``nchains``.
"""
return self.nchains
@nwalkers.setter
def nwalkers(self, value):
"""Sets the number of walkers."""
# we'll actually store it to the nchains attribute
self.nchains = value
@property
def acl(self):
"""The autocorrelation length of the ensemble.
This is calculated by taking the maximum over all of the ``raw_acls``.
This works for both single and parallel-tempered ensemble samplers.
Returns ``None`` if no ACLs have been set.
"""
acls = self.raw_acls
if acls is None:
return None
return numpy.array(list(acls.values())).max()
@property
def effective_nsamples(self):
"""The effective number of samples post burn-in that the sampler has
acquired so far.
"""
if self.burn_in is not None and not self.burn_in.is_burned_in:
# not burned in, so there's no effective samples
return 0
act = self.act
if act is None:
act = numpy.inf
if self.burn_in is None:
start_iter = 0
else:
start_iter = self.burn_in.burn_in_iteration
nperwalker = nsamples_in_chain(start_iter, act, self.niterations)
if self.burn_in is not None:
# after burn in, we always have at least 1 sample per walker
nperwalker = max(nperwalker, 1)
return int(self.nwalkers * nperwalker)
#
# =============================================================================
#
# Functions for computing autocorrelation lengths
#
# =============================================================================
#
def ensemble_compute_acf(filename, start_index=None, end_index=None,
per_walker=False, walkers=None, parameters=None):
"""Computes the autocorrleation function for an ensemble MCMC.
By default, parameter values are averaged over all walkers at each
iteration. The ACF is then calculated over the averaged chain. An
ACF per-walker will be returned instead if ``per_walker=True``.
Parameters
-----------
filename : str
Name of a samples file to compute ACFs for.
start_index : int, optional
The start index to compute the acl from. If None (the default), will
try to use the number of burn-in iterations in the file; otherwise,
will start at the first sample.
end_index : int, optional
The end index to compute the acl to. If None (the default), will go to
the end of the current iteration.
per_walker : bool, optional
Return the ACF for each walker separately. Default is False.
walkers : int or array, optional
Calculate the ACF using only the given walkers. If None (the
default) all walkers will be used.
parameters : str or array, optional
Calculate the ACF for only the given parameters. If None (the
default) will calculate the ACF for all of the model params.
Returns
-------
dict :
Dictionary of arrays giving the ACFs for each parameter. If
``per-walker`` is True, the arrays will have shape
``nwalkers x niterations``.
"""
acfs = {}
with loadfile(filename, 'r') as fp:
if parameters is None:
parameters = fp.variable_params
if isinstance(parameters, str):
parameters = [parameters]
for param in parameters:
if per_walker:
# just call myself with a single walker
if walkers is None:
walkers = numpy.arange(fp.nwalkers)
arrays = [
ensemble_compute_acf(filename, start_index=start_index,
end_index=end_index,
per_walker=False, walkers=ii,
parameters=param)[param]
for ii in walkers]
acfs[param] = numpy.vstack(arrays)
else:
samples = fp.read_raw_samples(
param, thin_start=start_index, thin_interval=1,
thin_end=end_index, walkers=walkers,
flatten=False)[param]
samples = samples.mean(axis=0)
acfs[param] = autocorrelation.calculate_acf(
samples).numpy()
return acfs
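# Illustrative usage (the file name is hypothetical):
#
#     acfs = ensemble_compute_acf('samples.hdf', per_walker=True)
#
# returns one ``nwalkers x niterations`` ACF array per parameter; with the
# default ``per_walker=False`` each parameter maps to a single ACF computed
# from the walker-averaged chain.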
def ensemble_compute_acl(filename, start_index=None, end_index=None,
min_nsamples=10):
"""Computes the autocorrleation length for an ensemble MCMC.
Parameter values are averaged over all walkers at each iteration.
The ACL is then calculated over the averaged chain. If an ACL cannot
be calculated because there are not enough samples, it will be set
to ``inf``.
Parameters
-----------
filename : str
Name of a samples file to compute ACLs for.
start_index : int, optional
The start index to compute the acl from. If None, will try to use
the number of burn-in iterations in the file; otherwise, will start
at the first sample.
end_index : int, optional
The end index to compute the acl to. If None, will go to the end
of the current iteration.
min_nsamples : int, optional
Require a minimum number of samples to compute an ACL. If the
number of samples per walker is less than this, will just set to
``inf``. Default is 10.
Returns
-------
dict
A dictionary giving the ACL for each parameter.
"""
acls = {}
with loadfile(filename, 'r') as fp:
for param in fp.variable_params:
samples = fp.read_raw_samples(
param, thin_start=start_index, thin_interval=1,
thin_end=end_index, flatten=False)[param]
samples = samples.mean(axis=0)
# if < min number of samples, just set to inf
if samples.size < min_nsamples:
acl = numpy.inf
else:
acl = autocorrelation.calculate_acl(samples)
if acl <= 0:
acl = numpy.inf
acls[param] = acl
maxacl = numpy.array(list(acls.values())).max()
logging.info("ACT: %s", str(maxacl*fp.thinned_by))
return acls
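# Illustrative usage (the file name is hypothetical):
#
#     acls = ensemble_compute_acl('samples.hdf', min_nsamples=10)
#     ensemble_acl = max(acls.values())
#
# Parameters whose averaged chains are too short, or whose ACL estimates are
# non-positive, map to ``numpy.inf``.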
| 36,376
| 37.331928
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/sampler/emcee.py
|
# Copyright (C) 2016 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This modules provides classes and functions for using the emcee sampler
packages for parameter estimation.
"""
import numpy
import emcee
from pycbc.pool import choose_pool
from .base import (BaseSampler, setup_output)
from .base_mcmc import (BaseMCMC, EnsembleSupport,
ensemble_compute_acf, ensemble_compute_acl,
raw_samples_to_dict,
blob_data_to_dict, get_optional_arg_from_config)
from ..burn_in import EnsembleMCMCBurnInTests
from pycbc.inference.io import EmceeFile
from .. import models
#
# =============================================================================
#
# Samplers
#
# =============================================================================
#
if emcee.__version__ >= '3.0.0':
raise ImportError("emcee >= 3.0.0 is not supported by this sampler")
class EmceeEnsembleSampler(EnsembleSupport, BaseMCMC, BaseSampler):
"""This class is used to construct an MCMC sampler from the emcee
package's EnsembleSampler.
Parameters
----------
model : model
A model from ``pycbc.inference.models``.
nwalkers : int
Number of walkers to use in sampler.
pool : function with map, Optional
A provider of a map function that allows a function call to be run
over multiple sets of arguments and possibly maps them to
cores/nodes/etc.
"""
name = "emcee"
_io = EmceeFile
burn_in_class = EnsembleMCMCBurnInTests
def __init__(self, model, nwalkers,
checkpoint_interval=None, checkpoint_signal=None,
logpost_function=None, nprocesses=1, use_mpi=False):
self.model = model
# create a wrapper for calling the model
if logpost_function is None:
logpost_function = 'logposterior'
model_call = models.CallModel(model, logpost_function)
# these are used to help paralleize over multiple cores / MPI
models._global_instance = model_call
model_call = models._call_global_model
pool = choose_pool(mpi=use_mpi, processes=nprocesses)
# set up emcee
self.nwalkers = nwalkers
ndim = len(model.variable_params)
self._sampler = emcee.EnsembleSampler(nwalkers, ndim, model_call,
pool=pool)
# emcee uses its own internal random number generator; we'll set it
# to have the same state as the numpy generator
rstate = numpy.random.get_state()
self._sampler.random_state = rstate
self._checkpoint_interval = checkpoint_interval
self._checkpoint_signal = checkpoint_signal
@property
def io(self):
return self._io
@property
def base_shape(self):
return (self.nwalkers,)
@property
def samples(self):
"""A dict mapping ``variable_params`` to arrays of samples currently
in memory.
The arrays have shape ``nwalkers x niterations``.
"""
# emcee stores samples to its chain attribute as a
# nwalker x niterations x ndim array
raw_samples = self._sampler.chain
return raw_samples_to_dict(self, raw_samples)
@property
def model_stats(self):
"""A dict mapping the model's ``default_stats`` to arrays of values.
The returned array has shape ``nwalkers x niterations``.
"""
stats = self.model.default_stats
return blob_data_to_dict(stats, self._sampler.blobs)
def clear_samples(self):
"""Clears the samples and stats from memory.
"""
# store the iteration that the clear is occurring on
self._lastclear = self.niterations
self._itercounter = 0
# now clear the chain
self._sampler.reset()
self._sampler.clear_blobs()
def set_state_from_file(self, filename):
"""Sets the state of the sampler back to the instance saved in a file.
"""
with self.io(filename, 'r') as fp:
rstate = fp.read_random_state()
# set the numpy random state
numpy.random.set_state(rstate)
# set emcee's generator to the same state
self._sampler.random_state = rstate
def run_mcmc(self, niterations):
"""Advance the ensemble for a number of samples.
Parameters
----------
niterations : int
Number of iterations to run the sampler for.
"""
pos = self._pos
if pos is None:
pos = self._p0
res = self._sampler.run_mcmc(pos, niterations)
p, _, _ = res[0], res[1], res[2]
# update the positions
self._pos = p
def write_results(self, filename):
"""Writes samples, model stats, acceptance fraction, and random state
to the given file.
Parameters
-----------
filename : str
The file to write to. The file is opened using the ``io`` class
in an append state.
"""
with self.io(filename, 'a') as fp:
# write samples
fp.write_samples(self.samples,
parameters=self.model.variable_params,
last_iteration=self.niterations)
# write stats
fp.write_samples(self.model_stats,
last_iteration=self.niterations)
# write acceptance
fp.write_acceptance_fraction(self._sampler.acceptance_fraction)
# write random state
fp.write_random_state(state=self._sampler.random_state)
def finalize(self):
"""All data is written by the last checkpoint in the run method, so
this just passes."""
pass
@staticmethod
def compute_acf(filename, **kwargs):
r"""Computes the autocorrelation function.
Calls :py:func:`base_mcmc.ensemble_compute_acf`; see that
function for details.
Parameters
----------
filename : str
Name of a samples file to compute ACFs for.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_mcmc.ensemble_compute_acf`.
Returns
-------
dict :
Dictionary of arrays giving the ACFs for each parameter. If
``per-walker`` is True, the arrays will have shape
``nwalkers x niterations``.
"""
return ensemble_compute_acf(filename, **kwargs)
@staticmethod
def compute_acl(filename, **kwargs):
r"""Computes the autocorrelation length.
Calls :py:func:`base_mcmc.ensemble_compute_acl`; see that
function for details.
Parameters
-----------
filename : str
Name of a samples file to compute ACLs for.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_mcmc.ensemble_compute_acl`.
Returns
-------
dict
A dictionary giving the ACL for each parameter.
"""
return ensemble_compute_acl(filename, **kwargs)
@classmethod
def from_config(cls, cp, model, output_file=None, nprocesses=1,
use_mpi=False):
"""Loads the sampler from the given config file."""
section = "sampler"
# check name
assert cp.get(section, "name") == cls.name, (
"name in section [sampler] must match mine")
# get the number of walkers to use
nwalkers = int(cp.get(section, "nwalkers"))
# get the checkpoint interval, if it's specified
checkpoint_interval = cls.checkpoint_from_config(cp, section)
checkpoint_signal = cls.ckpt_signal_from_config(cp, section)
# get the logpost function
lnpost = get_optional_arg_from_config(cp, section, 'logpost-function')
obj = cls(model, nwalkers,
checkpoint_interval=checkpoint_interval,
checkpoint_signal=checkpoint_signal,
logpost_function=lnpost, nprocesses=nprocesses,
use_mpi=use_mpi)
# set target
obj.set_target_from_config(cp, section)
# add burn-in if it's specified
obj.set_burn_in_from_config(cp)
# set prethin options
obj.set_thin_interval_from_config(cp, section)
# Set up the output file
setup_output(obj, output_file)
if not obj.new_checkpoint:
obj.resume_from_checkpoint()
else:
obj.set_start_from_config(cp)
return obj
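# An illustrative ``[sampler]`` configuration for this sampler (the numerical
# values are hypothetical examples, not recommendations):
#
#   [sampler]
#   name = emcee
#   nwalkers = 500
#   niterations = 10000
#   checkpoint-interval = 2000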
| 9,506
| 33.69708
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/sampler/__init__.py
|
# Copyright (C) 2016 Christopher M. Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module provides a list of implemented samplers for parameter estimation.
"""
import logging
# pylint: disable=unused-import
from .base import (initial_dist_from_config, create_new_output_file)
from .multinest import MultinestSampler
from .ultranest import UltranestSampler
from .dummy import DummySampler
# list of available samplers
samplers = {cls.name: cls for cls in (
MultinestSampler,
UltranestSampler,
DummySampler,
)}
try:
from .emcee import EmceeEnsembleSampler
from .emcee_pt import EmceePTSampler
samplers[EmceeEnsembleSampler.name] = EmceeEnsembleSampler
samplers[EmceePTSampler.name] = EmceePTSampler
except ImportError:
pass
try:
from .epsie import EpsieSampler
samplers[EpsieSampler.name] = EpsieSampler
except ImportError:
pass
try:
from .ptemcee import PTEmceeSampler
samplers[PTEmceeSampler.name] = PTEmceeSampler
except ImportError:
pass
try:
from .cpnest import CPNestSampler
samplers[CPNestSampler.name] = CPNestSampler
except ImportError:
pass
try:
from .dynesty import DynestySampler
samplers[DynestySampler.name] = DynestySampler
except ImportError:
pass
def load_from_config(cp, model, **kwargs):
"""Loads a sampler from the given config file.
This looks for a name in the section ``[sampler]`` to determine which
sampler class to load. That sampler's ``from_config`` is then called.
Parameters
----------
cp : WorkflowConfigParser
Config parser to read from.
model : pycbc.inference.model
Which model to pass to the sampler.
\**kwargs :
All other keyword arguments are passed directly to the sampler's
``from_config`` file.
Returns
-------
sampler :
The initialized sampler.
"""
if len(model.variable_params) == 0:
logging.info('No variable params, so assuming Dummy Sampler')
return DummySampler.from_config(cp, model, **kwargs)
name = cp.get('sampler', 'name')
return samplers[name].from_config(cp, model, **kwargs)
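# Minimal usage sketch, not part of the package API: the config file path, the
# model object, and the output file name below are hypothetical.
def _example_load_sampler(config_path, model):
    from pycbc.workflow import WorkflowConfigParser
    cp = WorkflowConfigParser([config_path])
    # the name option in the [sampler] section selects the class from the
    # samplers dict; all other options are handled by that class's from_config
    return load_from_config(cp, model, output_file='inference.hdf')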
| 2,813
| 28.93617
| 78
|
py
|
pycbc
|
pycbc-master/pycbc/inference/sampler/multinest.py
|
# Copyright (C) 2018 Daniel Finstad
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides classes and functions for using the Multinest sampler
package for parameter estimation.
"""
import logging
import sys
import numpy
from pycbc.inference.io import (MultinestFile, validate_checkpoint_files)
from pycbc.distributions import read_constraints_from_config
from pycbc.pool import is_main_process
from pycbc.transforms import apply_transforms
from .base import (BaseSampler, setup_output)
from .base_mcmc import get_optional_arg_from_config
#
# =============================================================================
#
# Samplers
#
# =============================================================================
#
class MultinestSampler(BaseSampler):
"""This class is used to construct a nested sampler from
the Multinest package.
Parameters
----------
model : model
A model from ``pycbc.inference.models``.
nlivepoints : int
Number of live points to use in sampler.
"""
name = "multinest"
_io = MultinestFile
def __init__(self, model, nlivepoints, checkpoint_interval=1000,
importance_nested_sampling=False,
evidence_tolerance=0.1, sampling_efficiency=0.01,
constraints=None):
try:
loglevel = logging.getLogger().getEffectiveLevel()
logging.getLogger().setLevel(logging.WARNING)
from pymultinest import Analyzer, run
self.run_multinest = run
self.analyzer = Analyzer
logging.getLogger().setLevel(loglevel)
except ImportError:
raise ImportError("pymultinest is not installed.")
super(MultinestSampler, self).__init__(model)
self._constraints = constraints
self._nlivepoints = nlivepoints
self._ndim = len(model.variable_params)
self._random_state = numpy.random.get_state()
self._checkpoint_interval = checkpoint_interval
self._ztol = evidence_tolerance
self._eff = sampling_efficiency
self._ins = importance_nested_sampling
self._samples = None
self._itercount = None
self._logz = None
self._dlogz = None
self._importance_logz = None
self._importance_dlogz = None
self.is_main_process = is_main_process()
@property
def io(self):
return self._io
@property
def niterations(self):
"""Get the current number of iterations.
"""
itercount = self._itercount
if itercount is None:
itercount = 0
return itercount
@property
def checkpoint_interval(self):
"""Get the number of iterations between checkpoints.
"""
return self._checkpoint_interval
@property
def nlivepoints(self):
"""Get the number of live points used in sampling.
"""
return self._nlivepoints
@property
def logz(self):
"""Get the current estimate of the log evidence.
"""
return self._logz
@property
def dlogz(self):
"""Get the current error estimate of the log evidence.
"""
return self._dlogz
@property
def importance_logz(self):
"""Get the current importance weighted estimate of the log
evidence.
"""
return self._importance_logz
@property
def importance_dlogz(self):
"""Get the current error estimate of the importance
weighted log evidence.
"""
return self._importance_dlogz
@property
def samples(self):
"""A dict mapping ``variable_params`` to arrays of samples currently
in memory.
"""
samples_dict = {p: self._samples[:, i] for i, p in
enumerate(self.model.variable_params)}
return samples_dict
@property
def model_stats(self):
"""A dict mapping the model's ``default_stats`` to arrays of values.
"""
stats = []
for sample in self._samples:
params = dict(zip(self.model.variable_params, sample))
if self.model.sampling_transforms is not None:
params = self.model.sampling_transforms.apply(params)
self.model.update(**params)
self.model.logposterior
stats.append(self.model.get_current_stats())
stats = numpy.array(stats)
return {s: stats[:, i] for i, s in enumerate(self.model.default_stats)}
def get_posterior_samples(self):
"""Read posterior samples from ASCII output file created by
multinest.
"""
post_file = self.backup_file[:-9]+'-post_equal_weights.dat'
return numpy.loadtxt(post_file, ndmin=2)
def check_if_finished(self):
"""Estimate remaining evidence to see if desired evidence-tolerance
stopping criterion has been reached.
"""
resume_file = self.backup_file[:-9] + '-resume.dat'
current_vol, _, _ = numpy.loadtxt(
resume_file, skiprows=6, unpack=True)
maxloglike = max(self.get_posterior_samples()[:, -1])
logz_remain = numpy.exp(maxloglike +
numpy.log(current_vol) - self.logz)
logging.info("Estimate of remaining logZ is %s", logz_remain)
done = logz_remain < self._ztol
return done
def set_initial_conditions(self, initial_distribution=None,
samples_file=None):
"""Sets the initial starting point for the sampler.
If a starting samples file is provided, will also load the random
state from it.
"""
# use samples file to set the state of the sampler
if samples_file is not None:
self.set_state_from_file(samples_file)
def resume_from_checkpoint(self):
"""Resume sampler from checkpoint
"""
pass
def set_state_from_file(self, filename):
"""Sets the state of the sampler back to the instance saved in a file.
"""
with self.io(filename, 'r') as f_p:
rstate = f_p.read_random_state()
# set the numpy random state
numpy.random.set_state(rstate)
# set sampler's generator to the same state
self._random_state = rstate
def loglikelihood(self, cube, *extra_args):
"""Log likelihood evaluator that gets passed to multinest.
"""
params = {p: v for p, v in zip(self.model.variable_params, cube)}
# apply transforms
if self.model.sampling_transforms is not None:
params = self.model.sampling_transforms.apply(params)
if self.model.waveform_transforms is not None:
params = apply_transforms(params, self.model.waveform_transforms)
# apply constraints
if (self._constraints is not None and
not all([c(params) for c in self._constraints])):
return -numpy.inf
self.model.update(**params)
return self.model.loglikelihood
def transform_prior(self, cube, *extra_args):
"""Transforms the unit hypercube that multinest makes its draws
from, into the prior space defined in the config file.
"""
dict_cube = dict(zip(self.model.variable_params, cube))
inv = self.model.prior_distribution.cdfinv(**dict_cube)
for i, param in enumerate(self.model.variable_params):
cube[i] = inv[param]
return cube
def run(self):
"""Runs the sampler until the specified evidence tolerance
is reached.
"""
if self.new_checkpoint:
self._itercount = 0
else:
self.set_initial_conditions(samples_file=self.checkpoint_file)
with self.io(self.checkpoint_file, "r") as f_p:
self._itercount = f_p.niterations
outputfiles_basename = self.backup_file[:-9] + '-'
analyzer = self.analyzer(self._ndim,
outputfiles_basename=outputfiles_basename)
iterinterval = self.checkpoint_interval
done = False
while not done:
logging.info("Running sampler for %s to %s iterations",
self.niterations, self.niterations + iterinterval)
# run multinest
self.run_multinest(self.loglikelihood, self.transform_prior,
self._ndim, n_live_points=self.nlivepoints,
evidence_tolerance=self._ztol,
sampling_efficiency=self._eff,
importance_nested_sampling=self._ins,
max_iter=iterinterval,
n_iter_before_update=iterinterval,
seed=numpy.random.randint(0, 1e6),
outputfiles_basename=outputfiles_basename,
multimodal=False, verbose=True)
# parse results from multinest output files
nest_stats = analyzer.get_mode_stats()
self._logz = nest_stats["nested sampling global log-evidence"]
self._dlogz = nest_stats[
"nested sampling global log-evidence error"]
if self._ins:
self._importance_logz = nest_stats[
"nested importance sampling global log-evidence"]
self._importance_dlogz = nest_stats[
"nested importance sampling global log-evidence error"]
self._samples = self.get_posterior_samples()[:, :-1]
logging.info("Have %s posterior samples", self._samples.shape[0])
# update the itercounter
self._itercount += iterinterval
# make sure there's at least 1 posterior sample
if self._samples.shape[0] == 0:
continue
# dump the current results
if self.is_main_process:
self.checkpoint()
# check if we're finished
done = self.check_if_finished()
if not self.is_main_process:
sys.exit()
def write_results(self, filename):
"""Writes samples, model stats, acceptance fraction, and random state
to the given file.
Parameters
-----------
filename : str
The file to write to. The file is opened using the ``io`` class
            in an append state.
"""
with self.io(filename, 'a') as f_p:
# write samples
f_p.write_samples(self.samples, self.model.variable_params)
# write stats
f_p.write_samples(self.model_stats)
# write evidence
f_p.write_logevidence(self.logz, self.dlogz,
self.importance_logz,
self.importance_dlogz)
# write random state (use default numpy.random_state)
f_p.write_random_state()
def checkpoint(self):
"""Dumps current samples to the checkpoint file."""
logging.info("Writing samples to files")
for f_n in [self.checkpoint_file, self.backup_file]:
self.write_results(f_n)
with self.io(f_n, "a") as f_p:
f_p.write_niterations(self.niterations)
logging.info("Validating checkpoint and backup files")
checkpoint_valid = validate_checkpoint_files(
self.checkpoint_file, self.backup_file, check_nsamples=False)
if not checkpoint_valid:
raise IOError("error writing to checkpoint file")
def setup_output(self, output_file):
"""Sets up the sampler's checkpoint and output files.
The checkpoint file has the same name as the output file, but with
``.checkpoint`` appended to the name. A backup file will also be
created.
Parameters
----------
output_file : str
Name of the output file.
"""
if self.is_main_process:
setup_output(self, output_file)
else:
# child processes just store filenames
checkpoint_file = output_file + '.checkpoint'
backup_file = output_file + '.bkup'
self.checkpoint_file = checkpoint_file
self.backup_file = backup_file
self.checkpoint_valid = True
self.new_checkpoint = True
def finalize(self):
"""All data is written by the last checkpoint in the run method, so
this just passes."""
pass
@classmethod
def from_config(cls, cp, model, output_file=None, nprocesses=1,
use_mpi=False):
"""Loads the sampler from the given config file."""
section = "sampler"
# check name
assert cp.get(section, "name") == cls.name, (
"name in section [sampler] must match mine")
# get the number of live points to use
nlivepoints = int(cp.get(section, "nlivepoints"))
# get the checkpoint interval, if it's specified
checkpoint = get_optional_arg_from_config(
cp, section, 'checkpoint-interval', dtype=int)
# get the evidence tolerance, if specified
ztol = get_optional_arg_from_config(cp, section, 'evidence-tolerance',
dtype=float)
# get the sampling efficiency, if specified
eff = get_optional_arg_from_config(cp, section, 'sampling-efficiency',
dtype=float)
# get importance nested sampling setting, if specified
ins = get_optional_arg_from_config(cp, section,
'importance-nested-sampling',
dtype=bool)
# get constraints since we can't use the joint prior distribution
constraints = read_constraints_from_config(cp)
# build optional kwarg dict
kwarg_names = ['evidence_tolerance', 'sampling_efficiency',
'importance_nested_sampling',
'checkpoint_interval']
optional_kwargs = {k: v for k, v in
zip(kwarg_names, [ztol, eff, ins, checkpoint]) if
v is not None}
obj = cls(model, nlivepoints, constraints=constraints,
**optional_kwargs)
obj.setup_output(output_file)
return obj
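# Illustrative configuration that the from_config method above could parse;
# the option values here are hypothetical and only show the recognized names.
_EXAMPLE_MULTINEST_CONFIG = """
[sampler]
name = multinest
nlivepoints = 500
checkpoint-interval = 1000
evidence-tolerance = 0.1
sampling-efficiency = 0.01
"""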
| 15,427
| 37.666667
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/sampler/dummy.py
|
""" Dummy class when no actual sampling is needed, but we may want to do
some reconstruction supported by the likelihood model.
"""
import numpy
from pycbc.inference.io import PosteriorFile
from pycbc.inference import models
from pycbc.pool import choose_pool
from .base import (BaseSampler, setup_output)
def call_reconstruct(iteration):
""" Accessor to update the global model and call its reconstruction
routine.
"""
models._global_instance.update()
return models._global_instance.reconstruct(seed=iteration)
class DummySampler(BaseSampler):
"""Dummy sampler for not doing sampling
Parameters
----------
model : Model
An instance of a model from ``pycbc.inference.models``.
"""
name = 'dummy'
def __init__(self, model, *args, nprocesses=1, use_mpi=False,
num_samples=1000, **kwargs):
super().__init__(model, *args)
models._global_instance = model
self.num_samples = int(num_samples)
self.pool = choose_pool(mpi=use_mpi, processes=nprocesses)
self._samples = {}
@classmethod
def from_config(cls, cp, model, output_file=None, nprocesses=1,
use_mpi=False):
"""This should initialize the sampler given a config file.
"""
kwargs = {k: cp.get('sampler', k) for k in cp.options('sampler')}
obj = cls(model, nprocesses=nprocesses, use_mpi=use_mpi, **kwargs)
setup_output(obj, output_file, check_nsamples=False, validate=False)
return obj
@property
def samples(self):
"""A dict mapping variable_params to arrays of samples currently
in memory. The dictionary may also contain sampling_params.
The sample arrays may have any shape, and may or may not be thinned.
"""
return self._samples
@property
def model_stats(self):
pass
def run(self):
samples = self.pool.map(call_reconstruct,
range(self.num_samples))
self._samples = {k: numpy.array([x[k] for x in samples])
for k in samples[0]}
def finalize(self):
with self.io(self.checkpoint_file, "a") as fp:
fp.write_samples(samples=self._samples)
checkpoint = resume_from_checkpoint = run
@property
def io(self):
"""A class that inherits from ``BaseInferenceFile`` to handle IO with
an hdf file.
        This should be a class, not an instance of the class, so that the
        sampler can initialize it when needed.
"""
return PosteriorFile
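# Hypothetical [sampler] section for this sampler: from_config forwards every
# option to __init__ as a string, so only num_samples is interpreted here.
_EXAMPLE_DUMMY_CONFIG = """
[sampler]
name = dummy
num_samples = 500
"""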
| 2,594
| 29.529412
| 77
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/epsie.py
|
# Copyright (C) 2019 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module provides IO classes for epsie samplers.
"""
import numpy
from pickle import UnpicklingError
from epsie import load_state
from .base_sampler import BaseSamplerFile
from .base_mcmc import MCMCMetadataIO
from .base_multitemper import (CommonMultiTemperedMetadataIO,
write_samples,
read_raw_samples)
class EpsieFile(MCMCMetadataIO, CommonMultiTemperedMetadataIO,
BaseSamplerFile):
"""Class to handle IO for Epsie's parallel-tempered sampler."""
name = 'epsie_file'
@property
def nchains(self):
"""Alias for nwalkers."""
return self.nwalkers
@property
def betas(self):
"""The betas that were used."""
return self[self.sampler_group]['betas'][()]
@property
def swap_interval(self):
"""The interval that temperature swaps occurred at."""
return self[self.sampler_group].attrs['swap_interval']
@swap_interval.setter
def swap_interval(self, swap_interval):
"""Stores the swap interval to the sampler group's attrs."""
self[self.sampler_group].attrs['swap_interval'] = swap_interval
@property
def seed(self):
"""The sampler's seed."""
# convert seed from str back to int (see setter below for reason)
return int(self[self.sampler_group].attrs['seed'])
@seed.setter
def seed(self, seed):
"""Store the sampler's seed."""
        # epsie uses numpy's new random generators, which use long integers
# for seeds. hdf5 doesn't know how to handle long integers, so we'll
# store it as a string
self[self.sampler_group].attrs['seed'] = str(seed)
def write_sampler_metadata(self, sampler):
"""Adds writing seed and betas to MultiTemperedMCMCIO.
"""
super(EpsieFile, self).write_sampler_metadata(sampler)
self.seed = sampler.seed
self.write_data("betas", sampler.betas, path=self.sampler_group)
def thin(self, thin_interval):
"""Thins the samples on disk to the given thinning interval.
Also thins the acceptance ratio and the temperature data, both of
which are stored in the ``sampler_info`` group.
"""
# We'll need to know what the new interval to thin by will be
        # so we can properly thin the acceptance ratio and temperature swaps.
        # We need to do this before calling the base thin, as we need to know
        # what the current thinned_by is.
new_interval = thin_interval // self.thinned_by
# now thin the samples
super(EpsieFile, self).thin(thin_interval)
# thin the acceptance ratio
self._thin_data(self.sampler_group, ['acceptance_ratio'],
new_interval)
# thin the temperature swaps; since these may not happen every
# iteration, the thin interval we use for these is different
ts_group = '/'.join([self.sampler_group, 'temperature_swaps'])
ts_thin_interval = new_interval // self.swap_interval
if ts_thin_interval > 1:
self._thin_data(ts_group, ['swap_index'],
ts_thin_interval)
self._thin_data(ts_group, ['acceptance_ratio'],
ts_thin_interval)
def write_samples(self, samples, **kwargs):
r"""Writes samples to the given file.
Calls :py:func:`base_multitemper.write_samples`. See that function for
details.
Parameters
----------
samples : dict
The samples to write. Each array in the dictionary should have
shape ntemps x nwalkers x niterations.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_multitemper.write_samples`.
"""
write_samples(self, samples, **kwargs)
def read_raw_samples(self, fields, **kwargs):
r"""Base function for reading samples.
Calls :py:func:`base_multitemper.read_raw_samples`. See that
function for details.
Parameters
-----------
fields : list
The list of field names to retrieve.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_multitemper.read_raw_samples`.
Returns
-------
dict
A dictionary of field name -> numpy array pairs.
"""
return read_raw_samples(self, fields, **kwargs)
def write_acceptance_ratio(self, acceptance_ratio, last_iteration=None):
"""Writes the acceptance ratios to the sampler info group.
Parameters
----------
acceptance_ratio : array
The acceptance ratios to write. Should have shape
``ntemps x nchains x niterations``.
"""
# we'll use the write_samples machinery to write the acceptance ratios
self.write_samples({'acceptance_ratio': acceptance_ratio},
last_iteration=last_iteration,
samples_group=self.sampler_group)
def read_acceptance_ratio(self, temps=None, chains=None):
"""Reads the acceptance ratios.
Ratios larger than 1 are set back to 1 before returning.
Parameters
-----------
temps : (list of) int, optional
The temperature index (or a list of indices) to retrieve. If None,
acceptance ratios from all temperatures and all chains will be
retrieved.
chains : (list of) int, optional
The chain index (or a list of indices) to retrieve. If None,
ratios from all chains will be obtained.
Returns
-------
array
Array of acceptance ratios with shape (requested temps,
requested chains, niterations).
"""
group = self.sampler_group + '/acceptance_ratio'
if chains is None:
wmask = numpy.ones(self.nchains, dtype=bool)
else:
wmask = numpy.zeros(self.nchains, dtype=bool)
wmask[chains] = True
if temps is None:
tmask = numpy.ones(self.ntemps, dtype=bool)
else:
tmask = numpy.zeros(self.ntemps, dtype=bool)
tmask[temps] = True
all_ratios = self[group][:]
# make sure values > 1 are set back to 1
all_ratios[all_ratios > 1] = 1.
return all_ratios[numpy.ix_(tmask, wmask)]
def read_acceptance_rate(self, temps=None, chains=None):
"""Reads the acceptance rate.
This calls :py:func:`read_acceptance_ratio`, then averages the ratios
over all iterations to get the average rate.
Parameters
-----------
temps : (list of) int, optional
The temperature index (or a list of indices) to retrieve. If None,
acceptance rates from all temperatures and all chains will be
retrieved.
chains : (list of) int, optional
The chain index (or a list of indices) to retrieve. If None,
rates from all chains will be obtained.
Returns
-------
array
Array of acceptance ratios with shape (requested temps,
requested chains).
"""
all_ratios = self.read_acceptance_ratio(temps, chains)
# average over the number of iterations
all_ratios = all_ratios.mean(axis=-1)
return all_ratios
def read_acceptance_fraction(self, temps=None, walkers=None):
"""Alias for :py:func:`read_acceptance_rate`.
"""
return self.read_acceptance_rate(temps=temps, chains=walkers)
def write_temperature_data(self, swap_index, acceptance_ratio,
swap_interval, last_iteration):
"""Writes temperature swaps and acceptance ratios.
Parameters
----------
swap_index : array
The indices indicating which temperatures were swapped. Should have
shape ``ntemps x nchains x (niterations/swap_interval)``.
acceptance_ratio : array
The array of acceptance ratios between temperatures. Should
have shape ``(ntemps-1) x nchains x (niterations/swap_interval)``.
swap_interval : int
The number of iterations between temperature swaps.
last_iteration : int
The iteration of the last sample.
"""
self.swap_interval = swap_interval
group = '/'.join([self.sampler_group, 'temperature_swaps'])
# we'll use the write_samples machinery to write the acceptance ratios;
# if temperature swaps didn't happen every iteration, then a smaller
# thinning interval than what is used for the samples should be used
thin_by = self.thinned_by // swap_interval
# we'll also tell the write samples that the last "iteration" is the
# last iteration / the swap interval, to get the spacing correct
last_iteration = last_iteration // swap_interval
# we need to write the two arrays separately, since they have different
# dimensions in temperature
self.write_samples({'swap_index': swap_index},
last_iteration=last_iteration,
samples_group=group, thin_by=thin_by)
self.write_samples({'acceptance_ratio': acceptance_ratio},
last_iteration=last_iteration,
samples_group=group, thin_by=thin_by)
def validate(self):
"""Adds attemp to load checkpoint to validation test."""
valid = super(EpsieFile, self).validate()
# try to load the checkpoint
if valid:
try:
load_state(self, self.sampler_group)
except (KeyError, UnpicklingError):
# will get this if the state wasn't written, or it was
# corrupted for some reason
valid = False
return valid
@staticmethod
def _get_optional_args(args, opts, err_on_missing=False, **kwargs):
# need this to make sure options called "walkers" are renamed to
# "chains"
parsed = BaseSamplerFile._get_optional_args(
args, opts, err_on_missing=err_on_missing, **kwargs)
try:
chains = parsed.pop('walkers')
parsed['chains'] = chains
except KeyError:
pass
return parsed
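# Sketch of reading acceptance information back from an existing epsie file;
# the file name is hypothetical and the file must hold a completed run.
def _example_read_acceptance(path='epsie_samples.hdf'):
    with EpsieFile(path, 'r') as fp:
        # mean acceptance over iterations for the coldest temperature chain
        return fp.read_acceptance_rate(temps=0)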
| 11,226
| 38.255245
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/cpnest.py
|
# Copyright (C) 2019 Sumit Kumar and Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Provides IO for the emcee sampler.
"""
from .base_nested_sampler import BaseNestedSamplerFile
class CPNestFile(BaseNestedSamplerFile):
"""Class to handle file IO for the ``cpnest`` sampler."""
name = 'cpnest_file'
| 1,205
| 35.545455
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/base_multitemper.py
|
# Copyright (C) 2018 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Provides I/O support for multi-tempered sampler.
"""
import argparse
import numpy
from .base_mcmc import (CommonMCMCMetadataIO, thin_samples_for_writing,
_ensemble_get_index, _ensemble_get_walker_index,
_get_index)
class ParseTempsArg(argparse.Action):
"""Argparse action that will parse temps argument.
If the provided argument is 'all', sets 'all' in the namespace dest. If a
a sequence of numbers are provided, converts those numbers to ints before
saving to the namespace.
"""
def __init__(self, type=str, **kwargs): # pylint: disable=redefined-builtin
# check that type is string
if type != str:
raise ValueError("the type for this action must be a string")
super(ParseTempsArg, self).__init__(type=type, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
singlearg = isinstance(values, str)
if singlearg:
values = [values]
if values[0] == 'all':
# check that only a single value was provided
if len(values) > 1:
raise ValueError("if provide 'all', should not specify any "
"other temps")
temps = 'all'
else:
temps = []
for val in values:
try:
val = int(val)
except ValueError:
pass
temps.append(val)
if singlearg:
temps = temps[0]
setattr(namespace, self.dest, temps)
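# Short sketch of how ParseTempsArg behaves when attached to a parser; the
# option name mirrors the --temps argument added by extra_args_parser below.
def _example_parse_temps():
    parser = argparse.ArgumentParser()
    parser.add_argument('--temps', nargs='+', default=0, action=ParseTempsArg)
    # integer strings are converted element-wise to ints ...
    print(parser.parse_args(['--temps', '0', '1', '2']).temps)  # [0, 1, 2]
    # ... while 'all' is passed through unchanged
    print(parser.parse_args(['--temps', 'all']).temps)  # 'all'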
class CommonMultiTemperedMetadataIO(CommonMCMCMetadataIO):
"""Adds support for reading/writing multi-tempered metadata to
    :py:class:`~pycbc.inference.io.base_mcmc.CommonMCMCMetadataIO`.
"""
@property
def ntemps(self):
"""Returns the number of temperatures used by the sampler."""
return self[self.sampler_group].attrs['ntemps']
def write_sampler_metadata(self, sampler):
"""Adds writing ntemps to file.
"""
super(CommonMultiTemperedMetadataIO, self).write_sampler_metadata(
sampler)
self[self.sampler_group].attrs["ntemps"] = sampler.ntemps
@staticmethod
def extra_args_parser(parser=None, skip_args=None, **kwargs):
"""Adds --temps to MCMCIO parser.
"""
if skip_args is None:
skip_args = []
parser, actions = CommonMCMCMetadataIO.extra_args_parser(
parser=parser, skip_args=skip_args, **kwargs)
if 'temps' not in skip_args:
act = parser.add_argument(
"--temps", nargs="+", default=0, action=ParseTempsArg,
help="Get the given temperatures. May provide either a "
"sequence of integers specifying the temperatures to "
"plot, or 'all' for all temperatures. Default is to only "
"plot the coldest (= 0) temperature chain.")
actions.append(act)
return parser, actions
def write_samples(fp, samples, parameters=None, last_iteration=None,
samples_group=None, thin_by=None):
"""Writes samples to the given file.
This works both for standard MCMC and ensemble MCMC samplers with
parallel tempering.
Results are written to ``samples_group/{vararg}``, where ``{vararg}``
    is the name of a model parameter. The samples are written as an
``ntemps x nwalkers x niterations`` array.
Parameters
-----------
fp : BaseInferenceFile
Open file handler to write files to. Must be an instance of
BaseInferenceFile with CommonMultiTemperedMetadataIO methods added.
samples : dict
The samples to write. Each array in the dictionary should have
shape ntemps x nwalkers x niterations.
parameters : list, optional
Only write the specified parameters to the file. If None, will
write all of the keys in the ``samples`` dict.
last_iteration : int, optional
The iteration of the last sample. If the file's ``thinned_by``
attribute is > 1, this is needed to determine where to start
thinning the samples to match what has already been stored on disk.
samples_group : str, optional
Which group to write the samples to. Default (None) will result
in writing to "samples".
thin_by : int, optional
Override the ``thinned_by`` attribute in the file with the given
value. **Only set this if you are using this function to write
something other than inference samples!**
"""
ntemps, nwalkers, niterations = tuple(samples.values())[0].shape
assert all(p.shape == (ntemps, nwalkers, niterations)
for p in samples.values()), (
"all samples must have the same shape")
if samples_group is None:
samples_group = fp.samples_group
if parameters is None:
parameters = list(samples.keys())
# thin the samples
samples = thin_samples_for_writing(fp, samples, parameters,
last_iteration, samples_group,
thin_by=thin_by)
# loop over number of dimensions
group = samples_group + '/{name}'
for param in parameters:
dataset_name = group.format(name=param)
data = samples[param]
# check that there's something to write after thinning
if data.shape[2] == 0:
# nothing to write, move along
continue
try:
fp_niterations = fp[dataset_name].shape[-1]
istart = fp_niterations
istop = istart + data.shape[2]
if istop > fp_niterations:
# resize the dataset
fp[dataset_name].resize(istop, axis=2)
except KeyError:
# dataset doesn't exist yet
istart = 0
istop = istart + data.shape[2]
fp.create_dataset(dataset_name, (ntemps, nwalkers, istop),
maxshape=(ntemps, nwalkers, None),
dtype=data.dtype,
fletcher32=True)
fp[dataset_name][:, :, istart:istop] = data
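# Brief sketch of the array layout write_samples expects; the open file handle
# and the parameter names used here are hypothetical.
def _example_write_samples(fp):
    ntemps, nwalkers, niterations = 2, 10, 100
    samples = {'x': numpy.zeros((ntemps, nwalkers, niterations)),
               'y': numpy.ones((ntemps, nwalkers, niterations))}
    # every array must share the same ntemps x nwalkers x niterations shape
    write_samples(fp, samples, last_iteration=niterations)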
def read_raw_samples(fp, fields,
thin_start=None, thin_interval=None, thin_end=None,
iteration=None, temps='all', chains=None,
flatten=True, group=None):
"""Base function for reading samples from a collection of independent
MCMC chains file with parallel tempering.
This may collect differing numbering of samples from each chains,
depending on the thinning settings for each chain. If not flattened the
returned array will have dimensions requested temps x requested chains x
max samples, where max samples is the largest number of samples retrieved
from a single chain. Chains that retrieve fewer samples will be padded with
``numpy.nan``. If flattened, the NaNs are removed prior to returning.
Parameters
-----------
fp : BaseInferenceFile
Open file handler to read samples from. Must be an instance of
BaseInferenceFile with CommonMultiTemperedMetadataIO methods added.
fields : list
The list of field names to retrieve.
thin_start : array or int, optional
Start reading from the given sample. May either provide an array
indicating the start index for each chain, or an integer. If the
former, the array must have the same length as the number of chains
that will be retrieved. If the latter, the given value will be used
for all chains. Default (None) is to use the file's ``thin_start``
attribute.
thin_interval : array or int, optional
Only read every ``thin_interval``-th sample. May either provide an
array indicating the interval to use for each chain, or an integer. If
the former, the array must have the same length as the number of chains
that will be retrieved. If the latter, the given value will be used for
all chains. Default (None) is to use the file's ``thin_interval``
attribute.
thin_end : array or int, optional
Stop reading at the given sample index. May either provide an
array indicating the end index to use for each chain, or an integer. If
the former, the array must have the same length as the number of chains
that will be retrieved. If the latter, the given value will be used for
        all chains. Default (None) is to use the file's ``thin_end``
attribute.
iteration : int, optional
Only read the given iteration from all chains. If provided, it
overrides the ``thin_(start|interval|end)`` options.
temps : 'all' or (list of) int, optional
The temperature index (or list of indices) to retrieve. To retrieve
        all temperatures pass 'all', or a list of all of the temperatures.
Default is 'all'.
chains : (list of) int, optional
Only read from the given chains. Default is to read all.
flatten : bool, optional
Remove NaNs and flatten the samples to 1D arrays before returning.
Otherwise, the returned arrays will have shape (requested temps x
requested chains x max requested iteration(s)), with chains that return
fewer samples padded with NaNs. Default is True.
group : str, optional
The name of the group to read sample datasets from. Default is
the file's ``samples_group``.
Returns
-------
dict
A dictionary of field name -> numpy array pairs.
"""
if isinstance(fields, str):
fields = [fields]
if group is None:
group = fp.samples_group
group = group + '/{name}'
# chains to load
if chains is None:
chains = numpy.arange(fp.nchains)
elif not isinstance(chains, (list, numpy.ndarray)):
chains = numpy.array([chains]).astype(int)
get_index = _get_index(fp, chains, thin_start, thin_interval, thin_end,
iteration)
# load the samples
arrays = {}
for name in fields:
dset = group.format(name=name)
# get the temperatures to load
tidx, selecttemps, ntemps = _get_temps_index(temps, fp, dset)
alist = []
maxiters = 0
for ii, cidx in enumerate(chains):
idx = get_index[ii]
# load the data
thisarr = fp[dset][tidx, cidx, idx]
if thisarr.size == 0:
# no samples were loaded; skip this chain
alist.append(None)
continue
if isinstance(idx, (int, numpy.int_)):
# make sure the last dimension corresponds to iteration
thisarr = thisarr.reshape(list(thisarr.shape)+[1])
# pull out the temperatures we need
if selecttemps:
thisarr = thisarr[temps, ...]
            # make sure it's 2D
thisarr = thisarr.reshape(ntemps, thisarr.shape[-1])
alist.append(thisarr)
maxiters = max(maxiters, thisarr.shape[-1])
# stack into a single array
arr = numpy.full((ntemps, len(chains), maxiters), numpy.nan,
dtype=fp[dset].dtype)
for ii, thisarr in enumerate(alist):
if thisarr is not None:
arr[:, ii, :thisarr.shape[-1]] = thisarr
if flatten:
# flatten and remove nans
arr = arr.flatten()
arr = arr[~numpy.isnan(arr)]
arrays[name] = arr
return arrays
def ensemble_read_raw_samples(fp, fields, thin_start=None,
thin_interval=None, thin_end=None,
iteration=None, temps='all', walkers=None,
flatten=True, group=None):
"""Base function for reading samples from ensemble MCMC file with
parallel tempering.
Parameters
-----------
fp : BaseInferenceFile
        Open file handler to read samples from. Must be an instance of
BaseInferenceFile with CommonMultiTemperedMetadataIO methods added.
fields : list
The list of field names to retrieve.
thin_start : int, optional
Start reading from the given iteration. Default is to start from
the first iteration.
thin_interval : int, optional
Only read every ``thin_interval`` -th sample. Default is 1.
thin_end : int, optional
Stop reading at the given iteration. Default is to end at the last
iteration.
iteration : int, optional
Only read the given iteration. If this provided, it overrides
the ``thin_(start|interval|end)`` options.
temps : 'all' or (list of) int, optional
The temperature index (or list of indices) to retrieve. To retrieve
        all temperatures pass 'all', or a list of all of the temperatures.
Default is 'all'.
walkers : (list of) int, optional
Only read from the given walkers. Default (``None``) is to read all.
flatten : bool, optional
Flatten the samples to 1D arrays before returning. Otherwise, the
returned arrays will have shape (requested temps x
requested walkers x requested iteration(s)). Default is True.
group : str, optional
The name of the group to read sample datasets from. Default is
the file's ``samples_group``.
Returns
-------
dict
A dictionary of field name -> numpy array pairs.
"""
if isinstance(fields, str):
fields = [fields]
# walkers to load
widx, nwalkers = _ensemble_get_walker_index(fp, walkers)
# get the slice to use
get_index = _ensemble_get_index(fp, thin_start, thin_interval, thin_end,
iteration)
# load
if group is None:
group = fp.samples_group
group = group + '/{name}'
arrays = {}
for name in fields:
dset = group.format(name=name)
tidx, selecttemps, ntemps = _get_temps_index(temps, fp, dset)
arr = fp[dset][tidx, widx, get_index]
niterations = arr.shape[-1] if iteration is None else 1
if selecttemps:
# pull out the temperatures we need
arr = arr[temps, ...]
if flatten:
arr = arr.flatten()
else:
# ensure that the returned array is 3D
arr = arr.reshape((ntemps, nwalkers, niterations))
arrays[name] = arr
return arrays
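# Sketch of pulling flattened cold-chain samples out of an open file handle;
# the handle and the parameter names are hypothetical.
def _example_read_cold_chain(fp):
    # temps=0 keeps only the coldest temperature; the default flatten=True
    # returns 1D arrays with the walker and iteration axes collapsed
    return ensemble_read_raw_samples(fp, ['mass1', 'mass2'], temps=0)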
def _get_temps_index(temps, fp, dataset):
"""Convenience function to determine which temperatures to load.
Parameters
-----------
temps : 'all' or (list of) int
The temperature index (or list of indices) to retrieve. To retrieve
        all temperatures pass 'all', or a list of all of the temperatures.
fp : BaseInferenceFile
Open file handler to read samples from. Must be an instance of
BaseInferenceFile with CommonMultiTemperedMetadataIO methods added.
dataset : str
The name of the dataset that samples will be loaded from.
Returns
-------
tidx : slice or list of int
The temperature indices to load from the file.
selecttemps : bool
Whether specific temperatures need to be pulled out of the samples
array after it is loaded from the file.
ntemps : int
The number of temperatures that will be loaded.
"""
if temps == 'all':
# all temperatures were requested; just need to know how many
ntemps = fp[dataset].shape[0]
tidx = slice(None, None)
selecttemps = False
elif isinstance(temps, (int, numpy.int_)):
# only a single temperature is requested
ntemps = 1
tidx = temps
selecttemps = False
else:
# a select set of temperatures are requested
tidx = slice(None, None)
ntemps = len(temps)
selecttemps = True
return tidx, selecttemps, ntemps
| 16,944
| 40.633907
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/dynesty.py
|
# Copyright (C) 2019 Collin Capano, Sumit Kumar
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Provides IO for the dynesty sampler.
"""
import argparse
import numpy
from pycbc.io.hdf import (dump_state, load_state)
from .base_nested_sampler import BaseNestedSamplerFile
from .posterior import write_samples_to_file, read_raw_samples_from_file
class CommonNestedMetadataIO(object):
"""Provides functions for reading/writing dynesty metadata to file.
"""
@staticmethod
def extra_args_parser(parser=None, skip_args=None, **kwargs):
"""Create a parser to parse sampler-specific arguments for loading
samples.
Parameters
----------
parser : argparse.ArgumentParser, optional
Instead of creating a parser, add arguments to the given one. If
none provided, will create one.
skip_args : list, optional
Don't parse the given options. Options should be given as the
option string, minus the '--'. For example,
``skip_args=['iteration']`` would cause the ``--iteration``
argument not to be included.
\**kwargs :
All other keyword arguments are passed to the parser that is
created.
Returns
-------
parser : argparse.ArgumentParser
            An argument parser with the extra arguments added.
actions : list of argparse.Action
A list of the actions that were added.
"""
if parser is None:
parser = argparse.ArgumentParser(**kwargs)
elif kwargs:
raise ValueError("No other keyword arguments should be provded if "
"a parser is provided.")
if skip_args is None:
skip_args = []
actions = []
if 'raw_samples' not in skip_args:
act = parser.add_argument(
"--raw-samples", action='store_true', default=False,
help="Extract raw samples rather than a posterior. "
"Raw samples are the unweighted samples obtained from "
"the nested sampler. Default value is False, which means "
"raw samples are weighted by the log-weight array "
"obtained from the sampler, giving an estimate of the "
"posterior.")
actions.append(act)
if 'seed' not in skip_args:
act = parser.add_argument(
"--seed", type=int, default=0,
help="Set the random-number seed used for extracting the "
"posterior samples. This is needed because the "
"unweighted samples are randomly shuffled to produce "
"a posterior. Default is 0. Ignored if raw-samples are "
"extracted instead.")
return parser, actions
class DynestyFile(CommonNestedMetadataIO, BaseNestedSamplerFile):
"""Class to handle file IO for the ``dynesty`` sampler."""
name = 'dynesty_file'
def read_raw_samples(self, fields, raw_samples=False, seed=0):
"""Reads samples from a dynesty file and constructs a posterior.
Parameters
----------
fields : list of str
The names of the parameters to load. Names must correspond to
dataset names in the file's ``samples`` group.
raw_samples : bool, optional
Return the raw (unweighted) samples instead of the estimated
posterior samples. Default is False.
seed : int, optional
When extracting the posterior, samples are randomly shuffled. To
make this reproduceable, numpy's random generator seed is set with
the given value prior to the extraction. Default is 0.
Returns
-------
dict :
Dictionary of parameter names -> samples.
"""
samples = read_raw_samples_from_file(self, fields)
logwt = read_raw_samples_from_file(self, ['logwt'])['logwt']
loglikelihood = read_raw_samples_from_file(
self, ['loglikelihood'])['loglikelihood']
logz = self.attrs.get('log_evidence')
if not raw_samples:
weights = numpy.exp(logwt - logz)
N = len(weights)
positions = (numpy.random.random() + numpy.arange(N)) / N
idx = numpy.zeros(N, dtype=int)
cumulative_sum = numpy.cumsum(weights)
cumulative_sum /= cumulative_sum[-1]
i, j = 0, 0
while i < N:
if positions[i] < cumulative_sum[j]:
idx[i] = j
i += 1
else:
j += 1
try:
rng = numpy.random.default_rng(seed)
except AttributeError:
# numpy pre-1.17 uses RandomState
# Py27: delete this after we drop python 2.7 support
rng = numpy.random.RandomState(seed)
rng.shuffle(idx)
post = {'loglikelihood': loglikelihood[idx]}
for i, param in enumerate(fields):
post[param] = samples[param][idx]
return post
else:
return samples
def write_pickled_data_into_checkpoint_file(self, state):
"""Dump the sampler state into checkpoint file
"""
if 'sampler_info/saved_state' not in self:
self.create_group('sampler_info/saved_state')
dump_state(state, self, path='sampler_info/saved_state')
def read_pickled_data_from_checkpoint_file(self):
"""Load the sampler state (pickled) from checkpoint file
"""
return load_state(self, path='sampler_info/saved_state')
def write_raw_samples(self, data, parameters=None):
"""Write the nested samples to the file
"""
if 'samples' not in self:
self.create_group('samples')
write_samples_to_file(self, data, parameters=parameters,
group='samples')
def validate(self):
"""Runs a validation test.
        This checks that a samples group exists, and that pickled data can
be loaded.
Returns
-------
bool :
Whether or not the file is valid as a checkpoint file.
"""
try:
if 'sampler_info/saved_state' in self:
load_state(self, path='sampler_info/saved_state')
checkpoint_valid = True
except KeyError:
checkpoint_valid = False
return checkpoint_valid
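# Illustrative use of DynestyFile.read_raw_samples on a finished run; the file
# name and the parameter names are hypothetical.
def _example_read_dynesty(path='dynesty_samples.hdf'):
    with DynestyFile(path, 'r') as fp:
        # posterior samples, resampled by weight and shuffled with a fixed seed
        post = fp.read_raw_samples(['mass1', 'mass2'], seed=1)
        # the unweighted nested samples
        raw = fp.read_raw_samples(['mass1', 'mass2'], raw_samples=True)
    return post, raw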
| 7,473
| 38.544974
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/base_nested_sampler.py
|
# Copyright (C) 2019 Sumit Kumar, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Provides IO for the dynesty sampler.
"""
from .base_sampler import BaseSamplerFile
from .posterior import read_raw_samples_from_file, write_samples_to_file
class BaseNestedSamplerFile(BaseSamplerFile):
"""Class to handle file IO for the nested samplers cpnest and dynesty."""
name = 'base_nest_file'
def read_raw_samples(self, fields, **kwargs):
return read_raw_samples_from_file(self, fields, **kwargs)
def write_resume_point(self):
pass
def write_niterations(self, niterations):
"""
Writes the given number of iterations to the sampler group.
"""
self[self.sampler_group].attrs['niterations'] = niterations
def write_sampler_metadata(self, sampler):
"""
        Writes the sampler name, number of live points, and model metadata.
"""
self.attrs['sampler'] = sampler.name
if self.sampler_group not in self.keys():
# create the sampler group
self.create_group(self.sampler_group)
self[self.sampler_group].attrs['nlivepoints'] = sampler.nlive
# write the model's metadata
sampler.model.write_metadata(self)
def write_samples(self, samples, parameters=None):
"""Writes samples to the given file.
Results are written to ``samples_group/{vararg}``, where ``{vararg}``
        is the name of a model parameter. The samples are written as an
array of length ``niterations``.
Parameters
-----------
samples : dict
The samples to write. Each array in the dictionary should have
length niterations.
parameters : list, optional
Only write the specified parameters to the file. If None, will
write all of the keys in the ``samples`` dict.
"""
# since we're just writing a posterior use
# PosteriorFile's write_samples
write_samples_to_file(self, samples, parameters=parameters)
| 2,918
| 36.423077
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/emcee_pt.py
|
# Copyright (C) 2018 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Provides I/O support for emcee_pt.
"""
import numpy
from .base_sampler import BaseSamplerFile
from .base_mcmc import EnsembleMCMCMetadataIO
from .base_multitemper import (CommonMultiTemperedMetadataIO,
write_samples,
ensemble_read_raw_samples)
class EmceePTFile(EnsembleMCMCMetadataIO, CommonMultiTemperedMetadataIO,
BaseSamplerFile):
"""Class to handle file IO for the ``emcee`` sampler."""
name = 'emcee_pt_file'
@property
def betas(self):
"""The betas that were used."""
return self[self.sampler_group].attrs["betas"]
def write_samples(self, samples, **kwargs):
r"""Writes samples to the given file.
Calls :py:func:`base_multitemper.write_samples`. See that function for
details.
Parameters
----------
samples : dict
The samples to write. Each array in the dictionary should have
shape ntemps x nwalkers x niterations.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_multitemper.write_samples`.
"""
write_samples(self, samples, **kwargs)
def read_raw_samples(self, fields, **kwargs):
r"""Base function for reading samples.
Calls :py:func:`base_multitemper.ensemble_read_raw_samples`. See that
function for details.
Parameters
-----------
fields : list
The list of field names to retrieve.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_multitemper.ensemble_read_raw_samples`.
Returns
-------
dict
A dictionary of field name -> numpy array pairs.
"""
return ensemble_read_raw_samples(self, fields, **kwargs)
def write_sampler_metadata(self, sampler):
"""Adds writing betas to MultiTemperedMCMCIO.
"""
super(EmceePTFile, self).write_sampler_metadata(sampler)
self[self.sampler_group].attrs["betas"] = sampler.betas
def read_acceptance_fraction(self, temps=None, walkers=None):
"""Reads the acceptance fraction.
Parameters
-----------
temps : (list of) int, optional
The temperature index (or a list of indices) to retrieve. If None,
acfs from all temperatures and all walkers will be retrieved.
walkers : (list of) int, optional
The walker index (or a list of indices) to retrieve. If None,
samples from all walkers will be obtained.
Returns
-------
array
Array of acceptance fractions with shape (requested temps,
requested walkers).
"""
group = self.sampler_group + '/acceptance_fraction'
if walkers is None:
wmask = numpy.ones(self.nwalkers, dtype=bool)
else:
wmask = numpy.zeros(self.nwalkers, dtype=bool)
wmask[walkers] = True
if temps is None:
tmask = numpy.ones(self.ntemps, dtype=bool)
else:
tmask = numpy.zeros(self.ntemps, dtype=bool)
tmask[temps] = True
return self[group][:][numpy.ix_(tmask, wmask)]
def write_acceptance_fraction(self, acceptance_fraction):
"""Write acceptance_fraction data to file.
Results are written to ``[sampler_group]/acceptance_fraction``; the
resulting dataset has shape (ntemps, nwalkers).
Parameters
-----------
acceptance_fraction : numpy.ndarray
Array of acceptance fractions to write. Must have shape
ntemps x nwalkers.
"""
# check
assert acceptance_fraction.shape == (self.ntemps, self.nwalkers), (
"acceptance fraction must have shape ntemps x nwalker")
group = self.sampler_group + '/acceptance_fraction'
try:
self[group][:] = acceptance_fraction
except KeyError:
# dataset doesn't exist yet, create it
self[group] = acceptance_fraction
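# Sketch of the acceptance-fraction layout write_acceptance_fraction expects;
# the open file handle is hypothetical.
def _example_store_acceptance(fp):
    # one value per temperature and walker
    afrac = numpy.random.uniform(0., 1., size=(fp.ntemps, fp.nwalkers))
    fp.write_acceptance_fraction(afrac)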
| 4,859
| 34.474453
| 78
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/base_hdf.py
|
# Copyright (C) 2016 Christopher M. Biwer, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This modules defines functions for reading and writing samples that the
inference samplers generate.
"""
import sys
import logging
from io import StringIO
from abc import (ABCMeta, abstractmethod)
import numpy
import h5py
from pycbc.io import FieldArray
from pycbc.inject import InjectionSet
from pycbc.io import (dump_state, load_state)
from pycbc.workflow import WorkflowConfigParser
from pycbc.types import FrequencySeries
def format_attr(val):
"""Formats an attr so that it can be read in either python 2 or 3.
In python 2, strings that are saved as an attribute in an hdf file default
to unicode. Since unicode was removed in python 3, if you load that file
in a python 3 environment, the strings will be read as bytes instead, which
causes a number of issues. This attempts to fix that. If the value is
a bytes string, then it will be decoded into a string. If the value is
a numpy array of byte strings, it will convert the array to a list of
strings.
Parameters
----------
val : obj
        The value to format. This will try to apply decoding to the value.
Returns
-------
obj
If ``val`` was a byte string, the value as a ``str``. If the value
was a numpy array of ``bytes_``, the value as a list of ``str``.
Otherwise, just returns the value.
"""
try:
val = str(val.decode())
except AttributeError:
pass
if isinstance(val, numpy.ndarray) and val.dtype.type == numpy.bytes_:
val = val.astype(numpy.unicode_).tolist()
return val
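# Small sketch of format_attr on the kinds of values written under python 2;
# the inputs are illustrative only.
def _example_format_attr():
    assert format_attr(b'emcee') == 'emcee'
    assert format_attr(numpy.array([b'mass1', b'mass2'])) == ['mass1', 'mass2']
    # values that need no decoding pass through unchanged
    assert format_attr(3.5) == 3.5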
class BaseInferenceFile(h5py.File, metaclass=ABCMeta):
"""Base class for all inference hdf files.
This is a subclass of the h5py.File object. It adds functions for
handling reading and writing the samples from the samplers.
Parameters
-----------
path : str
The path to the HDF file.
mode : {None, str}
        The mode to open the file, e.g. "w" for write and "r" for read.
"""
name = None
samples_group = 'samples'
sampler_group = 'sampler_info'
data_group = 'data'
injections_group = 'injections'
config_group = 'config_file'
def __init__(self, path, mode=None, **kwargs):
super(BaseInferenceFile, self).__init__(path, mode, **kwargs)
# check that file type matches self
try:
filetype = self.attrs['filetype']
except KeyError:
if mode == 'w':
# first time creating the file, add this class's name
filetype = self.name
self.attrs['filetype'] = filetype
else:
filetype = None
try:
filetype = str(filetype.decode())
except AttributeError:
pass
if filetype != self.name:
raise ValueError("This file has filetype {}, whereas this class "
"is named {}. This indicates that the file was "
"not written by this class, and so cannot be "
"read by this class.".format(filetype, self.name))
def __getattr__(self, attr):
"""Things stored in ``.attrs`` are promoted to instance attributes.
Note that properties will be called before this, so if there are any
properties that share the same name as something in ``.attrs``, that
property will get returned.
"""
return self.attrs[attr]
def getattrs(self, group=None, create_missing=True):
"""Convenience function for getting the `attrs` from the file or group.
Parameters
----------
group : str, optional
Get the attrs of the specified group. If None or ``/``, will
retrieve the file's ``attrs``.
create_missing: bool, optional
If ``group`` is provided, but doesn't yet exist in the file, create
the group. Otherwise, a KeyError will be raised. Default is True.
Returns
-------
h5py.File.attrs
An attrs instance of the file or requested group.
"""
if group is None or group == "/":
attrs = self.attrs
else:
try:
attrs = self[group].attrs
except KeyError as e:
if create_missing:
self.create_group(group)
attrs = self[group].attrs
else:
raise e
return attrs
@abstractmethod
def write_samples(self, samples, **kwargs):
"""This should write all of the provided samples.
This function should be used to write both samples and model stats.
Parameters
----------
samples : dict
Samples should be provided as a dictionary of numpy arrays.
\**kwargs :
Any other keyword args the sampler needs to write data.
"""
pass
def parse_parameters(self, parameters, array_class=None):
"""Parses a parameters arg to figure out what fields need to be loaded.
Parameters
----------
parameters : (list of) strings
The parameter(s) to retrieve. A parameter can be the name of any
field in ``samples_group``, a virtual field or method of
``FieldArray`` (as long as the file contains the necessary fields
to derive the virtual field or method), and/or a function of
these.
array_class : array class, optional
The type of array to use to parse the parameters. The class must
have a ``parse_parameters`` method. Default is to use a
``FieldArray``.
Returns
-------
list :
A list of strings giving the fields to load from the file.
"""
# get the type of array class to use
if array_class is None:
array_class = FieldArray
# get the names of fields needed for the given parameters
possible_fields = self[self.samples_group].keys()
return array_class.parse_parameters(parameters, possible_fields)
def read_samples(self, parameters, array_class=None, **kwargs):
"""Reads samples for the given parameter(s).
The ``parameters`` can be the name of any dataset in ``samples_group``,
a virtual field or method of ``FieldArray`` (as long as the file
contains the necessary fields to derive the virtual field or method),
and/or any numpy function of these.
The ``parameters`` are parsed to figure out what datasets are needed.
Only those datasets will be loaded, and will be the base-level fields
of the returned ``FieldArray``.
The ``static_params`` are also added as attributes of the returned
``FieldArray``.
Parameters
-----------
parameters : (list of) strings
The parameter(s) to retrieve.
array_class : FieldArray-like class, optional
The type of array to return. The class must have ``from_kwargs``
and ``parse_parameters`` methods. If None, will return a
``FieldArray``.
\**kwargs :
All other keyword arguments are passed to ``read_raw_samples``.
Returns
-------
FieldArray :
The samples as a ``FieldArray``.
"""
# get the type of array class to use
if array_class is None:
array_class = FieldArray
# get the names of fields needed for the given parameters
possible_fields = self[self.samples_group].keys()
loadfields = array_class.parse_parameters(parameters, possible_fields)
samples = self.read_raw_samples(loadfields, **kwargs)
# convert to FieldArray
samples = array_class.from_kwargs(**samples)
# add the static params and attributes
addatrs = (list(self.static_params.items()) +
list(self[self.samples_group].attrs.items()))
for (p, val) in addatrs:
if p in loadfields:
continue
setattr(samples, format_attr(p), format_attr(val))
return samples
@abstractmethod
def read_raw_samples(self, fields, **kwargs):
"""Low level function for reading datasets in the samples group.
This should return a dictionary of numpy arrays.
"""
pass
@staticmethod
def extra_args_parser(parser=None, skip_args=None, **kwargs):
"""Provides a parser that can be used to parse sampler-specific command
line options for loading samples.
This is optional. Inheriting classes may override this if they want to
implement their own options.
Parameters
----------
parser : argparse.ArgumentParser, optional
Instead of creating a parser, add arguments to the given one. If
none provided, will create one.
skip_args : list, optional
Don't include the given options. Options should be given as the
option string, minus the '--'. For example,
``skip_args=['iteration']`` would cause the ``--iteration``
argument not to be included.
\**kwargs :
All other keyword arguments are passed to the parser that is
created.
Returns
-------
parser : argparse.ArgumentParser or None
If this class adds extra arguments, an argument parser with the
extra arguments. Otherwise, will just return whatever was passed
for the ``parser`` argument (default is None).
actions : list of argparse.Action
List of the actions that were added.
"""
return parser, []
@staticmethod
def _get_optional_args(args, opts, err_on_missing=False, **kwargs):
"""Convenience function to retrieve arguments from an argparse
namespace.
Parameters
----------
args : list of str
            List of arguments to retrieve.
        opts : argparse.Namespace
            Namespace to retrieve arguments from.
err_on_missing : bool, optional
If an argument is not found in the namespace, raise an
AttributeError. Otherwise, just pass. Default is False.
\**kwargs :
All other keyword arguments are added to the return dictionary.
Any keyword argument that is the same as an argument in ``args``
will override what was retrieved from ``opts``.
Returns
-------
dict :
Dictionary mapping arguments to values retrieved from ``opts``. If
keyword arguments were provided, these will also be included in the
dictionary.
"""
parsed = {}
for arg in args:
try:
parsed[arg] = getattr(opts, arg)
except AttributeError as e:
if err_on_missing:
raise AttributeError(e)
else:
continue
parsed.update(kwargs)
return parsed
def samples_from_cli(self, opts, parameters=None, **kwargs):
"""Reads samples from the given command-line options.
Parameters
----------
opts : argparse Namespace
The options with the settings to use for loading samples (the sort
of thing returned by ``ArgumentParser().parse_args``).
parameters : (list of) str, optional
A list of the parameters to load. If none provided, will try to
get the parameters to load from ``opts.parameters``.
\**kwargs :
All other keyword arguments are passed to ``read_samples``. These
will override any options with the same name.
Returns
-------
FieldArray :
Array of the loaded samples.
"""
if parameters is None and opts.parameters is None:
parameters = self.variable_params
elif parameters is None:
parameters = opts.parameters
# parse optional arguments
_, extra_actions = self.extra_args_parser()
extra_args = [act.dest for act in extra_actions]
kwargs = self._get_optional_args(extra_args, opts, **kwargs)
return self.read_samples(parameters, **kwargs)
@property
def static_params(self):
"""Returns a dictionary of the static_params. The keys are the argument
names, values are the value they were set to.
"""
return {arg: self.attrs[arg] for arg in self.attrs["static_params"]}
@property
def effective_nsamples(self):
"""Returns the effective number of samples stored in the file.
"""
try:
return self.attrs['effective_nsamples']
except KeyError:
return 0
def write_effective_nsamples(self, effective_nsamples):
"""Writes the effective number of samples stored in the file."""
self.attrs['effective_nsamples'] = effective_nsamples
@property
def thin_start(self):
"""The default start index to use when reading samples.
Unless overridden by sub-class attribute, just returns 0.
"""
return 0
@property
def thin_interval(self):
"""The default interval to use when reading samples.
Unless overridden by sub-class attribute, just returns 1.
"""
return 1
@property
    def thin_end(self):
        """The default end index to use when reading samples.
        Unless overridden by sub-class attribute, just returns ``None``.
"""
return None
@property
def cmd(self):
"""Returns the (last) saved command line.
If the file was created from a run that resumed from a checkpoint, only
the last command line used is returned.
Returns
-------
cmd : string
The command line that created this InferenceFile.
"""
cmd = self.attrs["cmd"]
if isinstance(cmd, numpy.ndarray):
cmd = cmd[-1]
return cmd
def write_logevidence(self, lnz, dlnz):
"""Writes the given log evidence and its error.
Results are saved to file's 'log_evidence' and 'dlog_evidence'
attributes.
Parameters
----------
lnz : float
The log of the evidence.
dlnz : float
The error in the estimate of the log evidence.
"""
self.attrs['log_evidence'] = lnz
self.attrs['dlog_evidence'] = dlnz
@property
def log_evidence(self):
"""Returns the log of the evidence and its error, if they exist in the
file. Raises a KeyError otherwise.
"""
return self.attrs["log_evidence"], self.attrs["dlog_evidence"]
    def write_random_state(self, group=None, state=None):
        """Writes the state of the random number generator to the file.
The random state is written to ``sampler_group``/random_state.
Parameters
----------
group : str
Name of group to write random state to.
state : tuple, optional
Specify the random state to write. If None, will use
``numpy.random.get_state()``.
"""
# Write out the default numpy random state
group = self.sampler_group if group is None else group
dataset_name = "/".join([group, "random_state"])
if state is None:
state = numpy.random.get_state()
s, arr, pos, has_gauss, cached_gauss = state
if dataset_name in self:
self[dataset_name][:] = arr
else:
self.create_dataset(dataset_name, arr.shape, fletcher32=True,
dtype=arr.dtype)
self[dataset_name][:] = arr
self[dataset_name].attrs["s"] = s
self[dataset_name].attrs["pos"] = pos
self[dataset_name].attrs["has_gauss"] = has_gauss
self[dataset_name].attrs["cached_gauss"] = cached_gauss
def read_random_state(self, group=None):
"""Reads the state of the random number generator from the file.
Parameters
----------
group : str
Name of group to read random state from.
Returns
-------
tuple
A tuple with 5 elements that can be passed to numpy.set_state.
"""
# Read numpy randomstate
group = self.sampler_group if group is None else group
dataset_name = "/".join([group, "random_state"])
arr = self[dataset_name][:]
s = self[dataset_name].attrs["s"]
pos = self[dataset_name].attrs["pos"]
has_gauss = self[dataset_name].attrs["has_gauss"]
cached_gauss = self[dataset_name].attrs["cached_gauss"]
state = s, arr, pos, has_gauss, cached_gauss
return state
def write_strain(self, strain_dict, group=None):
"""Writes strain for each IFO to file.
Parameters
-----------
        strain_dict : dict
A dict of FrequencySeries where the key is the IFO.
group : {None, str}
The group to write the strain to. If None, will write to the top
level.
"""
subgroup = self.data_group + "/{ifo}/strain"
if group is None:
group = subgroup
else:
group = '/'.join([group, subgroup])
for ifo, strain in strain_dict.items():
self[group.format(ifo=ifo)] = strain
self[group.format(ifo=ifo)].attrs['delta_t'] = strain.delta_t
self[group.format(ifo=ifo)].attrs['start_time'] = \
float(strain.start_time)
def write_stilde(self, stilde_dict, group=None):
"""Writes stilde for each IFO to file.
Parameters
-----------
        stilde_dict : dict
A dict of FrequencySeries where the key is the IFO.
group : {None, str}
            The group to write the stilde to. If None, will write to the top
level.
"""
subgroup = self.data_group + "/{ifo}/stilde"
if group is None:
group = subgroup
else:
group = '/'.join([group, subgroup])
for ifo, stilde in stilde_dict.items():
self[group.format(ifo=ifo)] = stilde
self[group.format(ifo=ifo)].attrs['delta_f'] = stilde.delta_f
self[group.format(ifo=ifo)].attrs['epoch'] = float(stilde.epoch)
def write_psd(self, psds, group=None):
"""Writes PSD for each IFO to file.
PSDs are written to ``[{group}/]data/{detector}/psds/0``, where {group}
is the optional keyword argument.
Parameters
-----------
psds : dict
A dict of detector name -> FrequencySeries.
group : str, optional
Specify a top-level group to write the data to. If ``None`` (the
default), data will be written to the file's top level.
"""
subgroup = self.data_group + "/{ifo}/psds/0"
if group is None:
group = subgroup
else:
group = '/'.join([group, subgroup])
for ifo in psds:
self[group.format(ifo=ifo)] = psds[ifo]
self[group.format(ifo=ifo)].attrs['delta_f'] = psds[ifo].delta_f
def write_injections(self, injection_file, group=None):
"""Writes injection parameters from the given injection file.
Everything in the injection file is copied to
``[{group}/]injections_group``, where ``{group}`` is the optional
keyword argument.
Parameters
----------
injection_file : str
Path to HDF injection file.
group : str, optional
Specify a top-level group to write the injections group to. If
``None`` (the default), injections group will be written to the
file's top level.
"""
logging.info("Writing injection file to output")
if group is None or group == '/':
group = self.injections_group
else:
group = '/'.join([group, self.injections_group])
try:
with h5py.File(injection_file, "r") as fp:
super(BaseInferenceFile, self).copy(fp, group)
except IOError:
            logging.warning("Could not read %s as an HDF file", injection_file)
def read_injections(self, group=None):
"""Gets injection parameters.
Injections are retrieved from ``[{group}/]injections``.
Parameters
----------
group : str, optional
Group that the injections group is in. Default (None) is to look
in the top-level.
Returns
-------
FieldArray
Array of the injection parameters.
"""
if group is None or group == '/':
group = self.injections_group
else:
group = '/'.join([group, self.injections_group])
injset = InjectionSet(self.filename, hdf_group=group)
injections = injset.table.view(FieldArray)
# close the new open filehandler to self
injset._injhandler.filehandler.close()
return injections
def write_command_line(self):
"""Writes command line to attributes.
The command line is written to the file's ``attrs['cmd']``. If this
attribute already exists in the file (this can happen when resuming
from a checkpoint), ``attrs['cmd']`` will be a list storing the current
command line and all previous command lines.
"""
cmd = [" ".join(sys.argv)]
try:
previous = self.attrs["cmd"]
if isinstance(previous, str):
# convert to list
previous = [previous]
elif isinstance(previous, numpy.ndarray):
previous = previous.tolist()
except KeyError:
previous = []
self.attrs["cmd"] = cmd + previous
@staticmethod
def get_slice(thin_start=None, thin_interval=None, thin_end=None):
"""Formats a slice to retrieve a thinned array from an HDF file.
Parameters
----------
thin_start : float or int, optional
The starting index to use. If provided, the ``int`` will be taken.
thin_interval : float or int, optional
The interval to use. If provided the ceiling of it will be taken.
thin_end : float or int, optional
The end index to use. If provided, the ``int`` will be taken.
Returns
-------
slice :
The slice needed.
"""
if thin_start is not None:
thin_start = int(thin_start)
if thin_interval is not None:
thin_interval = int(numpy.ceil(thin_interval))
if thin_end is not None:
thin_end = int(thin_end)
return slice(thin_start, thin_end, thin_interval)
def copy_metadata(self, other):
"""Copies all metadata from this file to the other file.
Metadata is defined as everything in the top-level ``.attrs``.
Parameters
----------
other : InferenceFile
An open inference file to write the data to.
"""
logging.info("Copying metadata")
# copy attributes
for key in self.attrs.keys():
other.attrs[key] = self.attrs[key]
def copy_info(self, other, ignore=None):
"""Copies "info" from this file to the other.
"Info" is defined all groups that are not the samples group.
Parameters
----------
other : output file
The output file. Must be an hdf file.
ignore : (list of) str
Don't copy the given groups.
"""
logging.info("Copying info")
# copy non-samples/stats data
if ignore is None:
ignore = []
if isinstance(ignore, str):
ignore = [ignore]
ignore = set(ignore + [self.samples_group])
copy_groups = set(self.keys()) - ignore
for key in copy_groups:
super(BaseInferenceFile, self).copy(key, other)
def copy_samples(self, other, parameters=None, parameter_names=None,
read_args=None, write_args=None):
"""Should copy samples to the other files.
Parameters
----------
other : InferenceFile
An open inference file to write to.
parameters : list of str, optional
List of parameters to copy. If None, will copy all parameters.
parameter_names : dict, optional
Rename one or more parameters to the given name. The dictionary
should map parameter -> parameter name. If None, will just use the
original parameter names.
read_args : dict, optional
Arguments to pass to ``read_samples``.
write_args : dict, optional
Arguments to pass to ``write_samples``.
"""
# select the samples to copy
logging.info("Reading samples to copy")
if parameters is None:
parameters = self.variable_params
# if list of desired parameters is different, rename
if set(parameters) != set(self.variable_params):
other.attrs['variable_params'] = parameters
if read_args is None:
read_args = {}
samples = self.read_samples(parameters, **read_args)
logging.info("Copying {} samples".format(samples.size))
# if different parameter names are desired, get them from the samples
if parameter_names:
arrs = {pname: samples[p] for p, pname in parameter_names.items()}
arrs.update({p: samples[p] for p in parameters if
p not in parameter_names})
samples = FieldArray.from_kwargs(**arrs)
other.attrs['variable_params'] = samples.fieldnames
logging.info("Writing samples")
if write_args is None:
write_args = {}
other.write_samples({p: samples[p] for p in samples.fieldnames},
**write_args)
def copy(self, other, ignore=None, parameters=None, parameter_names=None,
read_args=None, write_args=None):
"""Copies metadata, info, and samples in this file to another file.
Parameters
----------
other : str or InferenceFile
The file to write to. May be either a string giving a filename,
or an open hdf file. If the former, the file will be opened with
the write attribute (note that if a file already exists with that
name, it will be deleted).
ignore : (list of) strings
Don't copy the given groups. If the samples group is included, no
samples will be copied.
parameters : list of str, optional
List of parameters in the samples group to copy. If None, will copy
all parameters.
parameter_names : dict, optional
Rename one or more parameters to the given name. The dictionary
should map parameter -> parameter name. If None, will just use the
original parameter names.
read_args : dict, optional
Arguments to pass to ``read_samples``.
write_args : dict, optional
Arguments to pass to ``write_samples``.
Returns
-------
InferenceFile
The open file handler to other.
"""
if not isinstance(other, h5py.File):
# check that we're not trying to overwrite this file
if other == self.name:
raise IOError("destination is the same as this file")
other = self.__class__(other, 'w')
# metadata
self.copy_metadata(other)
# info
if ignore is None:
ignore = []
if isinstance(ignore, str):
ignore = [ignore]
self.copy_info(other, ignore=ignore)
# samples
if self.samples_group not in ignore:
self.copy_samples(other, parameters=parameters,
parameter_names=parameter_names,
read_args=read_args,
write_args=write_args)
# if any down selection was done, re-set the default
# thin-start/interval/end
p = tuple(self[self.samples_group].keys())[0]
my_shape = self[self.samples_group][p].shape
p = tuple(other[other.samples_group].keys())[0]
other_shape = other[other.samples_group][p].shape
if my_shape != other_shape:
other.attrs['thin_start'] = 0
other.attrs['thin_interval'] = 1
other.attrs['thin_end'] = None
return other
@classmethod
def write_kwargs_to_attrs(cls, attrs, **kwargs):
"""Writes the given keywords to the given ``attrs``.
If any keyword argument points to a dict, the keyword will point to a
list of the dict's keys. Each key is then written to the attrs with its
corresponding value.
Parameters
----------
attrs : an HDF attrs
The ``attrs`` of an hdf file or a group in an hdf file.
\**kwargs :
The keywords to write.
"""
for arg, val in kwargs.items():
if val is None:
val = str(None)
if isinstance(val, dict):
attrs[str(arg)] = list(map(str, val.keys()))
# just call self again with the dict as kwargs
cls.write_kwargs_to_attrs(attrs, **val)
else:
attrs[str(arg)] = val
def write_data(self, name, data, path=None, append=False):
"""Convenience function to write data.
Given ``data`` is written as a dataset with ``name`` in ``path``.
If the dataset or path do not exist yet, the dataset and path will
be created.
Parameters
----------
name : str
The name to associate with the data. This will be the dataset
name (if data is array-like) or the key in the attrs.
data : array, dict, or atomic
The data to write. If a dictionary, a subgroup will be created
for each key, and the values written there. This will be done
recursively until an array or atomic (e.g., float, int, str), is
found. Otherwise, the data is written to the given name.
path : str, optional
Write to the given path. Default (None) will write to the top
level. If the path does not exist in the file, it will be
created.
append : bool, optional
Append the data to what is currently in the file if ``path/name``
already exists in the file, and if it does not, create the dataset
so that its last dimension can be resized. The data can only
be appended along the last dimension, and if it already exists in
the data, it must be resizable along this dimension. If ``False``
(the default) what is in the file will be overwritten, and the
given data must have the same shape.
"""
if path is None:
path = '/'
try:
group = self[path]
except KeyError:
# create the group
self.create_group(path)
group = self[path]
if isinstance(data, dict):
# call myself for each key, value pair in the dictionary
for key, val in data.items():
self.write_data(key, val, path='/'.join([path, name]),
append=append)
# if appending, we need to resize the data on disk, or, if it doesn't
# exist yet, create a dataset that is resizable along the last
# dimension
elif append:
# cast the data to an array if it isn't already one
if isinstance(data, (list, tuple)):
data = numpy.array(data)
if not isinstance(data, numpy.ndarray):
data = numpy.array([data])
dshape = data.shape
ndata = dshape[-1]
try:
startidx = group[name].shape[-1]
group[name].resize(dshape[-1]+group[name].shape[-1],
axis=len(group[name].shape)-1)
except KeyError:
# dataset doesn't exist yet
group.create_dataset(name, dshape,
maxshape=tuple(list(dshape)[:-1]+[None]),
dtype=data.dtype, fletcher32=True)
startidx = 0
group[name][..., startidx:startidx+ndata] = data[..., :]
else:
try:
group[name][()] = data
except KeyError:
# dataset doesn't exist yet
group[name] = data
def write_config_file(self, cp):
"""Writes the given config file parser.
File is stored as a pickled buffer array to ``config_parser/{index}``,
where ``{index}`` is an integer corresponding to the number of config
files that have been saved. The first time a save is called, it is
stored to ``0``, and incremented from there.
Parameters
----------
cp : ConfigParser
Config parser to save.
"""
# get the index of the last saved file
try:
index = list(map(int, self[self.config_group].keys()))
except KeyError:
index = []
if index == []:
# hasn't been written yet
index = 0
else:
index = max(index) + 1
# we'll store the config file as a text file that is pickled
out = StringIO()
cp.write(out)
# now pickle it
dump_state(out, self, path=self.config_group, dsetname=str(index))
def read_config_file(self, return_cp=True, index=-1):
"""Reads the config file that was used.
A ``ValueError`` is raised if no config files have been saved, or if
the requested index larger than the number of stored config files.
Parameters
----------
return_cp : bool, optional
If true, returns the loaded config file as
:py:class:`pycbc.workflow.configuration.WorkflowConfigParser`
type. Otherwise will return as string buffer. Default is True.
index : int, optional
The config file to load. If ``write_config_file`` has been called
multiple times (as would happen if restarting from a checkpoint),
there will be config files stored. Default (-1) is to load the
last saved file.
Returns
-------
WorkflowConfigParser or StringIO :
The parsed config file.
"""
# get the stored indices
try:
indices = sorted(map(int, self[self.config_group].keys()))
index = indices[index]
except KeyError:
raise ValueError("no config files saved in hdf")
except IndexError:
raise ValueError("no config file matches requested index")
cf = load_state(self, path=self.config_group, dsetname=str(index))
cf.seek(0)
if return_cp:
cp = WorkflowConfigParser()
cp.read_file(cf)
return cp
return cf
def read_data(self, group=None):
"""Loads the data stored in the file as a FrequencySeries.
Only works for models that store data as a frequency series in
``data/DET/stilde``. A ``KeyError`` will be raised if the model used
did not store data in that path.
Parameters
----------
group : str, optional
Group that the data group is in. Default (None) is to look in the
top-level.
Returns
-------
dict :
Dictionary of detector name -> FrequencySeries.
"""
fmt = '{}/{}/stilde'
if group is None or group == '/':
path = self.data_group
else:
path = '/'.join([group, self.data_group])
data = {}
for det in self[path].keys():
group = self[fmt.format(path, det)]
data[det] = FrequencySeries(
group[()], delta_f=group.attrs['delta_f'],
epoch=group.attrs['epoch'])
return data
def read_psds(self, group=None):
"""Loads the PSDs stored in the file as a FrequencySeries.
Only works for models that store PSDs in
``data/DET/psds/0``. A ``KeyError`` will be raised if the model used
did not store PSDs in that path.
Parameters
----------
group : str, optional
Group that the data group is in. Default (None) is to look in the
top-level.
Returns
-------
dict :
Dictionary of detector name -> FrequencySeries.
"""
fmt = '{}/{}/psds/0'
if group is None or group == '/':
path = self.data_group
else:
path = '/'.join([group, self.data_group])
psds = {}
for det in self[path].keys():
group = self[fmt.format(path, det)]
psds[det] = FrequencySeries(
group[()], delta_f=group.attrs['delta_f'])
return psds
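# --- Illustrative sketch (not part of the original module) -----------------
# How ``BaseInferenceFile.get_slice`` maps the thinning options onto a python
# slice; ``get_slice`` is a staticmethod, so no open file is needed. The
# numbers are hypothetical.
def _thinning_slice_example():
    samples = numpy.arange(20)
    # start at index 4 and keep every ceil(2.5) = 3rd sample thereafter
    slc = BaseInferenceFile.get_slice(thin_start=4, thin_interval=2.5)
    return samples[slc]  # -> array([ 4,  7, 10, 13, 16, 19])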
| 38,576
| 36.417071
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/txt.py
|
# Copyright (C) 2017 Christopher M. Biwer
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module defines functions for reading samples that the
inference samplers generate and store in an ASCII TXT file.
"""
import numpy
class InferenceTXTFile(object):
    """A class that has extra functions for reading the samples
from posterior-only TXT files.
Parameters
-----------
path : str
The path to the TXT file.
mode : {None, str}
The mode to open the file. Only accepts "r" or "rb" for reading.
delimiter : str
Delimiter to use for TXT file. Default is space-delimited.
"""
name = "txt"
comments = ""
delimiter = " "
def __init__(self, path, mode=None, delimiter=None):
self.path = path
self.delimiter = delimiter if delimiter is not None else self.delimiter
if mode in ["r", "rb"]:
self.mode = mode
else:
raise ValueError("Mode for InferenceTXTFile must be 'r' or 'rb'.")
@classmethod
def write(cls, output_file, samples, labels, delimiter=None):
""" Writes a text file with samples.
Parameters
-----------
output_file : str
The path of the file to write.
samples : FieldArray
Samples to write to file.
labels : list
A list of strings to include as header in TXT file.
delimiter : str
Delimiter to use in TXT file.
"""
delimiter = delimiter if delimiter is not None else cls.delimiter
header = delimiter.join(labels)
numpy.savetxt(output_file, samples,
comments=cls.comments, header=header,
delimiter=delimiter)
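# --- Illustrative sketch (not part of the original module) -----------------
# Writing a few posterior samples to a space-delimited TXT file; the output
# name, parameter labels, and values are hypothetical, and a plain 2D array
# stands in for a FieldArray.
def _write_txt_example():
    samples = numpy.column_stack([[1.40, 1.50, 1.35], [1.20, 1.10, 1.25]])
    InferenceTXTFile.write('posterior_samples.txt', samples,
                           labels=['mass1', 'mass2'])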
| 2,421
| 35.149254
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/base_sampler.py
|
# Copyright (C) 2019 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Provides abstract base class for all samplers."""
import time
from abc import (ABCMeta, abstractmethod)
from .base_hdf import BaseInferenceFile
class BaseSamplerFile(BaseInferenceFile, metaclass=ABCMeta):
"""Base HDF class for all samplers.
This adds abstract methods ``write_resume_point`` and
``write_sampler_metadata`` to :py:class:`BaseInferenceFile`.
"""
def write_run_start_time(self):
"""Writes the current (UNIX) time to the file.
Times are stored as a list in the file's ``attrs``, with name
        ``run_start_time``. If the attribute already exists, the current time
is appended. Otherwise, the attribute will be created and time added.
"""
attrname = "run_start_time"
try:
times = self.attrs[attrname].tolist()
except KeyError:
times = []
times.append(time.time())
self.attrs[attrname] = times
@property
def run_start_time(self):
"""The (UNIX) time pycbc inference began running.
If the run resumed from a checkpoint, the time the last checkpoint
started is reported.
"""
return self.attrs['run_start_time'][-1]
    def write_run_end_time(self):
        """Writes the current (UNIX) time as the ``run_end_time`` attribute.
"""
self.attrs["run_end_time"] = time.time()
@property
def run_end_time(self):
"""The (UNIX) time pycbc inference finished.
"""
return self.attrs["run_end_time"]
@abstractmethod
    def write_resume_point(self):
        """Should write the point from which a sampler will resume.
How the resume point is indexed is up to the sampler. For example,
MCMC samplers use the number of iterations that are stored in the
checkpoint file.
"""
pass
@abstractmethod
def write_sampler_metadata(self, sampler):
"""This should write the given sampler's metadata to the file.
This should also include the model's metadata.
"""
pass
def update_checkpoint_history(self):
"""Writes a copy of relevant metadata to the file's checkpoint history.
All data are written to ``sampler_info/checkpoint_history``. If the
group does not exist yet, it will be created.
This function writes the current time and the time since the last
checkpoint to the file. It will also call
:py:func:`_update_sampler_history` to write sampler-specific history.
"""
path = '/'.join([self.sampler_group, 'checkpoint_history'])
try:
history = self[path]
except KeyError:
# assume history doesn't exist yet
self.create_group(path)
history = self[path]
# write the checkpoint time
current_time = time.time()
self.write_data('checkpoint_time', current_time, path=path,
append=True)
# get the amount of time since the last checkpoint
checkpoint_times = history['checkpoint_time'][()]
if len(checkpoint_times) == 1:
# this is the first checkpoint, get the run time for comparison
lasttime = self.run_start_time
else:
lasttime = checkpoint_times[-2]
# if a resume happened since the last checkpoint, use the resume
            # time instead
if lasttime < self.run_start_time:
lasttime = self.run_start_time
self.write_data('checkpoint_dt', current_time-lasttime, path=path,
append=True)
# write any sampler-specific history
self._update_sampler_history()
def _update_sampler_history(self):
"""Writes sampler-specific history to the file.
This function does nothing. Classes that inherit from it may override
it to add any extra information they would like written. This is
called by :py:func:`update_checkpoint_history`.
"""
pass
def validate(self):
"""Runs a validation test.
This checks that a samples group exist, and that there are more than
one sample stored to it.
Returns
-------
bool :
Whether or not the file is valid as a checkpoint file.
"""
try:
group = '{}/{}'.format(self.samples_group, self.variable_params[0])
checkpoint_valid = self[group].size != 0
except KeyError:
checkpoint_valid = False
return checkpoint_valid
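# --- Illustrative sketch (not part of the original module) -----------------
# ``update_checkpoint_history`` stores its data as resizable 1D datasets under
# ``sampler_info/checkpoint_history``; reading them back from an already-open
# file might look like this (the datasets are assumed to exist).
def _checkpoint_history_example(fp):
    history = fp['/'.join([fp.sampler_group, 'checkpoint_history'])]
    times = history['checkpoint_time'][()]  # wall-clock time of each checkpoint
    deltas = history['checkpoint_dt'][()]   # seconds since previous checkpoint
    return times, deltas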
| 5,298
| 34.804054
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/ptemcee.py
|
# Copyright (C) 2020 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Provides I/O support for ptemcee.
"""
from .base_sampler import BaseSamplerFile
from . import base_mcmc
from .base_mcmc import EnsembleMCMCMetadataIO
from .base_multitemper import (CommonMultiTemperedMetadataIO,
write_samples,
ensemble_read_raw_samples)
class PTEmceeFile(EnsembleMCMCMetadataIO, CommonMultiTemperedMetadataIO,
BaseSamplerFile):
"""Class to handle file IO for the ``ptemcee`` sampler."""
name = 'ptemcee_file'
# attributes for setting up an ensemble from file
_ensemble_attrs = ['jumps_proposed', 'jumps_accepted', 'swaps_proposed',
'swaps_accepted', 'logP', 'logl']
def write_sampler_metadata(self, sampler):
"""Adds writing ptemcee-specific metadata to MultiTemperedMCMCIO.
"""
super(PTEmceeFile, self).write_sampler_metadata(sampler)
group = self[self.sampler_group]
group.attrs["starting_betas"] = sampler.starting_betas
group.attrs["adaptive"] = sampler.adaptive
group.attrs["adaptation_lag"] = sampler.adaptation_lag
group.attrs["adaptation_time"] = sampler.adaptation_time
group.attrs["scale_factor"] = sampler.scale_factor
@property
def starting_betas(self):
"""The starting betas that were used."""
return self[self.sampler_group].attrs["starting_betas"]
def write_betas(self, betas, last_iteration=None):
"""Writes the betas to sampler group.
As the betas may change with iterations, this writes the betas as
a ntemps x niterations array to the file.
"""
# we'll use the single temperature write_samples to write the betas,
# so that we get the thinning settings
base_mcmc.write_samples(self, {'betas': betas},
last_iteration=last_iteration,
samples_group=self.sampler_group)
def read_betas(self, thin_start=None, thin_interval=None, thin_end=None,
iteration=None):
"""Reads betas from the file.
Parameters
-----------
thin_start : int, optional
Start reading from the given iteration. Default is to start from
the first iteration.
thin_interval : int, optional
Only read every ``thin_interval`` -th sample. Default is 1.
thin_end : int, optional
Stop reading at the given iteration. Default is to end at the last
iteration.
iteration : int, optional
Only read the given iteration. If this provided, it overrides
the ``thin_(start|interval|end)`` options.
Returns
-------
array
A ntemps x niterations array of the betas.
"""
slc = base_mcmc._ensemble_get_index(self, thin_start=thin_start,
thin_interval=thin_interval,
thin_end=thin_end,
iteration=iteration)
betas = self[self.sampler_group]['betas'][:]
return betas[:, slc]
def write_ensemble_attrs(self, ensemble):
"""Writes ensemble attributes necessary to restart from checkpoint.
Parameters
----------
ensemble : ptemcee.Ensemble
The ensemble to write attributes for.
"""
group = self[self.sampler_group]
for attr in self._ensemble_attrs:
vals = getattr(ensemble, attr)
try:
group[attr][:] = vals
except KeyError:
group[attr] = vals
def read_ensemble_attrs(self):
"""Reads ensemble attributes from the file.
Returns
-------
dict :
Dictionary of the ensemble attributes.
"""
group = self[self.sampler_group]
return {attr: group[attr][:] for attr in self._ensemble_attrs}
def write_samples(self, samples, **kwargs):
r"""Writes samples to the given file.
Calls :py:func:`base_multitemper.write_samples`. See that function for
details.
Parameters
----------
samples : dict
The samples to write. Each array in the dictionary should have
shape ntemps x nwalkers x niterations.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_multitemper.write_samples`.
"""
write_samples(self, samples, **kwargs)
def read_raw_samples(self, fields, **kwargs):
r"""Base function for reading samples.
Calls :py:func:`base_multitemper.ensemble_read_raw_samples`. See that
function for details.
Parameters
----------
fields : list
The list of field names to retrieve.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_multitemper.ensemble_read_raw_samples`.
Returns
-------
dict
A dictionary of field name -> numpy array pairs.
"""
return ensemble_read_raw_samples(self, fields, **kwargs)
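# --- Illustrative sketch (not part of the original module) -----------------
# Reading the (possibly adapting) inverse temperatures back from a checkpoint;
# the file name and thinning interval are hypothetical.
def _read_betas_example():
    with PTEmceeFile('ptemcee_checkpoint.hdf', 'r') as fp:
        betas = fp.read_betas(thin_interval=10)  # ntemps x (niterations // 10)
    return betas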
| 5,947
| 35.944099
| 78
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/ultranest.py
|
# Copyright (C) 2019 Collin Capano, Sumit Kumar, Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Provides IO for the ultranest sampler.
"""
from .base_nested_sampler import BaseNestedSamplerFile
class UltranestFile(BaseNestedSamplerFile):
"""Class to handle file IO for the ``ultranest`` sampler."""
name = 'ultranest_file'
| 1,226
| 36.181818
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/base_mcmc.py
|
# Copyright (C) 2016 Christopher M. Biwer, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Provides I/O that is specific to MCMC samplers.
"""
import numpy
import argparse
class CommonMCMCMetadataIO(object):
"""Provides functions for reading/writing MCMC metadata to file.
The functions here are common to both standard MCMC (in which chains
are independent) and ensemble MCMC (in which chains/walkers share
information).
"""
def write_resume_point(self):
"""Keeps a list of the number of iterations that were in a file when a
run was resumed from a checkpoint."""
try:
resume_pts = self.attrs["resume_points"].tolist()
except KeyError:
resume_pts = []
try:
niterations = self.niterations
except KeyError:
niterations = 0
resume_pts.append(niterations)
self.attrs["resume_points"] = resume_pts
def write_niterations(self, niterations):
"""Writes the given number of iterations to the sampler group."""
self[self.sampler_group].attrs['niterations'] = niterations
@property
def niterations(self):
"""Returns the number of iterations the sampler was run for."""
return self[self.sampler_group].attrs['niterations']
@property
def nwalkers(self):
"""Returns the number of walkers used by the sampler.
Alias of ``nchains``.
"""
try:
return self[self.sampler_group].attrs['nwalkers']
except KeyError:
return self[self.sampler_group].attrs['nchains']
@property
def nchains(self):
"""Returns the number of chains used by the sampler.
Alias of ``nwalkers``.
"""
try:
return self[self.sampler_group].attrs['nchains']
except KeyError:
return self[self.sampler_group].attrs['nwalkers']
def _thin_data(self, group, params, thin_interval):
"""Thins data on disk by the given interval.
This makes no effort to record the thinning interval that is applied.
Parameters
----------
group : str
The group where the datasets to thin live.
params : list
The list of dataset names to thin.
thin_interval : int
The interval to thin the samples on disk by.
"""
samples = self.read_raw_samples(params, thin_start=0,
thin_interval=thin_interval,
thin_end=None, flatten=False,
group=group)
# now resize and write the data back to disk
fpgroup = self[group]
for param in params:
data = samples[param]
# resize the arrays on disk
fpgroup[param].resize(data.shape)
# and write
fpgroup[param][:] = data
def thin(self, thin_interval):
"""Thins the samples on disk to the given thinning interval.
The interval must be a multiple of the file's current ``thinned_by``.
Parameters
----------
thin_interval : int
The interval the samples on disk should be thinned by.
"""
# get the new interval to thin by
new_interval = thin_interval / self.thinned_by
if new_interval % 1:
raise ValueError("thin interval ({}) must be a multiple of the "
"current thinned_by ({})"
.format(thin_interval, self.thinned_by))
new_interval = int(new_interval)
# now thin the data on disk
params = list(self[self.samples_group].keys())
self._thin_data(self.samples_group, params, new_interval)
# store the interval that samples were thinned by
self.thinned_by = thin_interval
@property
def thinned_by(self):
"""Returns interval samples have been thinned by on disk.
This looks for ``thinned_by`` in the samples group attrs. If none is
found, will just return 1.
"""
try:
thinned_by = self.attrs['thinned_by']
except KeyError:
thinned_by = 1
return thinned_by
@thinned_by.setter
def thinned_by(self, thinned_by):
"""Sets the thinned_by attribute.
This is the interval that samples have been thinned by on disk. The
given value is written to
``self[self.samples_group].attrs['thinned_by']``.
"""
self.attrs['thinned_by'] = int(thinned_by)
def last_iteration(self, parameter=None, group=None):
"""Returns the iteration of the last sample of the given parameter.
Parameters
----------
parameter : str, optional
The name of the parameter to get the last iteration for. If
None provided, will just use the first parameter in ``group``.
group : str, optional
The name of the group to get the last iteration from. Default is
the ``samples_group``.
"""
if group is None:
group = self.samples_group
if parameter is None:
try:
parameter = list(self[group].keys())[0]
except (IndexError, KeyError):
# nothing has been written yet, just return 0
return 0
try:
lastiter = self[group][parameter].shape[-1]
except KeyError:
# no samples have been written, just return 0
lastiter = 0
# account for thinning
return lastiter * self.thinned_by
def iterations(self, parameter):
"""Returns the iteration each sample occurred at."""
return numpy.arange(0, self.last_iteration(parameter), self.thinned_by)
def write_sampler_metadata(self, sampler):
"""Writes the sampler's metadata."""
self.attrs['sampler'] = sampler.name
try:
self[self.sampler_group].attrs['nchains'] = sampler.nchains
except ValueError:
self[self.sampler_group].attrs['nwalkers'] = sampler.nwalkers
# write the model's metadata
sampler.model.write_metadata(self)
@property
def is_burned_in(self):
"""Returns whether or not chains are burned in.
Raises a ``ValueError`` if no burn in tests were done.
"""
try:
return self[self.sampler_group]['is_burned_in'][()]
except KeyError:
raise ValueError("No burn in tests were performed")
@property
def burn_in_iteration(self):
"""Returns the burn in iteration of all the chains.
Raises a ``ValueError`` if no burn in tests were done.
"""
try:
return self[self.sampler_group]['burn_in_iteration'][()]
except KeyError:
raise ValueError("No burn in tests were performed")
@property
def burn_in_index(self):
"""Returns the burn in index.
This is the burn in iteration divided by the file's ``thinned_by``.
Requires the class that this is used with has a ``burn_in_iteration``
attribute.
"""
return self.burn_in_iteration // self.thinned_by
@property
def act(self):
"""The autocorrelation time (ACT).
This is the ACL times the file's thinned by. Raises a ``ValueError``
if the ACT has not been calculated.
"""
try:
return self[self.sampler_group]['act'][()]
except KeyError:
raise ValueError("ACT has not been calculated")
@act.setter
def act(self, act):
"""Writes the autocorrelation time(s).
ACT(s) are written to the ``sample_group`` as a dataset with name
``act``.
Parameters
----------
act : array or int
ACT(s) to write.
"""
# pylint: disable=no-member
self.write_data('act', act, path=self.sampler_group)
@property
def raw_acts(self):
"""Dictionary of parameter names -> raw autocorrelation time(s).
Depending on the sampler, the autocorrelation times may be floats,
or [ntemps x] [nchains x] arrays.
Raises a ``ValueError`` is no raw acts have been set.
"""
try:
group = self[self.sampler_group]['raw_acts']
except KeyError:
raise ValueError("ACTs have not been calculated")
acts = {}
for param in group:
acts[param] = group[param][()]
return acts
@raw_acts.setter
def raw_acts(self, acts):
"""Writes the raw autocorrelation times.
The ACT of each parameter is saved to
``[sampler_group]/raw_acts/{param}']``. Works for all types of MCMC
samplers (independent chains, ensemble, parallel tempering).
Parameters
----------
acts : dict
A dictionary of ACTs keyed by the parameter.
"""
path = self.sampler_group + '/raw_acts'
for param in acts:
self.write_data(param, acts[param], path=path)
@property
def acl(self):
"""The autocorrelation length (ACL) of the samples.
This is the autocorrelation time (ACT) divided by the file's
``thinned_by`` attribute. Raises a ``ValueError`` if the ACT has not
been calculated.
"""
return self.act / self.thinned_by
@acl.setter
def acl(self, acl):
"""Sets the autocorrelation length (ACL) of the samples.
This will convert the given value(s) to autocorrelation time(s) and
save to the ``act`` attribute; see that attribute for details.
"""
self.act = acl * self.thinned_by
@property
def raw_acls(self):
"""Dictionary of parameter names -> raw autocorrelation length(s).
Depending on the sampler, the autocorrelation lengths may be floats,
or [ntemps x] [nchains x] arrays.
The ACLs are the autocorrelation times (ACT) divided by the file's
``thinned_by`` attribute. Raises a ``ValueError`` is no raw acts have
been set.
"""
return {p: self.raw_acts[p] / self.thinned_by for p in self.raw_acts}
@raw_acls.setter
def raw_acls(self, acls):
"""Sets the raw autocorrelation lengths.
The given ACLs are converted to autocorrelation times (ACTs) and saved
to the ``raw_acts`` attribute; see that attribute for details.
Parameters
----------
acls : dict
A dictionary of ACLs keyed by the parameter.
"""
self.raw_acts = {p: acls[p] * self.thinned_by for p in acls}
def _update_sampler_history(self):
"""Writes the number of iterations, effective number of samples,
autocorrelation times, and burn-in iteration to the history.
"""
path = '/'.join([self.sampler_group, 'checkpoint_history'])
# write the current number of iterations
self.write_data('niterations', self.niterations, path=path,
append=True)
self.write_data('effective_nsamples', self.effective_nsamples,
path=path, append=True)
# write the act: we'll make sure that this is 2D, so that the acts
        # can be appended along the last dimension
try:
act = self.act
except ValueError:
            # no acts were calculated
act = None
if act is not None:
act = act.reshape(tuple(list(act.shape)+[1]))
self.write_data('act', act, path=path, append=True)
# write the burn in iteration in the same way
try:
burn_in = self.burn_in_iteration
except ValueError:
# no burn in tests were done
burn_in = None
if burn_in is not None:
burn_in = burn_in.reshape(tuple(list(burn_in.shape)+[1]))
self.write_data('burn_in_iteration', burn_in, path=path,
append=True)
@staticmethod
def extra_args_parser(parser=None, skip_args=None, **kwargs):
"""Create a parser to parse sampler-specific arguments for loading
samples.
Parameters
----------
parser : argparse.ArgumentParser, optional
Instead of creating a parser, add arguments to the given one. If
none provided, will create one.
skip_args : list, optional
Don't parse the given options. Options should be given as the
option string, minus the '--'. For example,
``skip_args=['iteration']`` would cause the ``--iteration``
argument not to be included.
\**kwargs :
All other keyword arguments are passed to the parser that is
created.
Returns
-------
parser : argparse.ArgumentParser
            An argument parser with the extra arguments added.
actions : list of argparse.Action
A list of the actions that were added.
"""
if parser is None:
parser = argparse.ArgumentParser(**kwargs)
elif kwargs:
            raise ValueError("No other keyword arguments should be "
                             "provided if a parser is provided.")
if skip_args is None:
skip_args = []
actions = []
if 'thin-start' not in skip_args:
act = parser.add_argument(
"--thin-start", type=int, default=None,
help="Sample number to start collecting samples. If "
"none provided, will use the input file's `thin_start` "
"attribute.")
actions.append(act)
if 'thin-interval' not in skip_args:
act = parser.add_argument(
"--thin-interval", type=int, default=None,
help="Interval to use for thinning samples. If none provided, "
"will use the input file's `thin_interval` attribute.")
actions.append(act)
if 'thin-end' not in skip_args:
act = parser.add_argument(
"--thin-end", type=int, default=None,
help="Sample number to stop collecting samples. If "
"none provided, will use the input file's `thin_end` "
"attribute.")
actions.append(act)
if 'iteration' not in skip_args:
            act = parser.add_argument(
                "--iteration", type=int, default=None,
                help="Only retrieve the given iteration. To load "
                     "the last n-th sample use -n, e.g., -1 will "
"load the last iteration. This overrides "
"the thin-start/interval/end options.")
actions.append(act)
if 'walkers' not in skip_args and 'chains' not in skip_args:
act = parser.add_argument(
"--walkers", "--chains", type=int, nargs="+", default=None,
help="Only retrieve samples from the listed "
"walkers/chains. Default is to retrieve from all "
"walkers/chains.")
actions.append(act)
return parser, actions
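# --- Illustrative sketch (not part of the original module) -----------------
# How on-disk thinning composes with ``thin``/``thinned_by``: for a file
# already thinned by 2, requesting 6 keeps every 3rd stored sample and records
# thinned_by = 6, while requesting 5 raises a ValueError. ``fp`` is assumed to
# be an open file with these methods mixed in.
def _thin_on_disk_example(fp):
    fp.thin(6)            # allowed whenever fp.thinned_by divides 6
    return fp.thinned_by  # -> 6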
class MCMCMetadataIO(object):
"""Provides functions for reading/writing metadata to file for MCMCs in
which all chains are independent of each other.
Overrides the ``BaseInference`` file's ``thin_start`` and ``thin_interval``
attributes. Instead of integers, these return arrays.
"""
@property
def thin_start(self):
"""Returns the default thin start to use for reading samples.
If burn-in tests were done, this will return the burn-in index of every
chain that has burned in. The start index for chains that have not
burned in will be greater than the number of samples, so that those
chains return no samples. If no burn-in tests were done, returns 0
for all chains.
"""
# pylint: disable=no-member
try:
thin_start = self.burn_in_index
# replace any that have not been burned in with the number
# of iterations; this will cause those chains to not return
# any samples
thin_start[~self.is_burned_in] = \
int(numpy.ceil(self.niterations/self.thinned_by))
return thin_start
except ValueError:
# no burn in, just return array of zeros
return numpy.zeros(self.nchains, dtype=int)
@property
def thin_interval(self):
"""Returns the default thin interval to use for reading samples.
If a finite ACL exists in the file, will return that. Otherwise,
returns 1.
"""
try:
acl = self.acl
except ValueError:
return numpy.ones(self.nchains, dtype=int)
# replace any infs with the number of samples
acl[numpy.isinf(acl)] = self.niterations / self.thinned_by
return numpy.ceil(acl).astype(int)
class EnsembleMCMCMetadataIO(object):
"""Provides functions for reading/writing metadata to file for ensemble
MCMCs.
"""
@property
def thin_start(self):
"""Returns the default thin start to use for reading samples.
If burn-in tests were done, returns the burn in index. Otherwise,
returns 0.
"""
try:
return self.burn_in_index
except ValueError:
# no burn in, just return 0
return 0
@property
def thin_interval(self):
"""Returns the default thin interval to use for reading samples.
If a finite ACL exists in the file, will return that. Otherwise,
returns 1.
"""
try:
acl = self.acl
except ValueError:
acl = 1
if numpy.isfinite(acl):
acl = int(numpy.ceil(acl))
else:
acl = 1
return acl
def write_samples(fp, samples, parameters=None, last_iteration=None,
samples_group=None, thin_by=None):
"""Writes samples to the given file.
This works for both standard MCMC and ensemble MCMC samplers without
parallel tempering.
Results are written to ``samples_group/{vararg}``, where ``{vararg}``
is the name of a model params. The samples are written as an
``nwalkers x niterations`` array. If samples already exist, the new
samples are appended to the current.
If the current samples on disk have been thinned (determined by the
``thinned_by`` attribute in the samples group), then the samples will
be thinned by the same amount before being written. The thinning is
    started at the sample in ``samples`` that occurred at the iteration
equal to the last iteration on disk plus the ``thinned_by`` interval.
If this iteration is larger than the iteration of the last given
sample, then none of the samples will be written.
Parameters
-----------
fp : BaseInferenceFile
Open file handler to write files to. Must be an instance of
BaseInferenceFile with CommonMCMCMetadataIO methods added.
samples : dict
The samples to write. Each array in the dictionary should have
shape nwalkers x niterations.
parameters : list, optional
Only write the specified parameters to the file. If None, will
write all of the keys in the ``samples`` dict.
last_iteration : int, optional
The iteration of the last sample. If the file's ``thinned_by``
attribute is > 1, this is needed to determine where to start
thinning the samples such that the interval between the last sample
currently on disk and the first new sample is the same as all of
the other samples.
samples_group : str, optional
Which group to write the samples to. Default (None) will result
in writing to "samples".
thin_by : int, optional
Override the ``thinned_by`` attribute in the file with the given
value. **Only set this if you are using this function to write
something other than inference samples!**
"""
nwalkers, nsamples = list(samples.values())[0].shape
assert all(p.shape == (nwalkers, nsamples)
for p in samples.values()), (
"all samples must have the same shape")
if samples_group is None:
samples_group = fp.samples_group
if parameters is None:
parameters = samples.keys()
# thin the samples
samples = thin_samples_for_writing(fp, samples, parameters,
last_iteration, samples_group,
thin_by=thin_by)
# loop over number of dimensions
group = samples_group + '/{name}'
for param in parameters:
dataset_name = group.format(name=param)
data = samples[param]
# check that there's something to write after thinning
if data.shape[1] == 0:
# nothing to write, move along
continue
try:
fp_nsamples = fp[dataset_name].shape[-1]
istart = fp_nsamples
istop = istart + data.shape[1]
if istop > fp_nsamples:
# resize the dataset
fp[dataset_name].resize(istop, axis=1)
except KeyError:
# dataset doesn't exist yet
istart = 0
istop = istart + data.shape[1]
fp.create_dataset(dataset_name, (nwalkers, istop),
maxshape=(nwalkers, None),
dtype=data.dtype,
fletcher32=True)
fp[dataset_name][:, istart:istop] = data
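# A minimal usage sketch for ``write_samples``: ``fp`` is assumed to be an
# already-open file handler with the CommonMCMCMetadataIO methods (e.g. an
# EmceeFile); the parameter names, shapes, and iteration count below are
# purely illustrative, and the helper itself exists only for illustration.
def _example_write_samples(fp, nwalkers=10, niterations=100):
    """Hedged sketch showing a typical call to ``write_samples``."""
    samples = {'x': numpy.random.normal(size=(nwalkers, niterations)),
               'y': numpy.random.normal(size=(nwalkers, niterations))}
    # last_iteration is needed whenever the file thins samples on disk
    write_samples(fp, samples, last_iteration=niterations)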
def ensemble_read_raw_samples(fp, fields, thin_start=None,
thin_interval=None, thin_end=None,
iteration=None, walkers=None, flatten=True,
group=None):
"""Base function for reading samples from ensemble MCMC files without
parallel tempering.
Parameters
-----------
fp : BaseInferenceFile
Open file handler to write files to. Must be an instance of
BaseInferenceFile with EnsembleMCMCMetadataIO methods added.
fields : list
The list of field names to retrieve.
thin_start : int, optional
Start reading from the given iteration. Default is to start from
the first iteration.
thin_interval : int, optional
        Only read every ``thin_interval``-th sample. Default is 1.
thin_end : int, optional
Stop reading at the given iteration. Default is to end at the last
iteration.
iteration : int, optional
        Only read the given iteration. If this is provided, it overrides
the ``thin_(start|interval|end)`` options.
walkers : (list of) int, optional
Only read from the given walkers. Default (``None``) is to read all.
flatten : bool, optional
Flatten the samples to 1D arrays before returning. Otherwise, the
returned arrays will have shape (requested walkers x
requested iteration(s)). Default is True.
group : str, optional
The name of the group to read sample datasets from. Default is
the file's ``samples_group``.
Returns
-------
dict
A dictionary of field name -> numpy array pairs.
"""
if isinstance(fields, str):
fields = [fields]
# walkers to load
widx, nwalkers = _ensemble_get_walker_index(fp, walkers)
# get the slice to use
get_index = _ensemble_get_index(fp, thin_start, thin_interval, thin_end,
iteration)
# load
if group is None:
group = fp.samples_group
group = group + '/{name}'
arrays = {}
for name in fields:
arr = fp[group.format(name=name)][widx, get_index]
niterations = arr.shape[-1] if iteration is None else 1
if flatten:
arr = arr.flatten()
else:
# ensure that the returned array is 2D
arr = arr.reshape((nwalkers, niterations))
arrays[name] = arr
return arrays
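# A minimal read-back sketch for ``ensemble_read_raw_samples``: ``fp`` is
# assumed to be an open handler with the EnsembleMCMCMetadataIO methods, and
# the field name is illustrative rather than taken from any particular run.
def _example_ensemble_read(fp):
    """Hedged sketch showing a typical call to ``ensemble_read_raw_samples``."""
    # read every 10th iteration of 'x' from walkers 0 and 1, keeping the
    # (nwalkers x niterations) shape instead of flattening
    arrays = ensemble_read_raw_samples(fp, ['x'], thin_interval=10,
                                       walkers=[0, 1], flatten=False)
    return arrays['x']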
def _ensemble_get_walker_index(fp, walkers=None):
"""Convenience function to determine which walkers to load.
Parameters
----------
fp : BaseInferenceFile
Open file handler to write files to. Must be an instance of
BaseInferenceFile with EnsembleMCMCMetadataIO methods added.
walkers : (list of) int, optional
Only read from the given walkers. Default (``None``) is to read all.
Returns
-------
widx : array or slice
The walker indices to load.
nwalkers : int
The number of walkers that will be loaded.
"""
if walkers is not None:
widx = numpy.zeros(fp.nwalkers, dtype=bool)
widx[walkers] = True
nwalkers = widx.sum()
else:
widx = slice(None, None)
nwalkers = fp.nwalkers
return widx, nwalkers
def _ensemble_get_index(fp, thin_start=None, thin_interval=None, thin_end=None,
iteration=None):
"""Determines the sample indices to retrieve for an ensemble MCMC.
Parameters
-----------
fp : BaseInferenceFile
Open file handler to write files to. Must be an instance of
BaseInferenceFile with EnsembleMCMCMetadataIO methods added.
thin_start : int, optional
Start reading from the given iteration. Default is to start from
the first iteration.
thin_interval : int, optional
        Only read every ``thin_interval``-th sample. Default is 1.
thin_end : int, optional
Stop reading at the given iteration. Default is to end at the last
iteration.
iteration : int, optional
        Only read the given iteration. If this is provided, it overrides
the ``thin_(start|interval|end)`` options.
Returns
-------
slice or int
The indices to retrieve.
"""
if iteration is not None:
get_index = int(iteration)
else:
if thin_start is None:
thin_start = fp.thin_start
if thin_interval is None:
thin_interval = fp.thin_interval
if thin_end is None:
thin_end = fp.thin_end
get_index = fp.get_slice(thin_start=thin_start,
thin_interval=thin_interval,
thin_end=thin_end)
return get_index
def _get_index(fp, chains, thin_start=None, thin_interval=None, thin_end=None,
iteration=None):
"""Determines the sample indices to retrieve for an MCMC with independent
chains.
Parameters
-----------
fp : BaseInferenceFile
Open file handler to read samples from. Must be an instance of
BaseInferenceFile with EnsembleMCMCMetadataIO methods added.
chains : array of int
The chains to load.
thin_start : array or int, optional
Start reading from the given sample. May either provide an array
indicating the start index for each chain, or an integer. If the
former, the array must have the same length as the number of chains
that will be retrieved. If the latter, the given value will be used
for all chains. Default (None) is to use the file's ``thin_start``
attribute.
thin_interval : array or int, optional
Only read every ``thin_interval``-th sample. May either provide an
array indicating the interval to use for each chain, or an integer. If
the former, the array must have the same length as the number of chains
that will be retrieved. If the latter, the given value will be used for
all chains. Default (None) is to use the file's ``thin_interval``
attribute.
thin_end : array or int, optional
Stop reading at the given sample index. May either provide an
array indicating the end index to use for each chain, or an integer. If
the former, the array must have the same length as the number of chains
that will be retrieved. If the latter, the given value will be used for
        all chains. Default (None) is to use the file's ``thin_end``
attribute.
iteration : int, optional
Only read the given iteration from all chains. If provided, it
overrides the ``thin_(start|interval|end)`` options.
Returns
-------
get_index : list of slice or int
The indices to retrieve.
"""
nchains = len(chains)
# convenience function to get the right thin start/interval/end
if iteration is not None:
get_index = [int(iteration)]*nchains
else:
# get the slice arguments
thin_start = _format_slice_arg(thin_start, fp.thin_start, chains)
thin_interval = _format_slice_arg(thin_interval, fp.thin_interval,
chains)
thin_end = _format_slice_arg(thin_end, fp.thin_end, chains)
# the slices to use for each chain
get_index = [fp.get_slice(thin_start=thin_start[ci],
thin_interval=thin_interval[ci],
thin_end=thin_end[ci])
for ci in range(nchains)]
return get_index
def _format_slice_arg(value, default, chains):
"""Formats a start/interval/end argument for picking out chains.
Parameters
----------
value : None, int, array or list of int
The thin-start/interval/end value to format. ``None`` indicates the
user did not specify anything, in which case ``default`` will be used.
If an integer, then it will be repeated to match the length of
        ``chains``. If an array or list, it must have the same length as
``chains``.
default : array
What to use instead if ``value`` is ``None``.
chains : array of int
The index values of chains that will be loaded.
Returns
-------
array
Array giving the value to use for each chain in ``chains``. The array
will have the same length as ``chains``.
"""
if value is None and default is None:
# no value provided, and default is None, just return Nones with the
# same length as chains
value = [None]*len(chains)
elif value is None:
# use the default, with the desired values extracted
value = default[chains]
elif isinstance(value, (int, numpy.int_)):
# a single integer was provided, repeat into an array
value = numpy.repeat(value, len(chains))
elif len(value) != len(chains):
# a list of values was provided, but the length does not match the
# chains, raise an error
raise ValueError("Number of requested thin-start/interval/end values "
"({}) does not match number of requested chains ({})"
.format(len(value), len(chains)))
return value
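# A small self-contained sketch of the three branches of ``_format_slice_arg``;
# the numbers are arbitrary and only numpy (imported above) is needed. The
# helper exists only for illustration.
def _example_format_slice_arg():
    """Hedged sketch of ``_format_slice_arg`` behaviour."""
    chains = numpy.array([0, 2])
    # None with a default array: the default is indexed by the chains
    assert list(_format_slice_arg(None, numpy.array([5, 6, 7]), chains)) == [5, 7]
    # a scalar is repeated once per requested chain
    assert list(_format_slice_arg(3, None, chains)) == [3, 3]
    # both None: one None per chain
    assert _format_slice_arg(None, None, chains) == [None, None]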
def thin_samples_for_writing(fp, samples, parameters, last_iteration,
group, thin_by=None):
"""Thins samples for writing to disk.
The thinning interval to use is determined by the given file handler's
``thinned_by`` attribute. If that attribute is 1, just returns the samples.
Parameters
----------
fp : CommonMCMCMetadataIO instance
        The file the samples will be written to. Needed to determine the
thin interval used on disk.
samples : dict
Dictionary mapping parameter names to arrays of (unthinned) samples.
The arrays are thinned along their last dimension.
parameters : list of str
The parameters to thin in ``samples`` before writing. All listed
parameters must be in ``samples``.
last_iteration : int
The iteration that the last sample in ``samples`` occurred at. This is
needed to figure out where to start the thinning in ``samples``, such
that the interval between the last sample on disk and the first new
sample is the same as all of the other samples.
group : str
The name of the group that the samples will be written to. This is
needed to determine what the last iteration saved on disk was.
thin_by : int, optional
        Override the ``thinned_by`` attribute in the file with the given
value. **Only do this if you are thinning something other than
inference samples!**
Returns
-------
dict :
Dictionary of the thinned samples to write.
"""
if thin_by is None:
thin_by = fp.thinned_by
if thin_by > 1:
if last_iteration is None:
raise ValueError("File's thinned_by attribute is > 1 ({}), "
"but last_iteration not provided."
.format(thin_by))
thinned_samples = {}
for param in parameters:
data = samples[param]
nsamples = data.shape[-1]
# To figure out where to start:
# the last iteration in the file + the file's thinning interval
# gives the iteration of the next sample that should be written;
# last_iteration - nsamples gives the iteration of the first
# sample in samples. Subtracting the latter from the former - 1
# (-1 to convert from iteration to index) therefore gives the index
# in the samples data to start using samples.
thin_start = fp.last_iteration(param, group) + thin_by \
- (last_iteration - nsamples) - 1
thinned_samples[param] = data[..., thin_start::thin_by]
else:
thinned_samples = samples
return thinned_samples
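# A worked example of the thin-start arithmetic above. ``_FakeFile`` is a
# hypothetical stand-in providing only the attributes this function touches;
# all of the numbers are illustrative.
def _example_thin_samples_for_writing():
    """Hedged sketch of ``thin_samples_for_writing``."""
    class _FakeFile(object):
        thinned_by = 3
        @staticmethod
        def last_iteration(param, group):
            # pretend the last sample on disk came from iteration 9
            return 9
    # 15 new samples from a single walker, covering iterations 10 through 24
    samples = {'x': numpy.arange(15).reshape(1, 15)}
    thinned = thin_samples_for_writing(_FakeFile(), samples, ['x'],
                                       last_iteration=24, group='samples')
    # keeps indices 2, 5, 8, 11, 14, i.e. iterations 12, 15, 18, 21, 24
    return thinned['x']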
def nsamples_in_chain(start_iter, interval, niterations):
"""Calculates the number of samples in an MCMC chain given a thinning
start, end, and interval.
This function will work with either python scalars, or numpy arrays.
Parameters
----------
start_iter : (array of) int
Start iteration. If negative, will count as being how many iterations
to start before the end; otherwise, counts how many iterations to
        skip from the beginning. If this is larger than niterations, will
just return 0.
interval : (array of) int
Thinning interval.
niterations : (array of) int
The number of iterations.
Returns
-------
num_samples : (array of) numpy.int
The number of samples in a chain, >= 0.
"""
# this is written in a slightly wonky way so that it will work with either
# python scalars or numpy arrays; it is equivalent to:
# if start_iter < 0:
# count = min(abs(start_iter), niterations)
# else:
# count = max(niterations - start_iter, 0)
slt0 = start_iter < 0
sgt0 = start_iter >= 0
count = slt0*abs(start_iter) + sgt0*(niterations - start_iter)
# ensure count is in [0, niterations]
cgtn = count > niterations
cok = (count >= 0) & (count <= niterations)
count = cgtn*niterations + cok*count
return numpy.ceil(count / interval).astype(int)
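# Quick numeric checks of ``nsamples_in_chain``; the numbers are arbitrary and
# the helper exists only for illustration.
def _example_nsamples_in_chain():
    """Hedged sketch of ``nsamples_in_chain``."""
    # starting at iteration 10 of 100 and thinning by 5 keeps
    # ceil((100 - 10) / 5) = 18 samples
    assert nsamples_in_chain(10, 5, 100) == 18
    # a negative start counts from the end: ceil(20 / 5) = 4 samples
    assert nsamples_in_chain(-20, 5, 100) == 4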
| 36,040
| 37.341489
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/emcee.py
|
# Copyright (C) 2018 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Provides IO for the emcee sampler.
"""
import numpy
from .base_sampler import BaseSamplerFile
from .base_mcmc import (EnsembleMCMCMetadataIO, CommonMCMCMetadataIO,
write_samples, ensemble_read_raw_samples)
class EmceeFile(EnsembleMCMCMetadataIO, CommonMCMCMetadataIO, BaseSamplerFile):
"""Class to handle file IO for the ``emcee`` sampler."""
name = 'emcee_file'
def write_samples(self, samples, **kwargs):
r"""Writes samples to the given file.
Calls :py:func:`base_mcmc.write_samples`. See that function for
details.
Parameters
----------
samples : dict
The samples to write. Each array in the dictionary should have
shape nwalkers x niterations.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_mcmc.write_samples`.
"""
write_samples(self, samples, **kwargs)
def read_raw_samples(self, fields, **kwargs):
r"""Base function for reading samples.
Calls :py:func:`base_mcmc.ensemble_read_raw_samples`. See that function
for details.
Parameters
-----------
fields : list
The list of field names to retrieve.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_mcmc.ensemble_read_raw_samples`.
Returns
-------
dict
A dictionary of field name -> numpy array pairs.
"""
return ensemble_read_raw_samples(self, fields, **kwargs)
def read_acceptance_fraction(self, walkers=None):
"""Reads the acceptance fraction.
Parameters
-----------
walkers : (list of) int, optional
The walker index (or a list of indices) to retrieve. If None,
samples from all walkers will be obtained.
Returns
-------
array
Array of acceptance fractions with shape (requested walkers,).
"""
group = self.sampler_group + '/acceptance_fraction'
if walkers is None:
wmask = numpy.ones(self.nwalkers, dtype=bool)
else:
wmask = numpy.zeros(self.nwalkers, dtype=bool)
wmask[walkers] = True
return self[group][wmask]
def write_acceptance_fraction(self, acceptance_fraction):
"""Write acceptance_fraction data to file. Results are written to
the ``[sampler_group]/acceptance_fraction``.
Parameters
-----------
acceptance_fraction : numpy.ndarray
Array of acceptance fractions to write.
"""
group = self.sampler_group + '/acceptance_fraction'
try:
self[group][:] = acceptance_fraction
except KeyError:
# dataset doesn't exist yet, create it
self[group] = acceptance_fraction
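# A minimal read-back sketch for ``EmceeFile``. The file name is hypothetical
# and is assumed to already contain a completed emcee run; ``variable_params``
# is provided by the base IO class.
def _example_read_emcee(path='emcee_run.hdf'):
    """Hedged usage sketch for ``EmceeFile``."""
    with EmceeFile(path, 'r') as fp:
        # acceptance fraction of the first two walkers
        frac = fp.read_acceptance_fraction(walkers=[0, 1])
        # flattened raw samples of all varied parameters
        samples = fp.read_raw_samples(list(fp.variable_params))
    return frac, samples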
| 3,848
| 33.061947
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/__init__.py
|
# Copyright (C) 2018 Collin Capano
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""I/O utilities for pycbc inference
"""
import os
import argparse
import shutil
import textwrap
import numpy
import logging
import h5py as _h5py
from pycbc.io.record import (FieldArray, _numpy_function_lib)
from pycbc import waveform as _waveform
from pycbc.io.hdf import (dump_state, load_state)
from pycbc.inference.option_utils import (ParseLabelArg, ParseParametersArg)
from .emcee import EmceeFile
from .emcee_pt import EmceePTFile
from .ptemcee import PTEmceeFile
from .cpnest import CPNestFile
from .multinest import MultinestFile
from .dynesty import DynestyFile
from .ultranest import UltranestFile
from .posterior import PosteriorFile
from .txt import InferenceTXTFile
filetypes = {
EmceeFile.name: EmceeFile,
EmceePTFile.name: EmceePTFile,
PTEmceeFile.name: PTEmceeFile,
CPNestFile.name: CPNestFile,
MultinestFile.name: MultinestFile,
DynestyFile.name: DynestyFile,
PosteriorFile.name: PosteriorFile,
UltranestFile.name: UltranestFile,
}
try:
from .epsie import EpsieFile
filetypes[EpsieFile.name] = EpsieFile
except ImportError:
pass
def get_file_type(filename):
""" Returns I/O object to use for file.
Parameters
----------
filename : str
Name of file.
Returns
-------
file_type : {InferenceFile, InferenceTXTFile}
The type of inference file object to use.
"""
txt_extensions = [".txt", ".dat", ".csv"]
hdf_extensions = [".hdf", ".h5", ".bkup", ".checkpoint"]
for ext in hdf_extensions:
if filename.endswith(ext):
with _h5py.File(filename, 'r') as fp:
filetype = fp.attrs['filetype']
try:
filetype = str(filetype.decode())
except AttributeError:
pass
return filetypes[filetype]
for ext in txt_extensions:
if filename.endswith(ext):
return InferenceTXTFile
raise TypeError("Extension is not supported.")
def loadfile(path, mode=None, filetype=None, **kwargs):
"""Loads the given file using the appropriate InferenceFile class.
    If ``filetype`` is not provided, this will try to retrieve the ``filetype``
from the file's ``attrs``. If the file does not exist yet, an IOError will
be raised if ``filetype`` is not provided.
Parameters
----------
path : str
The filename to load.
mode : str, optional
What mode to load the file with, e.g., 'w' for write, 'r' for read,
        'a' for append. Default is h5py.File's default mode, which is 'a'.
filetype : str, optional
Force the file to be loaded with the given class name. This must be
provided if creating a new file.
Returns
-------
filetype instance
An open file handler to the file. The class used for IO with the file
is determined by the ``filetype`` keyword (if provided) or the
``filetype`` stored in the file (if not provided).
"""
if filetype is None:
# try to read the file to get its filetype
try:
fileclass = get_file_type(path)
except IOError:
# file doesn't exist, filetype must be provided
raise IOError("The file appears not to exist. In this case, "
"filetype must be provided.")
else:
fileclass = filetypes[filetype]
return fileclass(path, mode=mode, **kwargs)
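# A minimal usage sketch for ``loadfile``. The file name is hypothetical; the
# filetype is read from the existing file's attrs, so it only needs to be
# given explicitly when creating a new file. The helper exists only for
# illustration and assumes the handler exposes ``read_samples`` and
# ``variable_params`` as the base IO class does.
def _example_loadfile(path='results.hdf'):
    """Hedged usage sketch for ``loadfile``."""
    with loadfile(path, 'r') as fp:
        # read all samples of the parameters that were varied in the run
        samples = fp.read_samples(fp.variable_params)
    return samples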
#
# =============================================================================
#
# HDF Utilities
#
# =============================================================================
#
def check_integrity(filename):
"""Checks the integrity of an InferenceFile.
Checks done are:
* can the file open?
* do all of the datasets in the samples group have the same shape?
* can the first and last sample in all of the datasets in the samples
group be read?
If any of these checks fail, an IOError is raised.
Parameters
----------
filename: str
Name of an InferenceFile to check.
Raises
------
ValueError
If the given file does not exist.
KeyError
If the samples group does not exist.
IOError
If any of the checks fail.
"""
# check that the file exists
if not os.path.exists(filename):
raise ValueError("file {} does not exist".format(filename))
# if the file is corrupted such that it cannot be opened, the next line
# will raise an IOError
with loadfile(filename, 'r') as fp:
# check that all datasets in samples have the same shape
parameters = list(fp[fp.samples_group].keys())
# but only do the check if parameters have been written
if len(parameters) > 0:
group = fp.samples_group + '/{}'
# use the first parameter as a reference shape
ref_shape = fp[group.format(parameters[0])].shape
if not all(fp[group.format(param)].shape == ref_shape
for param in parameters):
raise IOError("not all datasets in the samples group have "
"the same shape")
# check that we can read the first/last sample
firstidx = tuple([0]*len(ref_shape))
lastidx = tuple([-1]*len(ref_shape))
for param in parameters:
_ = fp[group.format(param)][firstidx]
_ = fp[group.format(param)][lastidx]
def validate_checkpoint_files(checkpoint_file, backup_file,
check_nsamples=True):
"""Checks if the given checkpoint and/or backup files are valid.
The checkpoint file is considered valid if:
* it passes all tests run by ``check_integrity``;
* it has at least one sample written to it (indicating at least one
checkpoint has happened).
The same applies to the backup file. The backup file must also have the
same number of samples as the checkpoint file, otherwise, the backup is
considered invalid.
If the checkpoint (backup) file is found to be valid, but the backup
(checkpoint) file is not valid, then the checkpoint (backup) is copied to
the backup (checkpoint). Thus, this function ensures that checkpoint and
backup files are either both valid or both invalid.
Parameters
----------
checkpoint_file : string
Name of the checkpoint file.
backup_file : string
        Name of the backup file.
    check_nsamples : bool, optional
        If True (the default), also check that the backup file has the same
        number of samples as the checkpoint file; if not, the backup is
        considered invalid.
Returns
-------
checkpoint_valid : bool
Whether or not the checkpoint (and backup) file may be used for loading
samples.
"""
# check if checkpoint file exists and is valid
try:
check_integrity(checkpoint_file)
checkpoint_valid = True
except (ValueError, KeyError, IOError):
checkpoint_valid = False
# backup file
try:
check_integrity(backup_file)
backup_valid = True
except (ValueError, KeyError, IOError):
backup_valid = False
# since we can open the file, run self diagnostics
if checkpoint_valid:
with loadfile(checkpoint_file, 'r') as fp:
checkpoint_valid = fp.validate()
if backup_valid:
with loadfile(backup_file, 'r') as fp:
backup_valid = fp.validate()
if check_nsamples:
# This check is not required by nested samplers
# check that the checkpoint and backup have the same number of samples;
# if not, assume the checkpoint has the correct number
if checkpoint_valid and backup_valid:
with loadfile(checkpoint_file, 'r') as fp:
group = list(fp[fp.samples_group].keys())[0]
nsamples = fp[fp.samples_group][group].size
with loadfile(backup_file, 'r') as fp:
group = list(fp[fp.samples_group].keys())[0]
backup_nsamples = fp[fp.samples_group][group].size
backup_valid = nsamples == backup_nsamples
# decide what to do based on the files' statuses
if checkpoint_valid and not backup_valid:
# copy the checkpoint to the backup
logging.info("Backup invalid; copying checkpoint file")
shutil.copy(checkpoint_file, backup_file)
backup_valid = True
elif backup_valid and not checkpoint_valid:
logging.info("Checkpoint invalid; copying backup file")
# copy the backup to the checkpoint
shutil.copy(backup_file, checkpoint_file)
checkpoint_valid = True
return checkpoint_valid
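# A minimal sketch of how a sampler might decide whether it can resume from
# its checkpoint; the file names are hypothetical and the helper exists only
# for illustration.
def _example_validate_checkpoint(checkpoint='run.hdf.checkpoint',
                                 backup='run.hdf.bkup'):
    """Hedged usage sketch for ``validate_checkpoint_files``."""
    if validate_checkpoint_files(checkpoint, backup):
        logging.info("Checkpoint is usable; resuming")
        return True
    logging.info("Checkpoint unusable; starting from scratch")
    return False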
#
# =============================================================================
#
# Command-line Utilities
#
# =============================================================================
#
def get_common_parameters(input_files, collection=None):
"""Gets a list of variable params that are common across all input files.
If no common parameters are found, a ``ValueError`` is raised.
Parameters
----------
input_files : list of str
List of input files to load.
collection : str, optional
What group of parameters to load. Can be the name of a list of
parameters stored in the files' attrs (e.g., "variable_params"), or
"all". If "all", will load all of the parameters in the files'
samples group. Default is to load all.
Returns
-------
list :
List of the parameter names.
"""
if collection is None:
collection = "all"
parameters = []
for fn in input_files:
fp = loadfile(fn, 'r')
if collection == 'all':
ps = fp[fp.samples_group].keys()
else:
ps = fp.attrs[collection]
parameters.append(set(ps))
fp.close()
parameters = list(set.intersection(*parameters))
if parameters == []:
raise ValueError("no common parameters found for collection {} in "
"files {}".format(collection, ', '.join(input_files)))
# if using python 3 to read a file created in python 2, need to convert
# parameters to strs
try:
parameters = [p.decode() for p in parameters]
except AttributeError:
pass
return parameters
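# A minimal sketch of ``get_common_parameters``: finds the varied parameters
# shared by two hypothetical result files. The helper exists only for
# illustration.
def _example_common_parameters():
    """Hedged usage sketch for ``get_common_parameters``."""
    files = ['run1.hdf', 'run2.hdf']  # hypothetical result files
    # restrict to parameters that were varied in both runs
    return get_common_parameters(files, collection='variable_params')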
class NoInputFileError(Exception):
"""Raised in custom argparse Actions by arguments needing input-files when
no file(s) were provided."""
pass
class PrintFileParams(argparse.Action):
"""Argparse action that will load input files and print possible parameters
to screen. Once this is done, the program is forced to exit immediately.
    The behavior is similar to --help, except that the input-file is read.
.. note::
The ``input_file`` attribute must be set in the parser namespace before
this action is called. Otherwise, a ``NoInputFileError`` is raised.
"""
def __init__(self, skip_args=None, nargs=0, **kwargs):
if nargs != 0:
raise ValueError("nargs for this action must be 0")
super(PrintFileParams, self).__init__(nargs=nargs, **kwargs)
self.skip_args = skip_args
def __call__(self, parser, namespace, values, option_string=None):
# get the input file(s)
input_files = namespace.input_file
if input_files is None:
# see if we should raise an error
try:
raise_err = not parser.no_input_file_err
except AttributeError:
raise_err = True
if raise_err:
raise NoInputFileError("must provide at least one input file")
else:
# just return to stop further processing
return
filesbytype = {}
fileparsers = {}
for fn in input_files:
fp = loadfile(fn, 'r')
try:
filesbytype[fp.name].append(fn)
except KeyError:
filesbytype[fp.name] = [fn]
# get any extra options
fileparsers[fp.name], _ = fp.extra_args_parser(
skip_args=self.skip_args, add_help=False)
fp.close()
# now print information about the intersection of all parameters
parameters = get_common_parameters(input_files, collection='all')
print("\n"+textwrap.fill("Parameters available with this (these) "
"input file(s):"), end="\n\n")
print(textwrap.fill(' '.join(sorted(parameters))),
end="\n\n")
# information about the pycbc functions
pfuncs = sorted(FieldArray.functionlib.fget(FieldArray).keys())
print(textwrap.fill("Available pycbc functions (see "
"http://pycbc.org/pycbc/latest/html for "
"more details):"), end="\n\n")
print(textwrap.fill(', '.join(pfuncs)), end="\n\n")
# numpy funcs
npfuncs = sorted([name for (name, obj) in _numpy_function_lib.items()
if isinstance(obj, numpy.ufunc)])
print(textwrap.fill("Available numpy functions:"),
end="\n\n")
print(textwrap.fill(', '.join(npfuncs)), end="\n\n")
# misc
consts = "e euler_gamma inf nan pi"
print(textwrap.fill("Recognized constants:"),
end="\n\n")
print(consts, end="\n\n")
print(textwrap.fill("Python arthimetic (+ - * / // ** %), "
"binary (&, |, etc.), and comparison (>, <, >=, "
"etc.) operators may also be used."), end="\n\n")
# print out the extra arguments that may be used
outstr = textwrap.fill("The following are additional command-line "
"options that may be provided, along with the "
"input files that understand them:")
print("\n"+outstr, end="\n\n")
for ftype, fparser in fileparsers.items():
fnames = ', '.join(filesbytype[ftype])
if fparser is None:
outstr = textwrap.fill(
"File(s) {} use no additional options.".format(fnames))
print(outstr, end="\n\n")
else:
fparser.usage = fnames
fparser.print_help()
parser.exit(0)
class ResultsArgumentParser(argparse.ArgumentParser):
"""Wraps argument parser, and preloads arguments needed for loading samples
from a file.
This parser class should be used by any program that wishes to use the
standard arguments for loading samples. It provides functionality to parse
file specific options. These file-specific arguments are not included in
the standard ``--help`` (since they depend on what input files are given),
but can be seen by running ``--file-help/-H``. The ``--file-help`` will
also print off information about what parameters may be used given the
input files.
As with the standard ``ArgumentParser``, running this class's
``parse_args`` will result in an error if arguments are provided that are
not recognized by the parser, nor by any of the file-specific arguments.
For example, ``parse_args`` would work on the command
``--input-file results.hdf --walker 0`` if
``results.hdf`` was created by a sampler that recognizes a ``--walker``
argument, but would raise an error if ``results.hdf`` was created by a
sampler that does not recognize a ``--walker`` argument. The extra
arguments that are recognized are determined by the sampler IO class's
``extra_args_parser``.
Some arguments may be excluded from the parser using the ``skip_args``
optional parameter.
Parameters
----------
skip_args : list of str, optional
Do not add the given arguments to the parser. Arguments should be
specified as the option string minus the leading '--'; e.g.,
``skip_args=['thin-start']`` would cause the ``thin-start`` argument
to not be included. May also specify sampler-specific arguments. Note
that ``input-file``, ``file-help``, and ``parameters`` are always
added.
defaultparams : {'variable_params', 'all'}, optional
If no ``--parameters`` provided, which collection of parameters to
load. If 'all' will load all parameters in the file's
``samples_group``. If 'variable_params' or None (the default) will load
the variable parameters.
autoparamlabels : bool, optional
Passed to ``add_results_option_group``; see that function for details.
\**kwargs :
All other keyword arguments are passed to ``argparse.ArgumentParser``.
"""
def __init__(self, skip_args=None, defaultparams=None,
autoparamlabels=True, **kwargs):
super(ResultsArgumentParser, self).__init__(**kwargs)
# add attribute to communicate to arguments what to do when there is
# no input files
self.no_input_file_err = False
if skip_args is None:
skip_args = []
self.skip_args = skip_args
if defaultparams is None:
defaultparams = 'variable_params'
self.defaultparams = defaultparams
        # add the results option group
self.add_results_option_group(autoparamlabels=autoparamlabels)
@property
def actions(self):
"""Exposes the actions this parser can do as a dictionary.
The dictionary maps the ``dest`` to actions.
"""
return {act.dest: act for act in self._actions}
def _unset_required(self):
"""Convenience function to turn off required arguments for first parse.
"""
self._required_args = [act for act in self._actions if act.required]
for act in self._required_args:
act.required = False
def _reset_required(self):
"""Convenience function to turn required arguments back on.
"""
for act in self._required_args:
act.required = True
def parse_known_args(self, args=None, namespace=None):
"""Parse args method to handle input-file dependent arguments."""
# run parse args once to make sure the name space is populated with
# the input files. We'll turn off raising NoInputFileErrors on this
# pass
self.no_input_file_err = True
self._unset_required()
opts, extra_opts = super(ResultsArgumentParser, self).parse_known_args(
args, namespace)
# now do it again
self.no_input_file_err = False
self._reset_required()
opts, extra_opts = super(ResultsArgumentParser, self).parse_known_args(
args, opts)
# populate the parameters option if it wasn't specified
if opts.parameters is None or opts.parameters == ['*']:
parameters = get_common_parameters(opts.input_file,
collection=self.defaultparams)
# now call parse parameters action to re-populate the namespace
self.actions['parameters'](self, opts, parameters)
# check if we're being greedy or not
elif '*' in opts.parameters:
# remove the * from the parameters and the labels
opts.parameters = [p for p in opts.parameters if p != '*']
opts.parameters_labels.pop('*', None)
# add the rest of the parameters not used
all_params = get_common_parameters(opts.input_file,
collection=self.defaultparams)
# extract the used parameters from the parameters option
used_params = FieldArray.parse_parameters(opts.parameters,
all_params)
add_params = set(all_params) - set(used_params)
# repopulate the name space with the additional parameters
if add_params:
opts.parameters += list(add_params)
# update the labels
opts.parameters_labels.update({p: p for p in add_params})
# parse the sampler-specific options and check for any unknowns
unknown = []
for fn in opts.input_file:
fp = loadfile(fn, 'r')
sampler_parser, _ = fp.extra_args_parser(skip_args=self.skip_args)
if sampler_parser is not None:
opts, still_unknown = sampler_parser.parse_known_args(
extra_opts, namespace=opts)
unknown.append(set(still_unknown))
# the intersection of the unknowns are options not understood by
# any of the files
if len(unknown) > 0:
unknown = set.intersection(*unknown)
return opts, list(unknown)
def add_results_option_group(self, autoparamlabels=True):
"""Adds the options used to call pycbc.inference.io.results_from_cli
function to the parser.
        These are options related to loading the results from a run of
pycbc_inference, for purposes of plotting and/or creating tables.
Any argument strings included in the ``skip_args`` attribute will not
be added.
Parameters
----------
autoparamlabels : bool, optional
If True, the ``--parameters`` option will use labels from
``waveform.parameters`` if a parameter name is the same as a
parameter there. Otherwise, will just use whatever label is
provided. Default is True.
"""
results_reading_group = self.add_argument_group(
title="Arguments for loading results",
description="Additional, file-specific arguments may also be "
"provided, depending on what input-files are given. See "
"--file-help for details.")
results_reading_group.add_argument(
"--input-file", type=str, required=True, nargs="+",
action=ParseLabelArg, metavar='FILE[:LABEL]',
help="Path to input HDF file(s). A label may be specified for "
"each input file to use for plots when multiple files are "
"specified.")
# advanced help
results_reading_group.add_argument(
"-H", "--file-help",
action=PrintFileParams, skip_args=self.skip_args,
help="Based on the provided input-file(s), print all available "
"parameters that may be retrieved and all possible functions "
"on those parameters. Also print available additional "
"arguments that may be passed. This option is like an "
"advanced --help: if run, the program will just print the "
"information to screen, then exit.")
if autoparamlabels:
paramparser = ParseParametersArg
lblhelp = (
"If LABEL is the same as a parameter in "
"pycbc.waveform.parameters, the label "
"property of that parameter will be used (e.g., if LABEL "
"were 'mchirp' then {} would be used). "
.format(_waveform.parameters.mchirp.label))
else:
paramparser = ParseLabelArg
lblhelp = ''
results_reading_group.add_argument(
"--parameters", type=str, nargs="+", metavar="PARAM[:LABEL]",
action=paramparser,
help="Name of parameters to load; default is to load all. The "
"parameters can be any of the model params or posterior "
"stats (loglikelihood, logprior, etc.) in the input file(s), "
"derived parameters from them, or any function of them. If "
"multiple files are provided, any parameter common to all "
"files may be used. Syntax for functions is python; any math "
"functions in the numpy libary may be used. Can optionally "
"also specify a LABEL for each parameter. If no LABEL is "
"provided, PARAM will used as the LABEL. {}"
"To see all possible parameters that may be used with the "
"given input file(s), as well as all avaiable functions, "
"run --file-help, along with one or more input files. "
"If '*' is provided in addition to other parameters names, "
"then parameters will be loaded in a greedy fashion; i.e., "
"all other parameters that exist in the file(s) that are not "
"explicitly mentioned will also be loaded. For example, "
"if the input-file(s) contains 'srcmass1', "
"'srcmass2', and 'distance', and "
"\"'primary_mass(srcmass1, srcmass2):mass1' '*'\", is given "
"then 'mass1' and 'distance' will be loaded. Otherwise, "
"without the '*', only 'mass1' would be loaded. "
"Note that any parameter that is used in a function "
"will not automatically be added. Tip: enclose "
"arguments in single quotes, or else special characters will "
"be interpreted as shell commands. For example, the "
"wildcard should be given as either '*' or \\*, otherwise "
"bash will expand the * into the names of all the files in "
"the current directory."
.format(lblhelp))
results_reading_group.add_argument(
"--constraint", type=str, nargs="+", metavar="CONSTRAINT[:FILE]",
help="Apply a constraint to the samples. If a file is provided "
"after the constraint, it will only be applied to the given "
"file. Otherwise, the constraint will be applied to all "
"files. Only one constraint may be applied to a file. "
"Samples that violate the constraint will be removed. Syntax "
"is python; any parameter or function of parameter can be "
"used, similar to the parameters argument. Multiple "
"constraints may be combined by using '&' and '|'.")
return results_reading_group
def results_from_cli(opts, load_samples=True, **kwargs):
"""Loads an inference result file along with any labels associated with it
from the command line options.
Parameters
----------
opts : ArgumentParser options
The options from the command line.
load_samples : bool, optional
Load the samples from the file.
Returns
-------
fp_all : (list of) BaseInferenceFile type
The result file as an hdf file. If more than one input file,
then it returns a list.
parameters : list of str
List of the parameters to use, parsed from the parameters option.
labels : dict
Dictionary of labels to associate with the parameters.
samples_all : (list of) FieldArray(s) or None
If load_samples, the samples as a FieldArray; otherwise, None.
If more than one input file, then it returns a list.
\**kwargs :
Any other keyword arguments that are passed to read samples using
samples_from_cli
"""
# lists for files and samples from all input files
fp_all = []
samples_all = []
input_files = opts.input_file
if isinstance(input_files, str):
input_files = [input_files]
# load constraints
constraints = {}
if opts.constraint is not None:
for constraint in opts.constraint:
if len(constraint.split(':')) == 2:
constraint, fn = constraint.split(':')
constraints[fn] = constraint
# no file provided, make sure there's only one constraint
elif len(opts.constraint) > 1:
raise ValueError("must provide a file to apply constraints "
"to if providing more than one constraint")
else:
# this means no file, only one constraint, apply to all
# files
constraints = {fn: constraint for fn in input_files}
# loop over all input files
for input_file in input_files:
logging.info("Reading input file %s", input_file)
# read input file
fp = loadfile(input_file, "r")
# load the samples
if load_samples:
logging.info("Loading samples")
# read samples from file
samples = fp.samples_from_cli(opts, parameters=opts.parameters,
**kwargs)
logging.info("Loaded {} samples".format(samples.size))
if input_file in constraints:
logging.info("Applying constraints")
mask = samples[constraints[input_file]]
samples = samples[mask]
if samples.size == 0:
raise ValueError("No samples remain after constraint {} "
"applied".format(constraints[input_file]))
logging.info("{} samples remain".format(samples.size))
# else do not read samples
else:
samples = None
# add results to lists from all input files
if len(input_files) > 1:
fp_all.append(fp)
samples_all.append(samples)
# else only one input file then do not return lists
else:
fp_all = fp
samples_all = samples
return fp_all, opts.parameters, opts.parameters_labels, samples_all
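# A minimal sketch of the typical plotting-script pattern built on
# ``ResultsArgumentParser`` and ``results_from_cli``; the command-line values
# shown in the comment are hypothetical and the helper exists only for
# illustration.
def _example_results_from_cli(cli_args=None):
    """Hedged usage sketch for ``results_from_cli``."""
    parser = ResultsArgumentParser()
    # e.g. cli_args = ['--input-file', 'run1.hdf', '--parameters', 'mchirp']
    opts = parser.parse_args(cli_args)
    fp, params, labels, samples = results_from_cli(opts, load_samples=True)
    return params, labels, samples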
def injections_from_cli(opts):
"""Gets injection parameters from the inference file(s).
If the opts have a ``injection_samples_map`` option, the injection
parameters will be remapped accordingly. See
:py:func:`pycbc.inference.option_utils.add_injsamples_map_opt` for
details.
Parameters
----------
opts : argparser
Argparser object that has the command-line objects to parse.
Returns
-------
FieldArray
Array of the injection parameters from all of the input files given
by ``opts.input_file``.
"""
# see if a mapping was provided
if hasattr(opts, 'injection_samples_map') and opts.injection_samples_map:
param_map = [opt.split(':') for opt in opts.injection_samples_map]
else:
param_map = []
input_files = opts.input_file
if isinstance(input_files, str):
input_files = [input_files]
injections = None
# loop over all input files getting the injection files
for input_file in input_files:
fp = loadfile(input_file, 'r')
these_injs = fp.read_injections()
# apply mapping if it was provided
if param_map:
mapvals = {sp: these_injs[ip] for ip, sp in param_map}
# if any of the new parameters are the same as the old, just
# overwrite the values
common_params = set(these_injs.fieldnames) & set(mapvals.keys())
for param in common_params:
these_injs[param] = mapvals.pop(param)
# add the rest as new fields
ps = list(mapvals.keys())
these_injs = these_injs.add_fields([mapvals[p] for p in ps],
names=ps)
if injections is None:
injections = these_injs
else:
injections = injections.append(these_injs)
return injections
| 32,158
| 40.124041
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/multinest.py
|
# Copyright (C) 2018 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Provides I/O support for multinest.
"""
from .base_sampler import BaseSamplerFile
class MultinestFile(BaseSamplerFile):
"""Class to handle file IO for the ``multinest`` sampler."""
name = 'multinest_file'
def write_samples(self, samples, parameters=None):
"""Writes samples to the given file.
Results are written to ``samples_group/{vararg}``, where ``{vararg}``
is the name of a model params. The samples are written as an
array of length ``niterations``.
Parameters
----------
samples : dict
The samples to write. Each array in the dictionary should have
length niterations.
parameters : list, optional
Only write the specified parameters to the file. If None, will
write all of the keys in the ``samples`` dict.
"""
niterations = len(tuple(samples.values())[0])
assert all(len(p) == niterations for p in samples.values()), (
"all samples must have the same shape")
group = self.samples_group + '/{name}'
if parameters is None:
parameters = samples.keys()
# loop over number of dimensions
for param in parameters:
dataset_name = group.format(name=param)
try:
fp_niterations = len(self[dataset_name])
if niterations != fp_niterations:
# resize the dataset
self[dataset_name].resize(niterations, axis=0)
except KeyError:
# dataset doesn't exist yet
self.create_dataset(dataset_name, (niterations,),
maxshape=(None,),
dtype=samples[param].dtype,
fletcher32=True)
self[dataset_name][:] = samples[param]
def write_logevidence(self, lnz, dlnz, importance_lnz, importance_dlnz):
"""Writes the given log evidence and its error.
Results are saved to file's 'log_evidence' and 'dlog_evidence'
attributes, as well as the importance-weighted versions of these
stats if they exist.
Parameters
----------
lnz : float
The log of the evidence.
dlnz : float
The error in the estimate of the log evidence.
importance_lnz : float, optional
The importance-weighted log of the evidence.
importance_dlnz : float, optional
The error in the importance-weighted estimate of the log evidence.
"""
self.attrs['log_evidence'] = lnz
self.attrs['dlog_evidence'] = dlnz
if all([e is not None for e in [importance_lnz, importance_dlnz]]):
self.attrs['importance_log_evidence'] = importance_lnz
self.attrs['importance_dlog_evidence'] = importance_dlnz
def read_raw_samples(self, fields, iteration=None):
if isinstance(fields, str):
fields = [fields]
# load
group = self.samples_group + '/{name}'
arrays = {}
for name in fields:
if iteration is not None:
arr = self[group.format(name=name)][int(iteration)]
else:
arr = self[group.format(name=name)][:]
arrays[name] = arr
return arrays
def write_resume_point(self):
"""Keeps a list of the number of iterations that were in a file when a
run was resumed from a checkpoint."""
try:
resume_pts = self.attrs["resume_points"].tolist()
except KeyError:
resume_pts = []
try:
niterations = self.niterations
except KeyError:
niterations = 0
resume_pts.append(niterations)
self.attrs["resume_points"] = resume_pts
@property
def niterations(self):
"""Returns the number of iterations the sampler was run for."""
return self[self.sampler_group].attrs['niterations']
def write_niterations(self, niterations):
"""Writes the given number of iterations to the sampler group."""
self[self.sampler_group].attrs['niterations'] = niterations
def write_sampler_metadata(self, sampler):
"""Writes the sampler's metadata."""
self.attrs['sampler'] = sampler.name
if self.sampler_group not in self.keys():
# create the sampler group
self.create_group(self.sampler_group)
self[self.sampler_group].attrs['nlivepoints'] = sampler.nlivepoints
# write the model's metadata
sampler.model.write_metadata(self)
| 5,382
| 38.291971
| 78
|
py
|
pycbc
|
pycbc-master/pycbc/inference/io/posterior.py
|
# Copyright (C) 2018 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Provides simplified standard format just for posterior data
"""
from .base_hdf import BaseInferenceFile
class PosteriorFile(BaseInferenceFile):
"""Class to handle file IO for the simplified Posterior file."""
name = 'posterior_file'
def read_raw_samples(self, fields, **kwargs):
return read_raw_samples_from_file(self, fields, **kwargs)
def write_samples(self, samples, parameters=None):
return write_samples_to_file(self, samples, parameters=parameters)
def write_sampler_metadata(self, sampler):
sampler.model.write_metadata(self)
def write_resume_point(self):
pass
write_run_start_time = write_run_end_time = write_resume_point
def read_raw_samples_from_file(fp, fields, **kwargs):
samples = fp[fp.samples_group]
return {field: samples[field][:] for field in fields}
def write_samples_to_file(fp, samples, parameters=None, group=None):
"""Writes samples to the given file.
Results are written to ``samples_group/{vararg}``, where ``{vararg}``
    is the name of a model param. The samples are written as an
array of length ``niterations``.
Parameters
-----------
    fp : BaseInferenceFile
        An open file handler to write the samples to.
samples : dict
The samples to write. Each array in the dictionary should have
length niterations.
parameters : list, optional
Only write the specified parameters to the file. If None, will
write all of the keys in the ``samples`` dict.
"""
# check data dimensions; we'll just use the first array in samples
arr = list(samples.values())[0]
if not arr.ndim == 1:
raise ValueError("samples must be 1D arrays")
niterations = arr.size
assert all(len(p) == niterations
for p in samples.values()), (
"all samples must have the same shape")
if group is not None:
group = group + '/{name}'
else:
group = fp.samples_group + '/{name}'
if parameters is None:
parameters = samples.keys()
# loop over number of dimensions
for param in parameters:
dataset_name = group.format(name=param)
try:
fp_niterations = len(fp[dataset_name])
if niterations != fp_niterations:
# resize the dataset
fp[dataset_name].resize(niterations, axis=0)
except KeyError:
# dataset doesn't exist yet
fp.create_dataset(dataset_name, (niterations,),
maxshape=(None,),
dtype=samples[param].dtype,
fletcher32=True)
fp[dataset_name][:] = samples[param]
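# A minimal sketch of the expected input to ``write_samples_to_file``: a dict
# of equal-length 1D arrays. ``fp`` is assumed to be an open PosteriorFile and
# the parameter names are hypothetical; the helper exists only for
# illustration.
def _example_write_posterior(fp, mass1_samples, mass2_samples):
    """Hedged usage sketch for ``write_samples_to_file``."""
    samples = {'mass1': mass1_samples, 'mass2': mass2_samples}
    write_samples_to_file(fp, samples, parameters=['mass1', 'mass2'])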
| 3,653
| 34.823529
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/models/base.py
|
# Copyright (C) 2016 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Base class for models.
"""
import numpy
import logging
from abc import (ABCMeta, abstractmethod)
from configparser import NoSectionError
from pycbc import (transforms, distributions)
from pycbc.io import FieldArray
#
# =============================================================================
#
# Support classes
#
# =============================================================================
#
class _NoPrior(object):
"""Dummy class to just return 0 if no prior is given to a model.
"""
@staticmethod
def apply_boundary_conditions(**params):
return params
def __call__(self, **params):
return 0.
class ModelStats(object):
"""Class to hold model's current stat values."""
@property
def statnames(self):
"""Returns the names of the stats that have been stored."""
return list(self.__dict__.keys())
def getstats(self, names, default=numpy.nan):
"""Get the requested stats as a tuple.
If a requested stat is not an attribute (implying it hasn't been
stored), then the default value is returned for that stat.
Parameters
----------
names : list of str
The names of the stats to get.
default : float, optional
What to return if a requested stat is not an attribute of self.
Default is ``numpy.nan``.
Returns
-------
tuple
A tuple of the requested stats.
"""
return tuple(getattr(self, n, default) for n in names)
def getstatsdict(self, names, default=numpy.nan):
"""Get the requested stats as a dictionary.
If a requested stat is not an attribute (implying it hasn't been
stored), then the default value is returned for that stat.
Parameters
----------
names : list of str
The names of the stats to get.
default : float, optional
What to return if a requested stat is not an attribute of self.
Default is ``numpy.nan``.
Returns
-------
dict
A dictionary of the requested stats.
"""
return dict(zip(names, self.getstats(names, default=default)))
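# A small self-contained sketch of how stats are stored on and read back from
# a ``ModelStats`` instance; the numbers are arbitrary and the helper exists
# only for illustration.
def _example_modelstats():
    """Hedged sketch of ``ModelStats`` usage."""
    stats = ModelStats()
    stats.loglikelihood = -2.5
    # stats that were never stored fall back to the default (nan here)
    lnl, lnp = stats.getstats(['loglikelihood', 'logprior'])
    assert lnl == -2.5 and numpy.isnan(lnp)
    return stats.getstatsdict(['loglikelihood', 'logprior'])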
class SamplingTransforms(object):
"""Provides methods for transforming between sampling parameter space and
model parameter space.
"""
def __init__(self, variable_params, sampling_params,
replace_parameters, sampling_transforms):
assert len(replace_parameters) == len(sampling_params), (
"number of sampling parameters must be the "
"same as the number of replace parameters")
# pull out the replaced parameters
self.sampling_params = [arg for arg in variable_params
if arg not in replace_parameters]
# add the sampling parameters
self.sampling_params += sampling_params
# sort to make sure we have a consistent order
self.sampling_params.sort()
self.sampling_transforms = sampling_transforms
def logjacobian(self, **params):
r"""Returns the log of the jacobian needed to transform pdfs in the
``variable_params`` parameter space to the ``sampling_params``
parameter space.
Let :math:`\mathbf{x}` be the set of variable parameters,
:math:`\mathbf{y} = f(\mathbf{x})` the set of sampling parameters, and
:math:`p_x(\mathbf{x})` a probability density function defined over
:math:`\mathbf{x}`.
The corresponding pdf in :math:`\mathbf{y}` is then:
.. math::
p_y(\mathbf{y}) =
p_x(\mathbf{x})\left|\mathrm{det}\,\mathbf{J}_{ij}\right|,
where :math:`\mathbf{J}_{ij}` is the Jacobian of the inverse transform
:math:`\mathbf{x} = g(\mathbf{y})`. This has elements:
.. math::
\mathbf{J}_{ij} = \frac{\partial g_i}{\partial{y_j}}
This function returns
:math:`\log \left|\mathrm{det}\,\mathbf{J}_{ij}\right|`.
Parameters
----------
\**params :
The keyword arguments should specify values for all of the variable
args and all of the sampling args.
Returns
-------
float :
The value of the jacobian.
"""
return numpy.log(abs(transforms.compute_jacobian(
params, self.sampling_transforms, inverse=True)))
def apply(self, samples, inverse=False):
"""Applies the sampling transforms to the given samples.
Parameters
----------
samples : dict or FieldArray
The samples to apply the transforms to.
inverse : bool, optional
Whether to apply the inverse transforms (i.e., go from the sampling
args to the ``variable_params``). Default is False.
Returns
-------
dict or FieldArray
The transformed samples, along with the original samples.
"""
return transforms.apply_transforms(samples, self.sampling_transforms,
inverse=inverse)
@classmethod
def from_config(cls, cp, variable_params):
"""Gets sampling transforms specified in a config file.
Sampling parameters and the parameters they replace are read from the
``sampling_params`` section, if it exists. Sampling transforms are
read from the ``sampling_transforms`` section(s), using
``transforms.read_transforms_from_config``.
        A ``NoSectionError`` is raised if no ``sampling_params`` section
exists in the config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
variable_params : list
List of parameter names of the original variable params.
Returns
-------
SamplingTransforms
A sampling transforms class.
"""
# Check if a sampling_params section is provided
try:
sampling_params, replace_parameters = \
read_sampling_params_from_config(cp)
except NoSectionError as e:
logging.warning("No sampling_params section read from config file")
raise e
# get sampling transformations
sampling_transforms = transforms.read_transforms_from_config(
cp, 'sampling_transforms')
logging.info("Sampling in {} in place of {}".format(
', '.join(sampling_params), ', '.join(replace_parameters)))
return cls(variable_params, sampling_params,
replace_parameters, sampling_transforms)
def read_sampling_params_from_config(cp, section_group=None,
section='sampling_params'):
"""Reads sampling parameters from the given config file.
Parameters are read from the `[({section_group}_){section}]` section.
The options should list the variable args to transform; the parameters they
point to should list the parameters they are to be transformed to for
    sampling. If multiple parameters are transformed together, they should
be comma separated. Example:
.. code-block:: ini
[sampling_params]
mass1, mass2 = mchirp, logitq
spin1_a = logitspin1_a
Note that only the final sampling parameters should be listed, even if
multiple intermediate transforms are needed. (In the above example, a
transform is needed to go from mass1, mass2 to mchirp, q, then another one
needed to go from q to logitq.) These transforms should be specified
in separate sections; see ``transforms.read_transforms_from_config`` for
details.
Parameters
----------
cp : WorkflowConfigParser
An open config parser to read from.
section_group : str, optional
Append `{section_group}_` to the section name. Default is None.
section : str, optional
The name of the section. Default is 'sampling_params'.
Returns
-------
sampling_params : list
The list of sampling parameters to use instead.
replaced_params : list
The list of variable args to replace in the sampler.
"""
if section_group is not None:
section_prefix = '{}_'.format(section_group)
else:
section_prefix = ''
section = section_prefix + section
replaced_params = set()
sampling_params = set()
for args in cp.options(section):
map_args = cp.get(section, args)
sampling_params.update(set(map(str.strip, map_args.split(','))))
replaced_params.update(set(map(str.strip, args.split(','))))
return sorted(sampling_params), sorted(replaced_params)
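# A minimal sketch of what ``read_sampling_params_from_config`` returns for
# the ini example in its docstring; ``cp`` is assumed to be an open
# WorkflowConfigParser containing that [sampling_params] section, and the
# helper exists only for illustration.
def _example_read_sampling_params(cp):
    """Hedged usage sketch for ``read_sampling_params_from_config``."""
    sampling_params, replaced = read_sampling_params_from_config(cp)
    # for the example section above this gives
    #   sampling_params == ['logitq', 'logitspin1_a', 'mchirp']
    #   replaced == ['mass1', 'mass2', 'spin1_a']
    return sampling_params, replaced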
#
# =============================================================================
#
# Base model definition
#
# =============================================================================
#
class BaseModel(metaclass=ABCMeta):
r"""Base class for all models.
Given some model :math:`h` with parameters :math:`\Theta`, Bayes' theorem
states that the probability of observing parameter values :math:`\vartheta`
is:
.. math::
p(\vartheta|h) = \frac{p(h|\vartheta) p(\vartheta)}{p(h)}.
Here:
* :math:`p(\vartheta|h)` is the **posterior** probability;
* :math:`p(h|\vartheta)` is the **likelihood**;
* :math:`p(\vartheta)` is the **prior**;
* :math:`p(h)` is the **evidence**.
This class defines properties and methods for evaluating the log
likelihood, log prior, and log posterior. A set of parameter values is set
using the ``update`` method. Calling the class's
``log(likelihood|prior|posterior)`` properties will then evaluate the model
at those parameter values.
Classes that inherit from this class must implement a ``_loglikelihood``
function that can be called by ``loglikelihood``.
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied.
static_params : dict, optional
A dictionary of parameter names -> values to keep fixed.
prior : callable, optional
A callable class or function that computes the log of the prior. If
None provided, will use ``_NoPrior``, which returns 0 for all parameter
values.
sampling_params : list, optional
Replace one or more of the ``variable_params`` with the given
parameters for sampling.
replace_parameters : list, optional
The ``variable_params`` to replace with sampling parameters. Must be
the same length as ``sampling_params``.
sampling_transforms : list, optional
List of transforms to use to go between the ``variable_params`` and the
sampling parameters. Required if ``sampling_params`` is not None.
waveform_transforms : list, optional
A list of transforms to convert the ``variable_params`` into something
understood by the likelihood model. This is useful if the prior is
more easily parameterized in parameters that are different than what
the likelihood is most easily defined in. Since these are used solely
for converting parameters, and not for rescaling the parameter space,
a Jacobian is not required for these transforms.
"""
name = None
def __init__(self, variable_params, static_params=None, prior=None,
sampling_transforms=None, waveform_transforms=None, **kwargs):
# store variable and static args
self.variable_params = variable_params
self.static_params = static_params
# store prior
if prior is None:
self.prior_distribution = _NoPrior()
elif set(prior.variable_args) != set(variable_params):
raise ValueError("variable params of prior and model must be the "
"same")
else:
self.prior_distribution = prior
# store transforms
self.sampling_transforms = sampling_transforms
self.waveform_transforms = waveform_transforms
# initialize current params to None
self._current_params = None
# initialize a model stats
self._current_stats = ModelStats()
@property
def variable_params(self):
"""Returns the model parameters."""
return self._variable_params
@variable_params.setter
def variable_params(self, variable_params):
if isinstance(variable_params, str):
variable_params = (variable_params,)
if not isinstance(variable_params, tuple):
variable_params = tuple(variable_params)
self._variable_params = variable_params
@property
def static_params(self):
"""Returns the model's static arguments."""
return self._static_params
@static_params.setter
def static_params(self, static_params):
if static_params is None:
static_params = {}
self._static_params = static_params
@property
def sampling_params(self):
"""Returns the sampling parameters.
If ``sampling_transforms`` is None, this is the same as the
``variable_params``.
"""
if self.sampling_transforms is None:
sampling_params = self.variable_params
else:
sampling_params = self.sampling_transforms.sampling_params
return sampling_params
def update(self, **params):
"""Updates the current parameter positions and resets stats.
If any sampling transforms are specified, they are applied to the
params before being stored.
"""
# add the static params
values = self.static_params.copy()
values.update(params)
self._current_params = self._transform_params(**values)
self._current_stats = ModelStats()
@property
def current_params(self):
if self._current_params is None:
raise ValueError("no parameters values currently stored; "
"run update to add some")
return self._current_params
@property
def default_stats(self):
"""The stats that ``get_current_stats`` returns by default."""
return ['logjacobian', 'logprior', 'loglikelihood'] + self._extra_stats
@property
def _extra_stats(self):
"""Allows child classes to add more stats to the default stats.
This returns an empty list; classes that inherit should override this
property if they want to add extra stats.
"""
return []
def get_current_stats(self, names=None):
"""Return one or more of the current stats as a tuple.
This function does no computation. It only returns what has already
been calculated. If a stat hasn't been calculated, it will be returned
as ``numpy.nan``.
Parameters
----------
names : list of str, optional
Specify the names of the stats to retrieve. If ``None`` (the
default), will return ``default_stats``.
Returns
-------
tuple :
The current values of the requested stats, as a tuple. The order
of the stats is the same as the names.
"""
if names is None:
names = self.default_stats
return self._current_stats.getstats(names)
@property
def current_stats(self):
"""Return the ``default_stats`` as a dict.
This does no computation. It only returns what has already been
calculated. If a stat hasn't been calculated, it will be returned
as ``numpy.nan``.
Returns
-------
dict :
Dictionary of stat names -> current stat values.
"""
return self._current_stats.getstatsdict(self.default_stats)
def _trytoget(self, statname, fallback, apply_transforms=False, **kwargs):
r"""Helper function to get a stat from ``_current_stats``.
If the statistic hasn't been calculated, ``_current_stats`` will raise
an ``AttributeError``. In that case, the ``fallback`` function will
be called. If that call is successful, the ``statname`` will be added
to ``_current_stats`` with the returned value.
Parameters
----------
statname : str
The stat to get from ``current_stats``.
fallback : method of self
The function to call if the property call fails.
apply_transforms : bool, optional
Apply waveform transforms to the current parameters before calling
the fallback function. Default is False.
\**kwargs :
Any other keyword arguments are passed through to the function.
Returns
-------
float :
The value of the property.
"""
try:
return getattr(self._current_stats, statname)
except AttributeError:
# apply waveform transforms if requested
if apply_transforms and self.waveform_transforms is not None:
self._current_params = transforms.apply_transforms(
self._current_params, self.waveform_transforms,
inverse=False)
val = fallback(**kwargs)
setattr(self._current_stats, statname, val)
return val
@property
def loglikelihood(self):
"""The log likelihood at the current parameters.
This will initially try to return the ``current_stats.loglikelihood``.
If that raises an ``AttributeError``, will call ``_loglikelihood`` to
calculate it and store it to ``current_stats``.
"""
return self._trytoget('loglikelihood', self._loglikelihood,
apply_transforms=True)
@abstractmethod
def _loglikelihood(self):
"""Low-level function that calculates the log likelihood of the current
params."""
pass
@property
def logjacobian(self):
"""The log jacobian of the sampling transforms at the current postion.
If no sampling transforms were provided, will just return 0.
Parameters
----------
\**params :
The keyword arguments should specify values for all of the variable
args and all of the sampling args.
Returns
-------
float :
The value of the jacobian.
"""
return self._trytoget('logjacobian', self._logjacobian)
def _logjacobian(self):
"""Calculates the logjacobian of the current parameters."""
if self.sampling_transforms is None:
logj = 0.
else:
logj = self.sampling_transforms.logjacobian(
**self.current_params)
return logj
@property
def logprior(self):
"""Returns the log prior at the current parameters."""
return self._trytoget('logprior', self._logprior)
def _logprior(self):
"""Calculates the log prior at the current parameters."""
logj = self.logjacobian
logp = self.prior_distribution(**self.current_params) + logj
if numpy.isnan(logp):
logp = -numpy.inf
return logp
@property
def logposterior(self):
"""Returns the log of the posterior of the current parameter values.
The logprior is calculated first. If the logprior returns ``-inf``
(possibly indicating a non-physical point), then the ``loglikelihood``
is not called.
"""
logp = self.logprior
if logp == -numpy.inf:
return logp
else:
return logp + self.loglikelihood
def prior_rvs(self, size=1, prior=None):
"""Returns random variates drawn from the prior.
If the ``sampling_params`` are different from the ``variable_params``,
the variates are transformed to the ``sampling_params`` parameter space
before being returned.
Parameters
----------
size : int, optional
Number of random values to return for each parameter. Default is 1.
prior : JointDistribution, optional
Use the given prior to draw values rather than the saved prior.
Returns
-------
FieldArray
A field array of the random values.
"""
# draw values from the prior
if prior is None:
prior = self.prior_distribution
p0 = prior.rvs(size=size)
# transform if necessary
if self.sampling_transforms is not None:
ptrans = self.sampling_transforms.apply(p0)
# pull out the sampling args
p0 = FieldArray.from_arrays([ptrans[arg]
for arg in self.sampling_params],
names=self.sampling_params)
return p0
def _transform_params(self, **params):
"""Applies sampling transforms and boundary conditions to parameters.
Parameters
----------
\**params :
Key, value pairs of parameters to apply the transforms to.
Returns
-------
dict
A dictionary of the transformed parameters.
"""
# apply inverse transforms to go from sampling parameters to
# variable args
if self.sampling_transforms is not None:
params = self.sampling_transforms.apply(params, inverse=True)
# apply boundary conditions
params = self.prior_distribution.apply_boundary_conditions(**params)
return params
#
# Methods for initiating from a config file.
#
@staticmethod
def extra_args_from_config(cp, section, skip_args=None, dtypes=None):
"""Gets any additional keyword in the given config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
section : str
The name of the section to read.
skip_args : list of str, optional
Names of arguments to skip.
dtypes : dict, optional
A dictionary of arguments -> data types. If an argument is found
in the dict, it will be cast to the given datatype. Otherwise, the
argument's value will just be read from the config file (and thus
be a string).
Returns
-------
dict
Dictionary of keyword arguments read from the config file.
"""
kwargs = {}
if dtypes is None:
dtypes = {}
if skip_args is None:
skip_args = []
read_args = [opt for opt in cp.options(section)
if opt not in skip_args]
for opt in read_args:
val = cp.get(section, opt)
# try to cast the value if a datatype was specified for this opt
try:
val = dtypes[opt](val)
except KeyError:
pass
kwargs[opt] = val
return kwargs
@staticmethod
def prior_from_config(cp, variable_params, static_params, prior_section,
constraint_section):
"""Gets arguments and keyword arguments from a config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
variable_params : list
List of variable model parameter names.
static_params : dict
Dictionary of static model parameters and their values.
prior_section : str
Section to read prior(s) from.
constraint_section : str
Section to read constraint(s) from.
Returns
-------
pycbc.distributions.JointDistribution
The prior.
"""
# get prior distribution for each variable parameter
logging.info("Setting up priors for each parameter")
dists = distributions.read_distributions_from_config(cp, prior_section)
constraints = distributions.read_constraints_from_config(
cp, constraint_section, static_args=static_params)
return distributions.JointDistribution(variable_params, *dists,
constraints=constraints)
@classmethod
def _init_args_from_config(cls, cp):
"""Helper function for loading parameters.
This retrieves the prior, variable parameters, static parameters,
constraints, sampling transforms, and waveform transforms
(if provided).
Parameters
----------
cp : ConfigParser
Config parser to read.
Returns
-------
dict :
Dictionary of the arguments. Has keys ``variable_params``,
``static_params``, ``prior``, and ``sampling_transforms``. If
waveform transforms are in the config file, will also have
``waveform_transforms``.
"""
section = "model"
prior_section = "prior"
vparams_section = 'variable_params'
sparams_section = 'static_params'
constraint_section = 'constraint'
# check that the name exists and matches
name = cp.get(section, 'name')
if name != cls.name:
raise ValueError("section's {} name does not match mine {}".format(
name, cls.name))
# get model parameters
variable_params, static_params = distributions.read_params_from_config(
cp, prior_section=prior_section, vargs_section=vparams_section,
sargs_section=sparams_section)
# get prior
prior = cls.prior_from_config(
cp, variable_params, static_params, prior_section,
constraint_section)
args = {'variable_params': variable_params,
'static_params': static_params,
'prior': prior}
# try to load sampling transforms
try:
sampling_transforms = SamplingTransforms.from_config(
cp, variable_params)
except NoSectionError:
sampling_transforms = None
args['sampling_transforms'] = sampling_transforms
# get any waveform transforms
if any(cp.get_subsections('waveform_transforms')):
logging.info("Loading waveform transforms")
waveform_transforms = transforms.read_transforms_from_config(
cp, 'waveform_transforms')
args['waveform_transforms'] = waveform_transforms
else:
waveform_transforms = []
# safety check for spins
# we won't do this if the following exists in the config file
ignore = "no_err_on_missing_cartesian_spins"
check_for_cartesian_spins(1, variable_params, static_params,
waveform_transforms, cp, ignore)
check_for_cartesian_spins(2, variable_params, static_params,
waveform_transforms, cp, ignore)
return args
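# Illustrative sketch of the config layout consumed above (parameter names and
# values are hypothetical; the model name must match a concrete model's
# ``name`` attribute):
#
#     [model]
#     name = <model name>
#
#     [variable_params]
#     distance =
#
#     [static_params]
#     approximant = IMRPhenomD
#
#     [prior-distance]
#     name = uniform
#     min-distance = 10
#     max-distance = 1000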
@classmethod
def from_config(cls, cp, **kwargs):
"""Initializes an instance of this class from the given config file.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
\**kwargs :
All additional keyword arguments are passed to the class. Any
provided keyword will override what is in the config file.
"""
args = cls._init_args_from_config(cp)
# get any other keyword arguments provided in the model section
args.update(cls.extra_args_from_config(cp, "model",
skip_args=['name']))
args.update(kwargs)
return cls(**args)
def write_metadata(self, fp, group=None):
"""Writes metadata to the given file handler.
Parameters
----------
fp : pycbc.inference.io.BaseInferenceFile instance
The inference file to write to.
group : str, optional
If provided, the metadata will be written to the attrs specified
by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is
written to the top-level attrs (``fp.attrs``).
"""
attrs = fp.getattrs(group=group)
attrs['model'] = self.name
attrs['variable_params'] = list(map(str, self.variable_params))
attrs['sampling_params'] = list(map(str, self.sampling_params))
fp.write_kwargs_to_attrs(attrs, static_params=self.static_params)
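# Illustrative sketch (not part of the original API): a minimal concrete model
# built on ``BaseModel``, showing the ``update`` -> ``loglikelihood`` /
# ``logposterior`` workflow described in the class docstring. The Gaussian
# log likelihood and the parameter name ``x`` are toy choices.
def _example_toy_model():
    class _ToyModel(BaseModel):
        name = 'toy'
        def _loglikelihood(self):
            # unit-variance Gaussian centred on x = 2 (toy choice)
            x = self.current_params['x']
            return -0.5 * (x - 2.) ** 2
    model = _ToyModel(variable_params='x')
    model.update(x=1.5)
    # with no prior supplied, the log prior is 0 here, so the log posterior
    # equals the log likelihood
    return model.loglikelihood, model.logposterior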
def check_for_cartesian_spins(which, variable_params, static_params,
waveform_transforms, cp, ignore):
"""Checks that if any spin parameters exist, cartesian spins also exist.
This looks for parameters starting with ``spinN`` in the variable and
static params, where ``N`` is either 1 or 2 (specified by the ``which``
argument). If any parameters are found with those names, the params and
the output of the waveform transforms are checked to see that there is
at least one of ``spinN(x|y|z)``. If not, a ``ValueError`` is raised.
This check will not be done if the config file has a section given by
the ignore argument.
Parameters
----------
which : {1, 2}
Which component to check for. Must be either 1 or 2.
variable_params : list
List of the variable parameters.
static_params : dict
The dictionary of static params.
waveform_transforms : list
List of the transforms that will be applied to the variable and
static params before being passed to the waveform generator.
cp : ConfigParser
The config file.
ignore : str
The section to check for in the config file. If the section is
present in the config file, the check will not be done.
"""
# don't do this check if the config file has the ignore section
if cp.has_section(ignore):
logging.info("[{}] found in config file; not performing check for "
"cartesian spin{} parameters".format(ignore, which))
return
errmsg = (
"Spin parameters {sp} found in variable/static "
"params for component {n}, but no Cartesian spin parameters ({cp}) "
"found in either the variable/static params or "
"the waveform transform outputs. Most waveform "
"generators only recognize Cartesian spin "
"parameters; without them, all spins are set to "
"zero. If you are using spherical spin coordinates, add "
"the following waveform_transform to your config file:\n\n"
"[waveform_transforms-spin{n}x+spin{n}y+spin{n}z]\n"
"name = spherical_to_cartesian\n"
"x = spin{n}x\n"
"y = spin{n}y\n"
"z = spin{n}z\n"
"radial = spin{n}_a\n"
"azimuthal = spin{n}_azimuthal\n"
"polar = spin{n}_polar\n\n"
"Here, spin{n}_a, spin{n}_azimuthal, and spin{n}_polar are the names "
"of your radial, azimuthal, and polar coordinates, respectively. "
"If you intentionally did not include Cartesian spin parameters, "
"(e.g., you are using a custom waveform or model) add\n\n"
"[{ignore}]\n\n"
"to your config file as an empty section and rerun. This check will "
"not be performed in that case.")
allparams = set(variable_params) | set(static_params.keys())
spinparams = set(p for p in allparams
if p.startswith('spin{}'.format(which)))
if any(spinparams):
cartspins = set('spin{}{}'.format(which, coord)
for coord in ['x', 'y', 'z'])
# add any parameters to all params that will be output by waveform
# transforms
allparams = allparams.union(*[t.outputs for t in waveform_transforms])
if not any(allparams & cartspins):
raise ValueError(errmsg.format(sp=', '.join(spinparams),
cp=', '.join(cartspins),
n=which, ignore=ignore))
| 33,142
| 36.323198
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/models/hierarchical.py
|
# Copyright (C) 2022 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Hierarchical model definitions."""
import shlex
import logging
from pycbc import transforms
from pycbc.workflow import WorkflowConfigParser
from .base import BaseModel
#
# =============================================================================
#
# Hierarchical model definition
#
# =============================================================================
#
class HierarchicalModel(BaseModel):
r"""Model that is a combination of other models.
Sub-models are treated as being independent of each other, although
they can share parameters. In other words, the hierarchical likelihood is:
.. math::
p(\mathbf{D}|\mathbf{\vartheta}, \mathbf{H}) =
\prod_{I=1}^{K} p(\mathbf{d}_I|\mathbf{\vartheta}, H_{I})
Submodels are provided as a dictionary upon initialization with a unique
label assigned to each model, e.g., ``{'event1' -> model1, 'event2' ->
model2}``. Variable and static parameters that are specific to each
submodel should be prepended with ``{label}__``, where ``{label}`` is the
label associated with the given submodel. Shared parameters across multiple
models have no labels prepended. To specify parameters shared by a subset of
models, separate the model labels with an underscore. For example,
``event1_event2__foo`` will result in ``foo`` being common between models
``event1`` and ``event2``. For more details on parameter naming see
:py:class:`HierarchicalParam
<pycbc.inference.models.hierarchical.HierarchicalParam>`.
All waveform and sampling transforms, as well as prior evaluation, are
handled by this model, not the sub-models. Parameters created by waveform
transforms should therefore also have sub-model names prepended to them,
to indicate which models they should be provided to for likelihood
evaluation.
Parameters
----------
variable_params: (tuple of) string(s)
A tuple of parameter names that will be varied.
submodels: dict
Dictionary of model labels -> model instances of all the submodels.
\**kwargs :
All other keyword arguments are passed to
:py:class:`BaseModel <pycbc.inference.models.base.BaseModel>`.
"""
name = 'hierarchical'
def __init__(self, variable_params, submodels, **kwargs):
# sub models is assumed to be a dict of model labels -> model instances
self.submodels = submodels
# initialize standard attributes
super().__init__(variable_params, **kwargs)
# store a map of model labels -> parameters for quick look up later
self.param_map = map_params(self.hvariable_params)
# add any parameters created by waveform transforms
if self.waveform_transforms is not None:
derived_params = set()
derived_params.update(*[t.outputs
for t in self.waveform_transforms])
# convert to hierarchical params
derived_params = map_params(hpiter(derived_params,
list(self.submodels.keys())))
for lbl, pset in derived_params.items():
self.param_map[lbl].update(pset)
# make sure the static parameters of all submodels are set correctly
self.static_param_map = map_params(self.hstatic_params.keys())
# also create a map of model label -> extra stats created by each model
# stats are prepended with the model label. We'll include the
# loglikelihood returned by each submodel in the extra stats.
self.extra_stats_map = {}
self.__extra_stats = []
for lbl, model in self.submodels.items():
model.static_params = {p.subname: self.static_params[p.fullname]
for p in self.static_param_map[lbl]}
self.extra_stats_map.update(map_params([
HierarchicalParam.from_subname(lbl, p)
for p in model._extra_stats+['loglikelihood']]))
self.__extra_stats += self.extra_stats_map[lbl]
# also make sure the model's sampling transforms and waveform
# transforms are not set, as these are handled by the hierarchical
# model
if model.sampling_transforms is not None:
raise ValueError("Model {} has sampling transforms set; "
"in a hierarchical analysis, these are "
"handled by the hiearchical model"
.format(lbl))
if model.waveform_transforms is not None:
raise ValueError("Model {} has waveform transforms set; "
"in a hierarchical analysis, these are "
"handled by the hiearchical model"
.format(lbl))
@property
def hvariable_params(self):
"""The variable params as a tuple of :py:class:`HierarchicalParam`
instances.
"""
return self._variable_params
@property
def variable_params(self):
# converts variable params back to a set of strings before returning
return tuple(p.fullname for p in self._variable_params)
@variable_params.setter
def variable_params(self, variable_params):
# overrides BaseModel's variable params to store the variable params
# as HierarchicalParam instances
if isinstance(variable_params, str):
variable_params = [variable_params]
self._variable_params = tuple(HierarchicalParam(p, self.submodels)
for p in variable_params)
@property
def hstatic_params(self):
"""The static params with :py:class:`HierarchicalParam` instances used
as dictionary keys.
"""
return self._static_params
@property
def static_params(self):
# converts the static param keys back to strings
return {p.fullname: val for p, val in self._static_params.items()}
@static_params.setter
def static_params(self, static_params):
if static_params is None:
static_params = {}
self._static_params = {HierarchicalParam(p, self.submodels): val
for p, val in static_params.items()}
@property
def _extra_stats(self):
return [p.fullname for p in self.__extra_stats]
@property
def _hextra_stats(self):
"""The extra stats as :py:class:`HierarchicalParam` instances."""
return self.__extra_stats
def _loglikelihood(self):
# takes the sum of the constituent models' loglikelihoods
logl = 0.
for lbl, model in self.submodels.items():
# update the model with the current params. This is done here
# instead of in `update` because waveform transforms are not
# applied until the loglikelihood function is called
model.update(**{p.subname: self.current_params[p.fullname]
for p in self.param_map[lbl]})
# now get the loglikelihood from the model
sublogl = model.loglikelihood
# store the extra stats
mstats = model.current_stats
for stat in self.extra_stats_map[lbl]:
setattr(self._current_stats, stat, mstats[stat.subname])
# add to the total loglikelihood
logl += sublogl
return logl
def write_metadata(self, fp, group=None):
"""Adds data to the metadata that's written.
Parameters
----------
fp : pycbc.inference.io.BaseInferenceFile instance
The inference file to write to.
group : str, optional
If provided, the metadata will be written to the attrs specified
by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is
written to the top-level attrs (``fp.attrs``).
"""
# write information about self
super().write_metadata(fp, group=group)
# write information about each submodel into a different group for
# each one
if group is None or group == '/':
prefix = ''
else:
prefix = group+'/'
for lbl, model in self.submodels.items():
model.write_metadata(fp, group=prefix+lbl)
# if all submodels support it, write a combined lognl parameter
try:
sampattrs = fp.getattrs(group=fp.samples_group)
lognl = [self.submodels[k].lognl for k in self.submodels]
sampattrs['{}lognl'.format(prefix)] = sum(lognl)
except AttributeError:
pass
@classmethod
def from_config(cls, cp, **kwargs):
r"""Initializes an instance of this class from the given config file.
Sub-models are initialized before initializing this class. The model
section must have a ``submodels`` argument that lists the names of all
the submodels to generate as a space-separated list. Each sub-model
should have its own ``[{label}__model]`` section that sets up the
model for that sub-model. For example:
.. code-block:: ini
[model]
name = hierarchical
submodels = event1 event2
[event1__model]
<event1 model options>
[event2__model]
<event2 model options>
Similarly, all other sections that are specific to a model should start
with the model's label. All sections starting with a model's label will
be passed to that model's ``from_config`` method with the label removed
from the section name. For example, if a sub-model requires a data
section to be specified, it should be titled ``[{label}__data]``. Upon
initialization, the ``{label}__`` will be stripped from the section
header and passed to the model.
No model labels should precede the ``variable_params``,
``static_params``, ``waveform_transforms``, or ``sampling_transforms``
sections. Instead, the parameters specified in these sections should
follow the naming conventions described in :py:class:`HierarchicalParam`
to determine which sub-model(s) they belong to. (Sampling parameters
can follow any naming convention, as they are only handled by the
hierarchical model.) This is because the hierarchical model handles
all transforms, communication with the sampler, file IO, and prior
calculation. Only the sub-models' loglikelihood functions are called.
Metadata for each sub-model is written to the output hdf file under
groups given by the sub-model label. For example, if we have two
submodels labelled ``event1`` and ``event2``, there will be groups
with the same names in the top level of the output that contain that
model's subdata. For instance, if event1 used the ``gaussian_noise``
model, the GW data and PSDs will be found in ``event1/data`` and the
low frequency cutoff used for that model will be in the ``attrs`` of
the ``event1`` group.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
\**kwargs :
All additional keyword arguments are passed to the class. Any
provided keyword will override what is in the config file.
"""
# we need the read from config function from the init; to prevent
# circular imports, we import it here
from pycbc.inference.models import read_from_config
# get the submodels
submodel_lbls = shlex.split(cp.get('model', 'submodels'))
# sort parameters by model
vparam_map = map_params(hpiter(cp.options('variable_params'),
submodel_lbls))
sparam_map = map_params(hpiter(cp.options('static_params'),
submodel_lbls))
# we'll need any waveform transforms for the initializing sub-models,
# as the underlying models will receive the output of those transforms
if any(cp.get_subsections('waveform_transforms')):
waveform_transforms = transforms.read_transforms_from_config(
cp, 'waveform_transforms')
wfoutputs = set.union(*[t.outputs
for t in waveform_transforms])
wfparam_map = map_params(hpiter(wfoutputs, submodel_lbls))
else:
wfparam_map = {lbl: [] for lbl in submodel_lbls}
# initialize the models
submodels = {}
logging.info("Loading submodels")
for lbl in submodel_lbls:
logging.info("============= %s =============", lbl)
# create a config parser to pass to the model
subcp = WorkflowConfigParser()
# copy sections over that start with the model label (this should
# include the [model] section for that model)
copy_sections = [
HierarchicalParam(sec, submodel_lbls)
for sec in cp.sections() if lbl in
sec.split('-')[0].split(HierarchicalParam.delim, 1)[0]]
for sec in copy_sections:
# check that the user isn't trying to set variable or static
# params for the model (we won't worry about waveform or
# sampling transforms here, since that is checked for in the
# __init__)
if sec.subname in ['variable_params', 'static_params']:
raise ValueError("Section {} found in the config file; "
"[variable_params] and [static_params] "
"sections should not include model "
"labels. To specify parameters unique to "
"one or more sub-models, prepend the "
"individual parameter names with the "
"model label. See HierarchicalParam for "
"details.".format(sec))
subcp.add_section(sec.subname)
for opt, val in cp.items(sec):
subcp.set(sec.subname, opt, val)
# set the static params
subcp.add_section('static_params')
for param in sparam_map[lbl]:
subcp.set('static_params', param.subname,
cp.get('static_params', param.fullname))
# set the variable params: for now we'll just set all the
# variable params as static params
# so that the model doesn't raise an error looking for
# prior sections. We'll then manually set the variable
# params after the model is initialized
subcp.add_section('variable_params')
for param in vparam_map[lbl]:
subcp.set('static_params', param.subname, 'REPLACE')
# add the outputs from the waveform transforms
for param in wfparam_map[lbl]:
subcp.set('static_params', param.subname, 'REPLACE')
# initialize
submodel = read_from_config(subcp)
# move the static params back to variable
for p in vparam_map[lbl]:
submodel.static_params.pop(p.subname)
submodel.variable_params = tuple(p.subname
for p in vparam_map[lbl])
# remove the waveform transform parameters
for p in wfparam_map[lbl]:
submodel.static_params.pop(p.subname)
# store
submodels[lbl] = submodel
logging.info("")
# now load the model
logging.info("Loading hierarchical model")
return super().from_config(cp, submodels=submodels)
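# Illustrative sketch (hypothetical labels and parameters): with
# ``submodels = event1 event2``, a [variable_params] section might contain
#
#     [variable_params]
#     event1__tc =
#     event2__tc =
#     srcmass1 =
#
# Here ``event1__tc`` and ``event2__tc`` are varied independently for the two
# events, while ``srcmass1`` carries no label and is therefore shared by both
# sub-models.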
class HierarchicalParam(str):
"""Sub-class of str for hierarchical parameter names.
This adds attributes that keep track of the model label(s) the parameter
is associated with, along with the name that is passed to the models.
The following conventions are used for parsing parameter names:
* Model labels and parameter names are separated by the ``delim`` class
attribute, which by default is ``__``, e.g., ``event1__mass``.
* Multiple model labels can be provided by separating the model labels
with the ``model_delim`` class attribute, which by default is ``_``,
e.g., ``event1_event2__mass``. Note that this means that individual
model labels cannot contain ``_``, else they'll be parsed as separate
models.
* Parameters that have no model labels prepended to them (i.e., there
is no ``__`` in the name) are common to all models.
These parsing rules are applied by the :py:meth:`HierarchicalParam.parse`
method.
Parameters
----------
fullname : str
Name of the hierarchical parameter. Should have format
``{model1}[_{model2}[_{...}]]__{param}``.
possible_models : set of str
The possible sub-models a parameter can belong to. Should be a set of
model labels.
Attributes
----------
fullname : str
The full name of the parameter, including model labels. For example,
``e1_e2__foo``.
models : set
The model labels the parameter is associated with. For example,
``e1_e2__foo`` yields models ``e1, e2``.
subname : str
The name of the parameter without the model labels prepended to it.
For example, ``e1_e2__foo`` yields ``foo``.
"""
delim = '__'
model_delim = '_'
def __new__(cls, fullname, possible_models):
fullname = str(fullname)
obj = str.__new__(cls, fullname)
obj.fullname = fullname
models, subp = HierarchicalParam.parse(fullname, possible_models)
obj.models = models
obj.subname = subp
return obj
@classmethod
def from_subname(cls, model_label, subname):
"""Creates a HierarchicalParam from the given subname and model label.
"""
return cls(cls.delim.join([model_label, subname]), set([model_label]))
@classmethod
def parse(cls, fullname, possible_models):
"""Parses the full parameter name into the models the parameter is
associated with and the parameter name that is passed to the models.
Parameters
----------
fullname : str
The full name of the parameter, which includes both the model
label(s) and the parameter name.
possible_models : set
Set of model labels the parameter can be associated with.
Returns
-------
models : list
List of the model labels the parameter is associated with.
subp : str
Parameter name that is passed to the models. This is the parameter
name with the model label(s) stripped from it.
"""
# make sure possible models is a set
possible_models = set(possible_models)
p = fullname.split(cls.delim, 1)
if len(p) == 1:
# is a global fullname, associate with all
subp = fullname
models = possible_models.copy()
else:
models, subp = p
# convert into set of model label(s)
models = set(models.split(cls.model_delim))
# make sure the given labels are in the list of possible models
unknown = models - possible_models
if any(unknown):
raise ValueError('unrecognized model label(s) {} present in '
'parameter {}'.format(', '.join(unknown),
fullname))
return models, subp
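# Illustrative sketch (hypothetical model labels): how the naming conventions
# described above are parsed.
def _example_param_parsing():
    labels = {'event1', 'event2'}
    # parameter specific to one sub-model
    models, sub = HierarchicalParam.parse('event1__mass1', labels)
    assert (sorted(models), sub) == (['event1'], 'mass1')
    # parameter shared by an explicit subset of sub-models
    models, sub = HierarchicalParam.parse('event1_event2__tc', labels)
    assert (sorted(models), sub) == (['event1', 'event2'], 'tc')
    # no label: the parameter is common to all sub-models
    models, sub = HierarchicalParam.parse('distance', labels)
    assert (sorted(models), sub) == (['event1', 'event2'], 'distance')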
def hpiter(params, possible_models):
"""Turns a list of parameter strings into a list of HierarchicalParams.
Parameters
----------
params : list of str
List of parameter names.
possible_models : set
Set of model labels the parameters can be associated with.
Returns
-------
iterator :
Iterator of :py:class:`HierarchicalParam` instances.
"""
return map(lambda x: HierarchicalParam(x, possible_models), params)
def map_params(params):
"""Creates a map of models -> parameters.
Parameters
----------
params : list of HierarchicalParam instances
The list of hierarchical parameter names to parse.
Returns
-------
dict :
Dictionary of model labels -> associated parameters.
"""
param_map = {}
for p in params:
for lbl in p.models:
try:
param_map[lbl].update([p])
except KeyError:
param_map[lbl] = set([p])
return param_map
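# Illustrative sketch (hypothetical labels): grouping parameters by sub-model
# with ``hpiter`` and ``map_params``.
def _example_map_params():
    labels = {'event1', 'event2'}
    pmap = map_params(hpiter(['event1__mass1', 'tc'], labels))
    # 'tc' carries no label, so it is associated with both sub-models
    assert {p.subname for p in pmap['event1']} == {'mass1', 'tc'}
    assert {p.subname for p in pmap['event2']} == {'tc'}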
class MultiSignalModel(HierarchicalModel):
""" Model for multiple signals which share data
Sub models are treated as if the signals overlap in data. This requires
constituent models to implement a specific method to handle this case.
All models must be of the same type, or the specific model is responsible
for implementing cross-compatibility with another model. Each model h_i is
responsible for calculating its own loglikelihood ratio, and
must also implement a method to calculate crossterms of the form
<h_i | h_j> which arise from the full calculation of <d - h|d - h>.
This model inherits from the HierarchicalModel so the syntax for
configuration files is the same. The primary model, which by default is the
first model given, is used to determine the noise terms <d | d>.
"""
name = 'multi_signal'
def __init__(self, variable_params, submodels, **kwargs):
super().__init__(variable_params, submodels, **kwargs)
# Check what models each model supports
support = {}
ctypes = set() # The set of models we need to completely support
for lbl in self.submodels:
model = self.submodels[lbl]
ctypes.add(type(model))
if hasattr(model, 'multi_signal_support'):
support[lbl] = set(model.multi_signal_support)
# pick the primary model if it supports the set of constituent models
for lbl in support:
if ctypes <= support[lbl]:
self.primary_model = lbl
logging.info('MultiSignalModel: PrimaryModel == %s', lbl)
break
else:
# Oh, no, we don't support this combo!
raise RuntimeError("It looks like the combination of models, {},"
"for the MultiSignal model isn't supported by"
"any of the constituent models.".format(ctypes))
self.other_models = self.submodels.copy()
self.other_models.pop(self.primary_model)
self.other_models = list(self.other_models.values())
def write_metadata(self, fp, group=None):
"""Adds metadata to the output files
Parameters
----------
fp : pycbc.inference.io.BaseInferenceFile instance
The inference file to write to.
group : str, optional
If provided, the metadata will be written to the attrs specified
by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is
written to the top-level attrs (``fp.attrs``).
"""
super().write_metadata(fp, group=group)
sampattrs = fp.getattrs(group=fp.samples_group)
# if a group is specified, prepend the lognl names with it
if group is None or group == '/':
prefix = ''
else:
prefix = group.replace('/', '__')
if not prefix.endswith('__'):
prefix += '__'
try:
model = self.submodels[self.primary_model]
sampattrs['{}lognl'.format(prefix)] = model.lognl
except AttributeError:
pass
def _loglikelihood(self):
for lbl, model in self.submodels.items():
# Update the parameters of each
model.update(**{p.subname: self.current_params[p.fullname]
for p in self.param_map[lbl]})
# Calculate the combined loglikelihood
p = self.primary_model
logl = self.submodels[p].multi_loglikelihood(self.other_models)
# store any extra stats from the submodels
for lbl, model in self.submodels.items():
mstats = model.current_stats
for stat in self.extra_stats_map[lbl]:
setattr(self._current_stats, stat, mstats[stat.subname])
return logl
| 25,771
| 41.810631
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/models/single_template.py
|
# Copyright (C) 2018 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module provides model classes that assume the noise is Gaussian.
"""
import logging
import numpy
import itertools
from pycbc import filter as pyfilter
from pycbc.waveform import get_fd_waveform
from pycbc.detector import Detector
from .gaussian_noise import BaseGaussianNoise
from .tools import DistMarg
class SingleTemplate(DistMarg, BaseGaussianNoise):
r"""Model that assumes we know all the intrinsic parameters.
This model assumes we know all the intrinsic parameters, and are only
maximizing over the extrinsic ones. We also assume a dominant-mode,
non-precessing waveform approximant.
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied.
data : dict
A dictionary of data, in which the keys are the detector names and the
values are the data (assumed to be unwhitened). All data must have the
same frequency resolution.
low_frequency_cutoff : dict
A dictionary of starting frequencies, in which the keys are the
detector names and the values are the starting frequencies for the
respective detectors to be used for computing inner products.
sample_rate : int, optional
The sample rate to use. Default is 32768.
polarization_samples: int, optional
Parameter to specify how finely to marginalize over polarization angle.
If None, then polarization must be a parameter.
\**kwargs :
All other keyword arguments are passed to
:py:class:`BaseGaussianNoise`; see that class for details.
"""
name = 'single_template'
def __init__(self, variable_params, data, low_frequency_cutoff,
sample_rate=32768,
marginalize_phase=True,
**kwargs):
variable_params, kwargs = self.setup_marginalization(
variable_params,
marginalize_phase=marginalize_phase,
**kwargs)
super(SingleTemplate, self).__init__(
variable_params, data, low_frequency_cutoff, **kwargs)
sample_rate = float(sample_rate)
# Generate template waveforms
df = data[self.detectors[0]].delta_f
self.df = df
p = self.static_params.copy()
for k in self.static_params:
if p[k] == 'REPLACE':
p.pop(k)
if 'distance' in p:
_ = p.pop('distance')
if 'inclination' in p:
_ = p.pop('inclination')
hp, _ = get_fd_waveform(delta_f=df, distance=1, inclination=0, **p)
# Extend template to high sample rate
flen = int(round(sample_rate / df) / 2 + 1)
hp.resize(flen)
# Calculate high sample rate SNR time series
self.sh = {}
self.hh = {}
self.snr = {}
self.det = {}
for ifo in self.data:
flow = self.kmin[ifo] * df
fhigh = self.kmax[ifo] * df
# Extend data to high sample rate
self.data[ifo].resize(flen)
self.det[ifo] = Detector(ifo)
snr, _, norm = pyfilter.matched_filter_core(
hp, self.data[ifo],
psd=self.psds[ifo],
low_frequency_cutoff=flow,
high_frequency_cutoff=fhigh)
self.sh[ifo] = 4 * df * snr
self.snr[ifo] = snr * norm
self.hh[ifo] = pyfilter.sigmasq(
hp, psd=self.psds[ifo],
low_frequency_cutoff=flow,
high_frequency_cutoff=fhigh)
self.waveform = hp
self.htfs = {} # Waveform phase / distance transformation factors
self.dts = {}
# Restrict to analyzing around peaks if chosen and choose which
# ifos to draw from
self.setup_peak_lock(snrs=self.snr,
sample_rate=sample_rate,
**kwargs)
self.draw_ifos(self.snr)
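# Descriptive note: ``self.sh[ifo]`` caches the complex template-data inner
# product as a function of coalescence time, ``self.hh[ifo]`` caches <hp|hp>,
# and ``self.snr[ifo]`` the normalized SNR time series. ``_loglr`` below
# rescales these by the extrinsic response factor ``htf`` (antenna pattern,
# inclination, distance, and phase) rather than regenerating a waveform for
# every likelihood evaluation.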
@property
def multi_signal_support(self):
""" The list of classes that this model supports in a multi-signal
likelihood
"""
# Check if this model *can* be included in a multi-signal model.
# All marginalizations must currently be disabled to work!
if (self.marginalize_vector_params or
self.marginalize_distance or
self.marginalize_phase):
logging.info("Cannot use single template model inside of"
"multi_signal if marginalizations are enabled")
return [type(self)]
def calculate_hihjs(self, models):
""" Pre-calculate the hihj inner products on a grid
"""
self.hihj = {}
for m1, m2 in itertools.combinations(models, 2):
self.hihj[(m1, m2)] = {}
h1 = m1.waveform
h2 = m2.waveform
for ifo in self.data:
flow = self.kmin[ifo] * self.df
fhigh = self.kmax[ifo] * self.df
h1h2, _, _ = pyfilter.matched_filter_core(
h1, h2,
psd=self.psds[ifo],
low_frequency_cutoff=flow,
high_frequency_cutoff=fhigh)
self.hihj[(m1, m2)][ifo] = 4 * self.df * h1h2
def multi_loglikelihood(self, models):
""" Calculate a multi-model (signal) likelihood
"""
models = [self] + models
loglr = 0
# handle sum[<d|h_i> - 0.5 <h_i|h_i>]
for m in models:
loglr += m.loglr
if not hasattr(self, 'hihj'):
self.calculate_hihjs(models)
# finally add in the lognl term from this model
for m1, m2 in itertools.combinations(models, 2):
for det in self.data:
hihj_vec = self.hihj[(m1, m2)][det]
dt = m1.dts[det] - m2.dts[det] + hihj_vec.start_time
if dt < hihj_vec.start_time:
dt += hihj_vec.duration
h1h2 = hihj_vec.at_time(dt, nearest_sample=True)
h1h2 *= m1.htfs[det] * m2.htfs[det].conj()
loglr += - h1h2.real # This is -0.5 * re(<h1|h2> + <h2|h1>)
return loglr + self.lognl
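# For reference, a sketch of the algebra implemented above: with a combined
# template h = sum_i h_i, the log likelihood ratio expands as
#
#     loglr = sum_i [ <d|h_i> - <h_i|h_i>/2 ] - sum_{i<j} Re<h_i|h_j>,
#
# so each constituent model contributes its own loglr and the pairwise cross
# terms <h_i|h_j> are subtracted once per pair; lognl is then added back to
# return a full loglikelihood.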
def _loglr(self):
r"""Computes the log likelihood ratio
Returns
-------
float
The value of the log likelihood ratio.
"""
# calculate <d-h|d-h> = <h|h> - 2<h|d> + <d|d> up to a constant
p = self.current_params
phase = 1
if 'coa_phase' in p:
phase = numpy.exp(-1.0j * 2 * p['coa_phase'])
sh_total = hh_total = 0
ic = numpy.cos(p['inclination'])
ip = 0.5 * (1.0 + ic * ic)
pol_phase = numpy.exp(-2.0j * p['polarization'])
self.snr_draw(snrs=self.snr)
for ifo in self.sh:
dt = self.det[ifo].time_delay_from_earth_center(p['ra'], p['dec'],
p['tc'])
self.dts[ifo] = p['tc'] + dt
fp, fc = self.det[ifo].antenna_pattern(p['ra'], p['dec'],
0, p['tc'])
f = (fp + 1.0j * fc) * pol_phase
# Note, this includes complex conjugation already
# as our stored inner products were hp* x data
htf = (f.real * ip + 1.0j * f.imag * ic) / p['distance'] * phase
self.htfs[ifo] = htf
sh = self.sh[ifo].at_time(self.dts[ifo], interpolate='quadratic')
sh_total += sh * htf
hh_total += self.hh[ifo] * abs(htf) ** 2.0
loglr = self.marginalize_loglr(sh_total, hh_total)
return loglr
| 8,440
| 36.02193
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/models/gated_gaussian_noise.py
|
# Copyright (C) 2020 Collin Capano and Shilpa Kastha
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module provides model classes that assume the noise is Gaussian and
introduces a gate to remove given times from the data, using the inpainting
method to fill the removed part such that it does not enter the likelihood.
"""
from abc import abstractmethod
import logging
import numpy
from scipy import special
from pycbc.waveform import (NoWaveformError, FailedWaveformError)
from pycbc.types import FrequencySeries
from pycbc.detector import Detector
from pycbc.pnutils import hybrid_meco_frequency
from pycbc.waveform.utils import time_from_frequencyseries
from pycbc.waveform import generator
from pycbc.filter import highpass
from .gaussian_noise import (BaseGaussianNoise, create_waveform_generator)
from .base_data import BaseDataModel
from .data_utils import fd_data_from_strain_dict
class BaseGatedGaussian(BaseGaussianNoise):
r"""Base model for gated gaussian.
Provides additional routines for applying a time-domain gate to data.
See :py:class:`GatedGaussianNoise` for more details.
"""
def __init__(self, variable_params, data, low_frequency_cutoff, psds=None,
high_frequency_cutoff=None, normalize=False,
static_params=None, highpass_waveforms=False, **kwargs):
# we'll want the time-domain data, so store that
self._td_data = {}
# cache the current projection for debugging
self.current_proj = {}
self.current_nproj = {}
# cache the overwhitened data
self._overwhitened_data = {}
# cache the current gated data
self._gated_data = {}
# highpass waveforms with the given frequency
self.highpass_waveforms = highpass_waveforms
if self.highpass_waveforms:
logging.info("Will highpass waveforms at %f Hz",
highpass_waveforms)
# set up the boiler-plate attributes
super().__init__(
variable_params, data, low_frequency_cutoff, psds=psds,
high_frequency_cutoff=high_frequency_cutoff, normalize=normalize,
static_params=static_params, **kwargs)
@classmethod
def from_config(cls, cp, data_section='data', data=None, psds=None,
**kwargs):
"""Adds highpass filtering to keyword arguments based on config file.
"""
if cp.has_option(data_section, 'strain-high-pass') and \
'highpass_waveforms' not in kwargs:
kwargs['highpass_waveforms'] = float(cp.get(data_section,
'strain-high-pass'))
return super().from_config(cp, data_section=data_section,
data=data, psds=psds,
**kwargs)
@BaseDataModel.data.setter
def data(self, data):
"""Store a copy of the FD and TD data."""
BaseDataModel.data.fset(self, data)
# store the td version
self._td_data = {det: d.to_timeseries() for det, d in data.items()}
@property
def td_data(self):
"""The data in the time domain."""
return self._td_data
@BaseGaussianNoise.psds.setter
def psds(self, psds):
"""Sets the psds, and calculates the weight and norm from them.
The data and the low and high frequency cutoffs must be set first.
"""
# check that the data has been set
if self._data is None:
raise ValueError("No data set")
if self._f_lower is None:
raise ValueError("low frequency cutoff not set")
if self._f_upper is None:
raise ValueError("high frequency cutoff not set")
# make sure the relevant caches are cleared
self._psds.clear()
self._invpsds.clear()
self._gated_data.clear()
# store the psds
for det, d in self._data.items():
if psds is None:
# No psd means assume white PSD
p = FrequencySeries(numpy.ones(int(self._N/2+1)),
delta_f=d.delta_f)
else:
# copy for storage
p = psds[det].copy()
self._psds[det] = p
# we'll store the weight to apply to the inner product
invp = 1./p
self._invpsds[det] = invp
self._overwhitened_data = self.whiten(self.data, 2, inplace=False)
def det_lognorm(self, det):
# FIXME: just returning 0 for now, but should be the determinant
# of the truncated covariance matrix
return 0.
@property
def normalize(self):
"""Determines if the loglikelihood includes the normalization term.
"""
return self._normalize
@normalize.setter
def normalize(self, normalize):
"""Clears the current stats if the normalization state is changed.
"""
self._normalize = normalize
@staticmethod
def _nowaveform_logl():
"""Convenience function to set logl values if no waveform generated.
"""
return -numpy.inf
def _loglr(self):
r"""Computes the log likelihood ratio.
Returns
-------
float
The value of the log likelihood ratio evaluated at the given point.
"""
return self.loglikelihood - self.lognl
def whiten(self, data, whiten, inplace=False):
"""Whitens the given data.
Parameters
----------
data : dict
Dictionary of detector names -> FrequencySeries.
whiten : {0, 1, 2}
Integer indicating what level of whitening to apply. Levels are:
0: no whitening; 1: whiten; 2: overwhiten.
inplace : bool, optional
If True, modify the data in place. Otherwise, a copy will be
created for whitening.
Returns
-------
dict :
Dictionary of FrequencySeries after the requested whitening has
been applied.
"""
if not inplace:
data = {det: d.copy() for det, d in data.items()}
if whiten:
for det, dtilde in data.items():
invpsd = self._invpsds[det]
if whiten == 1:
dtilde *= invpsd**0.5
elif whiten == 2:
dtilde *= invpsd
else:
raise ValueError("whiten must be either 0, 1, or 2")
return data
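# Descriptive note: ``whiten=1`` divides each frequency series by sqrt(PSD)
# (whitening), while ``whiten=2`` divides by the PSD itself (over-whitening);
# the latter is used when the PSDs are set to cache
# ``self._overwhitened_data``.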
def get_waveforms(self):
"""The waveforms generated using the current parameters.
If the waveforms haven't been generated yet, they will be generated,
resized to the same length as the data, and cached. If the
``highpass_waveforms`` attribute is set, a highpass filter will
also be applied to the waveforms.
Returns
-------
dict :
Dictionary of detector names -> FrequencySeries.
"""
if self._current_wfs is None:
params = self.current_params
wfs = self.waveform_generator.generate(**params)
for det, h in wfs.items():
# make the same length as the data
h.resize(len(self.data[det]))
# apply high pass
if self.highpass_waveforms:
h = highpass(
h.to_timeseries(),
frequency=self.highpass_waveforms).to_frequencyseries()
wfs[det] = h
self._current_wfs = wfs
return self._current_wfs
@abstractmethod
def get_gated_waveforms(self):
"""Generates and gates waveforms using the current parameters.
Returns
-------
dict :
Dictionary of detector names -> FrequencySeries.
"""
pass
def get_residuals(self):
"""Generates the residuals ``d-h`` using the current parameters.
Returns
-------
dict :
Dictionary of detector names -> FrequencySeries.
"""
wfs = self.get_waveforms()
out = {}
for det, h in wfs.items():
d = self.data[det]
out[det] = d - h
return out
def get_data(self):
"""Return a copy of the data.
Returns
-------
dict :
Dictionary of detector names -> FrequencySeries.
"""
return {det: d.copy() for det, d in self.data.items()}
def get_gated_data(self):
"""Return a copy of the gated data.
The gated data will be cached for faster retrieval.
Returns
-------
dict :
Dictionary of detector names -> FrequencySeries.
"""
gate_times = self.get_gate_times()
out = {}
for det, d in self.td_data.items():
# make sure the cache at least has the detectors in it
try:
cache = self._gated_data[det]
except KeyError:
cache = self._gated_data[det] = {}
invpsd = self._invpsds[det]
gatestartdelay, dgatedelay = gate_times[det]
try:
dtilde = cache[gatestartdelay, dgatedelay]
except KeyError:
# doesn't exist yet, or the gate times changed
cache.clear()
d = d.gate(gatestartdelay + dgatedelay/2,
window=dgatedelay/2, copy=True,
invpsd=invpsd, method='paint')
dtilde = d.to_frequencyseries()
# save for next time
cache[gatestartdelay, dgatedelay] = dtilde
out[det] = dtilde
return out
def get_gate_times(self):
"""Gets the time to apply a gate based on the current sky position.
If the parameter ``gatefunc`` is set to ``'hmeco'``, the gate times
will be calculated based on the hybrid MECO of the given set of
parameters; see ``get_gate_times_hmeco`` for details. Otherwise, the
gate times will just be retrieved from the ``t_gate_start`` and
``t_gate_end`` parameters.
.. warning::
Since the normalization of the likelihood is currently not
being calculated, it is recommended that you do not use
``gatefunc``, instead using fixed gate times.
Returns
-------
dict :
Dictionary of detector names -> (gate start, gate width)
"""
params = self.current_params
try:
gatefunc = self.current_params['gatefunc']
except KeyError:
gatefunc = None
if gatefunc == 'hmeco':
return self.get_gate_times_hmeco()
# gate input for ringdown analysis which considers a start time
# and an end time
gatestart = params['t_gate_start']
gateend = params['t_gate_end']
# we'll need the sky location for determining time shifts
ra = self.current_params['ra']
dec = self.current_params['dec']
gatetimes = {}
for det in self._invpsds:
thisdet = Detector(det)
# account for the time delay between the waveforms of the
# different detectors
gatestartdelay = gatestart + thisdet.time_delay_from_earth_center(
ra, dec, gatestart)
gateenddelay = gateend + thisdet.time_delay_from_earth_center(
ra, dec, gateend)
dgatedelay = gateenddelay - gatestartdelay
gatetimes[det] = (gatestartdelay, dgatedelay)
return gatetimes
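# Worked example (hypothetical numbers): with static params
# t_gate_start = 100.00 and t_gate_end = 100.05, a detector whose time delay
# from the Earth centre is +0.01 s at both epochs gets
# (gatestartdelay, dgatedelay) = (100.01, 0.05).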
def get_gate_times_hmeco(self):
"""Gets the time to apply a gate based on the current sky position.
Returns
-------
dict :
Dictionary of detector names -> (gate start, gate width)
"""
# generate the template waveform
try:
wfs = self.get_waveforms()
except NoWaveformError:
return self._nowaveform_logl()
except FailedWaveformError as e:
if self.ignore_failed_waveforms:
return self._nowaveform_logl()
raise e
# get waveform parameters
params = self.current_params
spin1 = params['spin1z']
spin2 = params['spin2z']
# gate input for ringdown analysis which considers a start time
# and an end time
dgate = params['gate_window']
meco_f = hybrid_meco_frequency(params['mass1'], params['mass2'],
spin1, spin2)
# figure out the gate times
gatetimes = {}
for det, h in wfs.items():
invpsd = self._invpsds[det]
h.resize(len(invpsd))
ht = h.to_timeseries()
f_low = int((self._f_lower[det]+1)/h.delta_f)
sample_freqs = h.sample_frequencies[f_low:].numpy()
f_idx = numpy.where(sample_freqs <= meco_f)[0][-1]
# find time corresponding to meco frequency
t_from_freq = time_from_frequencyseries(
h[f_low:], sample_frequencies=sample_freqs)
if t_from_freq[f_idx] > 0:
gatestartdelay = t_from_freq[f_idx] + float(t_from_freq.epoch)
else:
gatestartdelay = t_from_freq[f_idx] + ht.sample_times[-1]
gatestartdelay = min(gatestartdelay, params['t_gate_start'])
gatetimes[det] = (gatestartdelay, dgate)
return gatetimes
def _lognl(self):
"""Calculates the log of the noise likelihood.
"""
# clear variables
lognl = 0.
self._det_lognls.clear()
# get the times of the gates
gate_times = self.get_gate_times()
self.current_nproj.clear()
for det, invpsd in self._invpsds.items():
norm = self.det_lognorm(det)
gatestartdelay, dgatedelay = gate_times[det]
# we always filter the entire segment starting from kmin, since the
# gated series may have high frequency components
slc = slice(self._kmin[det], self._kmax[det])
# gate the data
data = self.td_data[det]
gated_dt = data.gate(gatestartdelay + dgatedelay/2,
window=dgatedelay/2, copy=True,
invpsd=invpsd, method='paint')
self.current_nproj[det] = (gated_dt.proj, gated_dt.projslc)
# convert to the frequency series
gated_d = gated_dt.to_frequencyseries()
# overwhiten
gated_d *= invpsd
d = self.data[det]
# inner product
ip = 4 * invpsd.delta_f * d[slc].inner(gated_d[slc]).real # <d, d>
dd = norm - 0.5*ip
# store
self._det_lognls[det] = dd
lognl += dd
return float(lognl)
def det_lognl(self, det):
# make sure lognl has been called
_ = self._trytoget('lognl', self._lognl)
# the det_lognls dict should have been updated, so can call it now
return self._det_lognls[det]
@staticmethod
def _fd_data_from_strain_dict(opts, strain_dict, psd_strain_dict):
"""Wrapper around :py:func:`data_utils.fd_data_from_strain_dict`.
Ensures that if the PSD is estimated from data, the inverse spectrum
truncation uses a Hann window, and that the low frequency cutoff is
zero.
"""
if opts.psd_inverse_length and opts.invpsd_trunc_method is None:
# make sure invpsd truncation is set to hanning
logging.info("Using Hann window to truncate inverse PSD")
opts.invpsd_trunc_method = 'hann'
lfs = None
if opts.psd_estimation:
# make sure low frequency cutoff is zero
logging.info("Setting low frequency cutoff of PSD to 0")
lfs = opts.low_frequency_cutoff.copy()
opts.low_frequency_cutoff = {d: 0. for d in lfs}
out = fd_data_from_strain_dict(opts, strain_dict, psd_strain_dict)
# set back
if lfs is not None:
opts.low_frequency_cutoff = lfs
return out
def write_metadata(self, fp, group=None):
"""Adds writing the psds, and analyzed detectors.
The analyzed detectors, their analysis segments, and the segments
used for psd estimation are written as
``analyzed_detectors``, ``{{detector}}_analysis_segment``, and
``{{detector}}_psd_segment``, respectively. These are either written
to the specified ``group``'s attrs, or to the top level attrs if
``group`` is None.
Parameters
----------
fp : pycbc.inference.io.BaseInferenceFile instance
The inference file to write to.
group : str, optional
If provided, the metadata will be written to the attrs specified
by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is
written to the top-level attrs (``fp.attrs``).
"""
BaseDataModel.write_metadata(self, fp)
attrs = fp.getattrs(group=group)
# write the analyzed detectors and times
attrs['analyzed_detectors'] = self.detectors
for det, data in self.data.items():
key = '{}_analysis_segment'.format(det)
attrs[key] = [float(data.start_time), float(data.end_time)]
if self._psds is not None and not self.no_save_data:
fp.write_psd(self._psds, group=group)
# write the times used for psd estimation (if they were provided)
for det in self.psd_segments:
key = '{}_psd_segment'.format(det)
attrs[key] = list(map(float, self.psd_segments[det]))
# save the frequency cutoffs
for det in self.detectors:
attrs['{}_likelihood_low_freq'.format(det)] = self._f_lower[det]
if self._f_upper[det] is not None:
attrs['{}_likelihood_high_freq'.format(det)] = \
self._f_upper[det]
class GatedGaussianNoise(BaseGatedGaussian):
r"""Model that applies a time domain gate, assuming stationary Gaussian
noise.
The gate start and end times are set by providing ``t_gate_start`` and
``t_gate_end`` parameters, respectively. This will cause the gated times
to be excised from the analysis. For more details on the likelihood
function and its derivation, see
`arXiv:2105.05238 <https://arxiv.org/abs/2105.05238>`_.
.. warning::
The normalization of the likelihood depends on the gate times. However,
at the moment, the normalization is not calculated, as it depends on
the determinant of the truncated covariance matrix (see Eq. 4 of
arXiv:2105.05238). For this reason it is recommended that you only
use this model for fixed gate times.
"""
name = 'gated_gaussian_noise'
def __init__(self, variable_params, data, low_frequency_cutoff, psds=None,
high_frequency_cutoff=None, normalize=False,
static_params=None, **kwargs):
# set up the boiler-plate attributes
super().__init__(
variable_params, data, low_frequency_cutoff, psds=psds,
high_frequency_cutoff=high_frequency_cutoff, normalize=normalize,
static_params=static_params, **kwargs)
# create the waveform generator
self.waveform_generator = create_waveform_generator(
self.variable_params, self.data,
waveform_transforms=self.waveform_transforms,
recalibration=self.recalibration,
gates=self.gates, **self.static_params)
@property
def _extra_stats(self):
"""No extra stats are stored."""
return []
def _loglikelihood(self):
r"""Computes the log likelihood after removing the power within the
given time window,
.. math::
\log p(d|\Theta) = -\frac{1}{2} \sum_i
\left< d_i - h_i(\Theta) | d_i - h_i(\Theta) \right>,
at the current parameter values :math:`\Theta`.
Returns
-------
float
The value of the log likelihood.
"""
# generate the template waveform
try:
wfs = self.get_waveforms()
except NoWaveformError:
return self._nowaveform_logl()
except FailedWaveformError as e:
if self.ignore_failed_waveforms:
return self._nowaveform_logl()
raise e
# get the times of the gates
gate_times = self.get_gate_times()
logl = 0.
self.current_proj.clear()
for det, h in wfs.items():
invpsd = self._invpsds[det]
norm = self.det_lognorm(det)
gatestartdelay, dgatedelay = gate_times[det]
# we always filter the entire segment starting from kmin, since the
# gated series may have high frequency components
slc = slice(self._kmin[det], self._kmax[det])
# calculate the residual
data = self.td_data[det]
ht = h.to_timeseries()
res = data - ht
rtilde = res.to_frequencyseries()
gated_res = res.gate(gatestartdelay + dgatedelay/2,
window=dgatedelay/2, copy=True,
invpsd=invpsd, method='paint')
self.current_proj[det] = (gated_res.proj, gated_res.projslc)
gated_rtilde = gated_res.to_frequencyseries()
# overwhiten
gated_rtilde *= invpsd
rr = 4 * invpsd.delta_f * rtilde[slc].inner(gated_rtilde[slc]).real
logl += norm - 0.5*rr
return float(logl)
def get_gated_waveforms(self):
wfs = self.get_waveforms()
gate_times = self.get_gate_times()
out = {}
for det, h in wfs.items():
invpsd = self._invpsds[det]
gatestartdelay, dgatedelay = gate_times[det]
ht = h.to_timeseries()
ht = ht.gate(gatestartdelay + dgatedelay/2,
window=dgatedelay/2, copy=False,
invpsd=invpsd, method='paint')
h = ht.to_frequencyseries()
out[det] = h
return out
def get_gated_residuals(self):
"""Generates the gated residuals ``d-h`` using the current parameters.
Returns
-------
dict :
Dictionary of detector names -> FrequencySeries.
"""
params = self.current_params
wfs = self.waveform_generator.generate(**params)
gate_times = self.get_gate_times()
out = {}
for det, h in wfs.items():
invpsd = self._invpsds[det]
gatestartdelay, dgatedelay = gate_times[det]
data = self.td_data[det]
ht = h.to_timeseries()
res = data - ht
res = res.gate(gatestartdelay + dgatedelay/2,
window=dgatedelay/2, copy=True,
invpsd=invpsd, method='paint')
res = res.to_frequencyseries()
out[det] = res
return out
class GatedGaussianMargPol(BaseGatedGaussian):
r"""Gated gaussian model with numerical marginalization over polarization.
This implements the GatedGaussian likelihood with an explicit numerical
marginalization over polarization angle. This is accomplished using
a fixed set of integration points distributed uniformly between
0 and 2pi. By default, 1000 integration points are used.
The 'polarization_samples' argument can be passed to set an alternate
number of integration points.
"""
name = 'gated_gaussian_margpol'
def __init__(self, variable_params, data, low_frequency_cutoff, psds=None,
high_frequency_cutoff=None, normalize=False,
static_params=None,
polarization_samples=1000, **kwargs):
# set up the boiler-plate attributes
super().__init__(
variable_params, data, low_frequency_cutoff, psds=psds,
high_frequency_cutoff=high_frequency_cutoff, normalize=normalize,
static_params=static_params, **kwargs)
# the polarization parameters
self.polarization_samples = polarization_samples
self.pol = numpy.linspace(0, 2*numpy.pi, self.polarization_samples)
self.dets = {}
# create the waveform generator
self.waveform_generator = create_waveform_generator(
self.variable_params, self.data,
waveform_transforms=self.waveform_transforms,
recalibration=self.recalibration,
generator_class=generator.FDomainDetFrameTwoPolGenerator,
**self.static_params)
def get_waveforms(self):
if self._current_wfs is None:
params = self.current_params
wfs = self.waveform_generator.generate(**params)
for det, (hp, hc) in wfs.items():
# make the same length as the data
hp.resize(len(self.data[det]))
hc.resize(len(self.data[det]))
# apply high pass
if self.highpass_waveforms:
hp = highpass(
hp.to_timeseries(),
frequency=self.highpass_waveforms).to_frequencyseries()
hc = highpass(
hc.to_timeseries(),
frequency=self.highpass_waveforms).to_frequencyseries()
wfs[det] = (hp, hc)
self._current_wfs = wfs
return self._current_wfs
def get_gated_waveforms(self):
wfs = self.get_waveforms()
gate_times = self.get_gate_times()
out = {}
for det in wfs:
invpsd = self._invpsds[det]
gatestartdelay, dgatedelay = gate_times[det]
# the waveforms are a dictionary of (hp, hc)
pols = []
for h in wfs[det]:
ht = h.to_timeseries()
ht = ht.gate(gatestartdelay + dgatedelay/2,
window=dgatedelay/2, copy=False,
invpsd=invpsd, method='paint')
h = ht.to_frequencyseries()
pols.append(h)
out[det] = tuple(pols)
return out
@property
def _extra_stats(self):
"""Adds the maxL polarization and corresponding likelihood."""
return ['maxl_polarization', 'maxl_logl']
def _loglikelihood(self):
r"""Computes the log likelihood after removing the power within the
given time window,
.. math::
\log p(d|\Theta) = -\frac{1}{2} \sum_i
\left< d_i - h_i(\Theta) | d_i - h_i(\Theta) \right>,
at the current parameter values :math:`\Theta`.
Returns
-------
float
The value of the log likelihood.
"""
# generate the template waveform
try:
wfs = self.get_waveforms()
except NoWaveformError:
return self._nowaveform_logl()
except FailedWaveformError as e:
if self.ignore_failed_waveforms:
return self._nowaveform_logl()
raise e
# get the gated waveforms and data
gated_wfs = self.get_gated_waveforms()
gated_data = self.get_gated_data()
# cycle over
loglr = 0.
lognl = 0.
for det, (hp, hc) in wfs.items():
# get the antenna patterns
if det not in self.dets:
self.dets[det] = Detector(det)
fp, fc = self.dets[det].antenna_pattern(self.current_params['ra'],
self.current_params['dec'],
self.pol,
self.current_params['tc'])
norm = self.det_lognorm(det)
# we always filter the entire segment starting from kmin, since the
# gated series may have high frequency components
slc = slice(self._kmin[det], self._kmax[det])
# get the gated values
gated_hp, gated_hc = gated_wfs[det]
gated_d = gated_data[det]
# we'll overwhiten the ungated data and waveforms for computing
# inner products
d = self._overwhitened_data[det]
# overwhiten the hp and hc
# we'll do this in place for computational efficiency, but as a
# result we'll clear the current waveforms cache so a repeated call
# to get_waveforms does not return the overwhitened versions
self._current_wfs = None
invpsd = self._invpsds[det]
hp *= invpsd
hc *= invpsd
# get the various gated inner products
hpd = hp[slc].inner(gated_d[slc]).real # <hp, d>
hcd = hc[slc].inner(gated_d[slc]).real # <hc, d>
dhp = d[slc].inner(gated_hp[slc]).real # <d, hp>
dhc = d[slc].inner(gated_hc[slc]).real # <d, hc>
hphp = hp[slc].inner(gated_hp[slc]).real # <hp, hp>
hchc = hc[slc].inner(gated_hc[slc]).real # <hc, hc>
hphc = hp[slc].inner(gated_hc[slc]).real # <hp, hc>
hchp = hc[slc].inner(gated_hp[slc]).real # <hc, hp>
dd = d[slc].inner(gated_d[slc]).real # <d, d>
# since the antenna patterns are real,
# <h, d>/2 + <d, h>/2 = fp*(<hp, d>/2 + <d, hp>/2)
# + fc*(<hc, d>/2 + <d, hc>/2)
hd = fp*(hpd + dhp) + fc*(hcd + dhc)
# <h, h>/2 = <fp*hp + fc*hc, fp*hp + fc*hc>/2
# = fp*fp*<hp, hp>/2 + fc*fc*<hc, hc>/2
# + fp*fc*<hp, hc>/2 + fc*fp*<hc, hp>/2
hh = fp*fp*hphp + fc*fc*hchc + fp*fc*(hphc + hchp)
# sum up; note that the factor is 2df instead of 4df to account
# for the factor of 1/2
loglr += norm + 2*invpsd.delta_f*(hd - hh)
lognl += -2 * invpsd.delta_f * dd
# store the maxl polarization
idx = loglr.argmax()
setattr(self._current_stats, 'maxl_polarization', self.pol[idx])
setattr(self._current_stats, 'maxl_logl', loglr[idx] + lognl)
# compute the marginalized log likelihood
marglogl = special.logsumexp(loglr) + lognl - numpy.log(len(self.pol))
return float(marglogl)
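# ---------------------------------------------------------------------------
# Editorial example (added for illustration; not part of the original
# module): a minimal sketch of the polarization projection that
# GatedGaussianMargPol marginalizes over. The sky location, GPS time, and
# number of polarization samples below are invented; only
# pycbc.detector.Detector.antenna_pattern is assumed, which this model
# already uses above.
def _example_polarization_projection():  # pragma: no cover
    import numpy as np
    from pycbc.detector import Detector
    ra, dec, tc = 1.7, -1.2, 1126259462.4
    pol = np.linspace(0, 2 * np.pi, 1000)
    # fp, fc are arrays, one value per polarization sample
    fp, fc = Detector('H1').antenna_pattern(ra, dec, pol, tc)
    # for each sample, h = fp*hp + fc*hc, so the inner products combine as
    # <h, h> = fp^2 <hp, hp> + fc^2 <hc, hc> + fp*fc (<hp, hc> + <hc, hp>)
    return fp, fc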
| 31,219
| 39.231959
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/models/data_utils.py
|
# Copyright (C) 2018 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Utilities for loading data for models.
"""
import logging
from argparse import ArgumentParser
from time import sleep
import numpy
try:
from mpi4py import MPI
except ImportError:
MPI = None
from pycbc.types import MultiDetOptionAction
from pycbc.psd import (insert_psd_option_group_multi_ifo,
from_cli_multi_ifos as psd_from_cli_multi_ifos,
verify_psd_options_multi_ifo)
from pycbc import strain
from pycbc.strain import (gates_from_cli, psd_gates_from_cli,
apply_gates_to_td, apply_gates_to_fd,
verify_strain_options_multi_ifo)
from pycbc import dq
def strain_from_cli_multi_ifos(*args, **kwargs):
"""Wrapper around strain.from_cli_multi_ifos that tries a few times before
quitting.
When running in a parallel environment, multiple concurrent queries to the
segment data base can cause time out errors. If that happens, this will
sleep for a few seconds, then try again a few times before giving up.
"""
count = 0
while count < 3:
try:
return strain.from_cli_multi_ifos(*args, **kwargs)
except RuntimeError as e:
exception = e
count += 1
sleep(10)
# if we get to here, we've tried 3 times and still got an error, so exit
raise exception
#
# =============================================================================
#
# Utilities for gravitational-wave data
#
# =============================================================================
#
class NoValidDataError(Exception):
"""This should be raised if a continous segment of valid data could not be
found.
"""
pass
def create_data_parser():
"""Creates an argument parser for loading GW data."""
parser = ArgumentParser()
# add data options
parser.add_argument("--instruments", type=str, nargs="+", required=True,
help="Instruments to analyze, eg. H1 L1.")
parser.add_argument("--trigger-time", type=float, default=0.,
help="Reference GPS time (at geocenter) from which "
"the (anlaysis|psd)-(start|end)-time options are "
"measured. The integer seconds will be used. "
"Default is 0; i.e., if not provided, "
"the analysis and psd times should be in GPS "
"seconds.")
parser.add_argument("--analysis-start-time", type=int, required=True,
nargs='+', action=MultiDetOptionAction,
metavar='IFO:TIME',
help="The start time to use for the analysis, "
"measured with respect to the trigger-time. "
"If psd-inverse-length is provided, the given "
"start time will be padded by half that length "
"to account for wrap-around effects.")
parser.add_argument("--analysis-end-time", type=int, required=True,
nargs='+', action=MultiDetOptionAction,
metavar='IFO:TIME',
help="The end time to use for the analysis, "
"measured with respect to the trigger-time. "
"If psd-inverse-length is provided, the given "
"end time will be padded by half that length "
"to account for wrap-around effects.")
parser.add_argument("--psd-start-time", type=int, default=None,
nargs='+', action=MultiDetOptionAction,
metavar='IFO:TIME',
help="Start time to use for PSD estimation, measured "
"with respect to the trigger-time.")
parser.add_argument("--psd-end-time", type=int, default=None,
nargs='+', action=MultiDetOptionAction,
metavar='IFO:TIME',
help="End time to use for PSD estimation, measured "
"with respect to the trigger-time.")
parser.add_argument("--data-conditioning-low-freq", type=float,
nargs="+", action=MultiDetOptionAction,
metavar='IFO:FLOW', dest="low_frequency_cutoff",
help="Low frequency cutoff of the data. Needed for "
"PSD estimation and when creating fake strain. "
"If not provided, will use the model's "
"low-frequency-cutoff.")
insert_psd_option_group_multi_ifo(parser)
strain.insert_strain_option_group_multi_ifo(parser, gps_times=False)
strain.add_gate_option_group(parser)
# add arguments for dq
dqgroup = parser.add_argument_group("Options for querying data quality "
"(DQ)")
dqgroup.add_argument('--dq-segment-name', default='DATA',
help='The status flag to query for data quality. '
'Default is "DATA".')
dqgroup.add_argument('--dq-source', choices=['any', 'GWOSC', 'dqsegdb'],
default='any',
help='Where to look for DQ information. If "any" '
'(the default) will first try GWOSC, then '
'dqsegdb.')
dqgroup.add_argument('--dq-server', default='https://segments.ligo.org',
help='The server to use for dqsegdb.')
dqgroup.add_argument('--veto-definer', default=None,
help='Path to a veto definer file that defines '
'groups of flags, which themselves define a set '
'of DQ segments.')
return parser
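# Editorial example (added for illustration; not part of the original
# module): a hedged sketch of a command-line-style option string for the
# parser above. All values are invented, and a complete invocation also
# needs the strain/PSD options contributed by the option groups added in
# create_data_parser (frame/channel or fake-strain settings, sample rate,
# PSD estimation options, etc.).
def _example_data_option_string():  # pragma: no cover
    # Per-detector options (MultiDetOptionAction) accept either a single
    # value applied to all instruments or explicit IFO:VALUE pairs.
    return ("--instruments H1 L1 "
            "--trigger-time 1126259462.43 "
            "--analysis-start-time -6 "
            "--analysis-end-time 2 "
            "--psd-start-time H1:-144 L1:-144 "
            "--psd-end-time H1:112 L1:112 "
            "--data-conditioning-low-freq 15")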
def check_validtimes(detector, gps_start, gps_end, shift_to_valid=False,
max_shift=None, segment_name='DATA',
**kwargs):
r"""Checks DQ server to see if the given times are in a valid segment.
If the ``shift_to_valid`` flag is provided, the times will be shifted left
or right to try to find a continuous valid block nearby. The shifting starts
by shifting the times left by 1 second. If that does not work, it shifts
the times right by one second. This continues, increasing the shift time by
1 second, until a valid block could be found, or until the shift size
exceeds ``max_shift``.
If the given times are not in a continuous valid segment, or a valid block
cannot be found nearby, a ``NoValidDataError`` is raised.
Parameters
----------
detector : str
The name of the detector to query; e.g., 'H1'.
gps_start : int
The GPS start time of the segment to query.
gps_end : int
The GPS end time of the segment to query.
shift_to_valid : bool, optional
If True, will try to shift the gps start and end times to the nearest
continuous valid segment of data. Default is False.
max_shift : int, optional
The maximum number of seconds to try to shift left or right to find
a valid segment. Default is ``gps_end - gps_start``.
segment_name : str, optional
The status flag to query; passed to :py:func:`pycbc.dq.query_flag`.
Default is "DATA".
\**kwargs :
All other keyword arguments are passed to
:py:func:`pycbc.dq.query_flag`.
Returns
-------
use_start : int
The start time to use. If ``shift_to_valid`` is True, this may differ
from the given GPS start time.
use_end : int
The end time to use. If ``shift_to_valid`` is True, this may differ
from the given GPS end time.
"""
# expand the times checked in case we need to shift
if max_shift is None:
max_shift = int(gps_end - gps_start)
check_start = gps_start - max_shift
check_end = gps_end + max_shift
# if we're running in an MPI environment and we're not the parent process,
# we'll wait before querying the segment database. This will result in
# getting the segments from the cache, so as not to overload the database
if MPI is not None and (MPI.COMM_WORLD.Get_size() > 1 and
MPI.COMM_WORLD.Get_rank() != 0):
# we'll wait for 2 minutes
sleep(120)
validsegs = dq.query_flag(detector, segment_name, check_start,
check_end, cache=True,
**kwargs)
use_start = gps_start
use_end = gps_end
# shift if necessary
if shift_to_valid:
shiftsize = 1
while (use_start, use_end) not in validsegs and shiftsize < max_shift:
# try shifting left
use_start = gps_start - shiftsize
use_end = gps_end - shiftsize
if (use_start, use_end) not in validsegs:
# try shifting right
use_start = gps_start + shiftsize
use_end = gps_end + shiftsize
shiftsize += 1
# check that we have a valid range
if (use_start, use_end) not in validsegs:
raise NoValidDataError("Could not find a continuous valid segment "
"in detector {}".format(detector))
return use_start, use_end
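# Editorial example (added for illustration; not part of the original
# module): a hedged sketch of calling check_validtimes around an assumed
# event time. Note this queries a DQ source (GWOSC or dqsegdb) at run time,
# so it needs network access; the GPS times below are invented.
def _example_check_validtimes():  # pragma: no cover
    start, end = check_validtimes('H1', 1126259454, 1126259470,
                                  shift_to_valid=True, max_shift=32,
                                  segment_name='DATA')
    return start, end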
def detectors_with_valid_data(detectors, gps_start_times, gps_end_times,
pad_data=None, err_on_missing_detectors=False,
**kwargs):
r"""Determines which detectors have valid data.
Parameters
----------
detectors : list of str
Names of the detector names to check.
gps_start_times : dict
Dictionary of detector name -> start time listing the GPS start times
of the segment to check for each detector.
gps_end_times : dict
Dictionary of detector name -> end time listing the GPS end times of
the segment to check for each detector.
pad_data : dict, optional
Dictionary of detector name -> pad time to add to the beginning/end of
the GPS start/end times before checking. A pad time for every detector
in ``detectors`` must be given. Default (None) is to apply no pad to
the times.
err_on_missing_detectors : bool, optional
If True, a ``NoValidDataError`` will be raised if any detector does not
have continuous valid data in its requested segment. Otherwise, the
detector will not be included in the returned list of detectors with
valid data. Default is False.
\**kwargs :
All other keyword arguments are passed to ``check_validtimes``.
Returns
-------
dict :
A dictionary of detector name -> valid times giving the detectors with
valid data and their segments. If ``shift_to_valid`` was passed to
``check_validtimes`` this may not be the same as the input segments. If
no valid times could be found for a detector (and
``err_on_missing_detectors`` is False), it will not be included in the
returned dictionary.
"""
if pad_data is None:
pad_data = {det: 0 for det in detectors}
dets_with_data = {}
for det in detectors:
logging.info("Checking that %s has valid data in the requested "
"segment", det)
try:
pad = pad_data[det]
start, end = check_validtimes(det, gps_start_times[det]-pad,
gps_end_times[det]+pad,
**kwargs)
dets_with_data[det] = (start+pad, end-pad)
except NoValidDataError as e:
if err_on_missing_detectors:
raise e
logging.warning("WARNING: Detector %s will not be used in "
"the analysis, as it does not have "
"continuous valid data that spans the "
"segment [%d, %d).", det, gps_start_times[det]-pad,
gps_end_times[det]+pad)
return dets_with_data
def check_for_nans(strain_dict):
"""Checks if any data in a dictionary of strains has NaNs.
If any NaNs are found, a ``ValueError`` is raised.
Parameters
----------
strain_dict : dict
Dictionary of detectors ->
:py:class:`pycbc.types.timeseries.TimeSeries`.
"""
for det, ts in strain_dict.items():
if numpy.isnan(ts.numpy()).any():
raise ValueError("NaN found in strain from {}".format(det))
def data_opts_from_config(cp, section, filter_flow):
"""Loads data options from a section in a config file.
Parameters
----------
cp : WorkflowConfigParser
Config file to read.
section : str
The section to read. All options in the section will be loaded as if
they were command-line arguments.
filter_flow : dict
Dictionary of detectors -> inner product low frequency cutoffs.
If a `data-conditioning-low-freq` cutoff wasn't provided for any
of the detectors, these values will be used. Otherwise, the
data-conditioning-low-freq must be less than the inner product cutoffs.
If any are not, a ``ValueError`` is raised.
Returns
-------
opts : parsed argparse.ArgumentParser
An argument parser namespace that was constructed as if the options
were specified on the command line.
"""
# convert the section options into command-line options
optstr = cp.section_to_cli(section)
# create a fake parser to parse them
parser = create_data_parser()
# parse the options
opts = parser.parse_args(optstr.split())
# figure out the times to use
logging.info("Determining analysis times to use")
opts.trigger_time = int(opts.trigger_time)
gps_start = opts.analysis_start_time.copy()
gps_end = opts.analysis_end_time.copy()
for det in opts.instruments:
gps_start[det] += opts.trigger_time
gps_end[det] += opts.trigger_time
if opts.psd_inverse_length[det] is not None:
pad = int(numpy.ceil(opts.psd_inverse_length[det] / 2))
logging.info("Padding %s analysis start and end times by %d "
"(= psd-inverse-length/2) seconds to "
"account for PSD wrap around effects.",
det, pad)
else:
pad = 0
gps_start[det] -= pad
gps_end[det] += pad
if opts.psd_start_time[det] is not None:
opts.psd_start_time[det] += opts.trigger_time
if opts.psd_end_time[det] is not None:
opts.psd_end_time[det] += opts.trigger_time
opts.gps_start_time = gps_start
opts.gps_end_time = gps_end
# check for the frequencies
low_freq_cutoff = filter_flow.copy()
if opts.low_frequency_cutoff:
# add in any missing detectors
low_freq_cutoff.update({det: opts.low_frequency_cutoff[det]
for det in opts.instruments
if opts.low_frequency_cutoff[det] is not None})
# make sure the data conditioning low frequency cutoff is less than
# the matched filter cutoff
if any(low_freq_cutoff[det] > filter_flow[det] for det in filter_flow):
raise ValueError("data conditioning low frequency cutoff must "
"be less than the filter low frequency "
"cutoff")
opts.low_frequency_cutoff = low_freq_cutoff
# verify options are sane
verify_psd_options_multi_ifo(opts, parser, opts.instruments)
verify_strain_options_multi_ifo(opts, parser, opts.instruments)
return opts
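# Editorial example (added for illustration; not part of the original
# module): a hedged sketch of the kind of config section this function
# consumes. Option names mirror the command-line options of
# create_data_parser without the leading "--"; all values below are
# invented and a real analysis will need settings appropriate to the data
# being read (frame/channel or fake-strain options, etc.).
#
#   [data]
#   instruments = H1 L1
#   trigger-time = 1126259462.43
#   analysis-start-time = -6
#   analysis-end-time = 2
#   psd-estimation = median-mean
#   psd-start-time = -256
#   psd-end-time = 256
#   psd-inverse-length = 8
#   data-conditioning-low-freq = 15
#   sample-rate = 2048
#   strain-high-pass = 10
#   pad-data = 8
#   channel-name = H1:GWOSC-16KHZ_R1_STRAIN L1:GWOSC-16KHZ_R1_STRAIN
#   frame-type = H1:H1_GWOSC_O2_16KHZ_R1 L1:L1_GWOSC_O2_16KHZ_R1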
def data_from_cli(opts, check_for_valid_times=False,
shift_psd_times_to_valid=False,
err_on_missing_detectors=False):
"""Loads the data needed for a model from the given command-line options.
Gates specified on the command line are also applied.
Parameters
----------
opts : ArgumentParser parsed args
Argument options parsed from a command line string (the sort of thing
returned by `parser.parse_args`).
check_for_valid_times : bool, optional
Check that valid data exists in the requested gps times. Default is
False.
shift_psd_times_to_valid : bool, optional
If estimating the PSD from data, shift the PSD times to a valid
segment if needed. Default is False.
err_on_missing_detectors : bool, optional
Raise a NoValidDataError if any detector does not have valid data.
Otherwise, a warning is printed, and that detector is skipped.
Returns
-------
strain_dict : dict
Dictionary of detectors -> time series strain.
psd_strain_dict : dict or None
If ``opts.psd_(start|end)_time`` were set, a dictionary of
detectors -> time series data to use for PSD estimation. Otherwise,
``None``.
"""
# get gates to apply
gates = gates_from_cli(opts)
psd_gates = psd_gates_from_cli(opts)
# get strain time series
instruments = opts.instruments
# validate times
if check_for_valid_times:
dets_with_data = detectors_with_valid_data(
instruments, opts.gps_start_time, opts.gps_end_time,
pad_data=opts.pad_data,
err_on_missing_detectors=err_on_missing_detectors,
shift_to_valid=False,
segment_name=opts.dq_segment_name, source=opts.dq_source,
server=opts.dq_server, veto_definer=opts.veto_definer)
# reset instruments to only be those with valid data
instruments = list(dets_with_data.keys())
strain_dict = strain_from_cli_multi_ifos(opts, instruments,
precision="double")
# apply gates if not waiting to overwhiten
if not opts.gate_overwhitened:
strain_dict = apply_gates_to_td(strain_dict, gates)
# check that there aren't nans in the data
check_for_nans(strain_dict)
# get strain time series to use for PSD estimation
# if the user has not given the PSD time options, the same data as the
# analysis is used for PSD estimation
if opts.psd_start_time and opts.psd_end_time:
logging.info("Will generate a different time series for PSD "
"estimation")
if check_for_valid_times:
psd_times = detectors_with_valid_data(
instruments, opts.psd_start_time, opts.psd_end_time,
pad_data=opts.pad_data,
err_on_missing_detectors=err_on_missing_detectors,
shift_to_valid=shift_psd_times_to_valid,
segment_name=opts.dq_segment_name, source=opts.dq_source,
server=opts.dq_server, veto_definer=opts.veto_definer)
# remove detectors from the strain dict that did not have valid
# times for PSD estimation
for det in set(strain_dict.keys())-set(psd_times.keys()):
_ = strain_dict.pop(det)
# reset instruments to only be those with valid data
instruments = list(psd_times.keys())
else:
psd_times = {det: (opts.psd_start_time[det],
opts.psd_end_time[det])
for det in instruments}
psd_strain_dict = {}
for det, (psd_start, psd_end) in psd_times.items():
opts.gps_start_time = psd_start
opts.gps_end_time = psd_end
psd_strain_dict.update(
strain_from_cli_multi_ifos(opts, [det], precision="double"))
# apply any gates
logging.info("Applying gates to PSD data")
psd_strain_dict = apply_gates_to_td(psd_strain_dict, psd_gates)
# check that there aren't nans in the psd data
check_for_nans(psd_strain_dict)
elif opts.psd_start_time or opts.psd_end_time:
raise ValueError("Must give psd-start-time and psd-end-time")
else:
psd_strain_dict = None
# check that we have data left to analyze
if instruments == []:
raise NoValidDataError("No valid data could be found in any of the "
"requested instruments.")
return strain_dict, psd_strain_dict
def fd_data_from_strain_dict(opts, strain_dict, psd_strain_dict=None):
"""Converts a dictionary of time series to the frequency domain, and gets
the PSDs.
Parameters
----------
opts : ArgumentParser parsed args
Argument options parsed from a command line string (the sort of thing
returned by `parser.parse_args`).
strain_dict : dict
Dictionary of detectors -> time series data.
psd_strain_dict : dict, optional
Dictionary of detectors -> time series data to use for PSD estimation.
If not provided, will use the ``strain_dict``. This is
ignored if ``opts.psd_estimation`` is not set. See
:py:func:`pycbc.psd.psd_from_cli_multi_ifos` for details.
Returns
-------
stilde_dict : dict
Dictionary of detectors -> frequency series data.
psd_dict : dict
Dictionary of detectors -> frequency-domain PSDs.
"""
# FFT the strain and save the FFT length and delta_f of each detector's
# data to dicts; these are needed for the PSD estimation below
stilde_dict = {}
length_dict = {}
delta_f_dict = {}
for det, tsdata in strain_dict.items():
stilde_dict[det] = tsdata.to_frequencyseries()
length_dict[det] = len(stilde_dict[det])
delta_f_dict[det] = stilde_dict[det].delta_f
if psd_strain_dict is None:
psd_strain_dict = strain_dict
# get PSD as frequency series
psd_dict = psd_from_cli_multi_ifos(
opts, length_dict, delta_f_dict, opts.low_frequency_cutoff,
list(psd_strain_dict.keys()), strain_dict=psd_strain_dict,
precision="double")
return stilde_dict, psd_dict
def gate_overwhitened_data(stilde_dict, psd_dict, gates):
"""Applies gates to overwhitened data.
Parameters
----------
stilde_dict : dict
Dictionary of detectors -> frequency series data to apply the gates to.
psd_dict : dict
Dictionary of detectors -> PSD to use for overwhitening.
gates : dict
Dictionary of detectors -> gates.
Returns
-------
dict :
Dictionary of detectors -> frequency series data with the gates
applied after overwhitening. The returned data is not overwhitened.
"""
logging.info("Applying gates to overwhitened data")
# overwhiten the data
out = {}
for det in gates:
out[det] = stilde_dict[det] / psd_dict[det]
# now apply the gate
out = apply_gates_to_fd(out, gates)
# now unwhiten
for det in gates:
out[det] *= psd_dict[det]
return out
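# ---------------------------------------------------------------------------
# Editorial example (added for illustration; not part of the original
# module): a hedged sketch of how the functions above fit together when
# loading data. The config file name, section name, and cutoffs are
# invented; WorkflowConfigParser is assumed to be importable from
# pycbc.workflow.
def _example_load_data_flow():  # pragma: no cover
    from pycbc.workflow import WorkflowConfigParser
    cp = WorkflowConfigParser(['inference.ini'])
    # low frequency cutoffs the model will use for inner products
    filter_flow = {'H1': 20., 'L1': 20.}
    opts = data_opts_from_config(cp, 'data', filter_flow)
    strain, psd_strain = data_from_cli(opts, check_for_valid_times=True)
    stilde, psds = fd_data_from_strain_dict(opts, strain, psd_strain)
    return stilde, psds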
| 23,677
| 41.663063
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/models/tools.py
|
""" Common utility functions for calculation of likelihoods
"""
import logging
import warnings
from distutils.util import strtobool
import numpy
import numpy.random
import tqdm
from scipy.special import logsumexp, i0e
from scipy.interpolate import RectBivariateSpline, interp1d
from pycbc.distributions import JointDistribution
from pycbc.detector import Detector
# Earth radius in seconds
EARTH_RADIUS = 0.031
def str_to_tuple(sval, ftype):
""" Convenience parsing to convert str to tuple"""
if sval is None:
return ()
return tuple(ftype(x.strip(' ')) for x in sval.split(','))
def str_to_bool(sval):
""" Ensure value is a bool if it can be converted """
if isinstance(sval, str):
return strtobool(sval)
return sval
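# Editorial example (added for illustration; not part of the original
# module): the two small parsers above in action, with invented inputs.
def _example_str_parsers():  # pragma: no cover
    assert str_to_tuple("1, 20.5, 300", float) == (1.0, 20.5, 300.0)
    assert str_to_tuple(None, int) == ()
    assert str_to_bool("true") == 1       # strtobool returns an int
    assert str_to_bool(False) is False    # non-strings pass through unchanged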
def draw_sample(loglr, size=None):
""" Draw a random index from a 1-d vector with loglr weights
"""
if size:
x = numpy.random.uniform(size=size)
else:
x = numpy.random.uniform()
loglr = loglr - loglr.max()
cdf = numpy.exp(loglr).cumsum()
cdf /= cdf[-1]
xl = numpy.searchsorted(cdf, x)
return xl
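# Editorial example (added for illustration; not part of the original
# module): draw_sample picks indices with probability proportional to
# exp(loglr). With the toy log weights below, index 2 should be drawn
# roughly exp(2) / (1 + e + exp(2)) ~ 67% of the time.
def _example_draw_sample():  # pragma: no cover
    loglr = numpy.array([0.0, 1.0, 2.0])
    idx = draw_sample(loglr, size=10000)
    return numpy.bincount(idx, minlength=3) / len(idx)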
class DistMarg():
"""Help class to add bookkeeping for likelihood marginalization"""
marginalize_phase = None
distance_marginalization = None
distance_interpolator = None
def setup_marginalization(self,
variable_params,
marginalize_phase=False,
marginalize_distance=False,
marginalize_distance_param='distance',
marginalize_distance_samples=int(1e4),
marginalize_distance_interpolator=False,
marginalize_distance_snr_range=None,
marginalize_distance_density=None,
marginalize_vector_params=None,
marginalize_vector_samples=1e3,
marginalize_sky_initial_samples=1e6,
**kwargs):
""" Setup the model for use with distance marginalization
This function sets up precalculations for distance / phase
marginalization. For distance marginalization it modifies the
model to internally remove distance as a parameter.
Parameters
----------
variable_params: list of strings
The set of variable parameters
marginalize_phase: bool, False
Do analytic marginalization (appropriate only for 22-mode waveforms)
marginalize_distance: bool, False
Marginalize over distance
marginalize_distance_param: str
Name of the parameter that is used to determine the distance.
This might be 'distance' or a parameter which can be converted
to distance by a provided univariate transformation.
marginalize_distance_interpolator: bool
Use a pre-calculated interpolating function for the distance
marginalized likelihood.
marginalize_distance_snr_range: tuple of floats, (1, 50)
The SNR range for the interpolating function to be defined in.
If a sampler goes outside this range, the logl will be returned
as -numpy.inf.
marginalize_distance_density: tuple of ints, (1000, 1000)
The dimensions of the interpolation grid over (sh, hh).
Returns
-------
variable_params: list of strings
Set of variable params (missing distance-related parameter).
kwargs: dict
The keyword arguments to the model initialization, may be modified
from the original set by this function.
"""
def pop_prior(param):
variable_params.remove(param)
old_prior = kwargs['prior']
dists = [d for d in old_prior.distributions
if param not in d.params]
dprior = [d for d in old_prior.distributions
if param in d.params][0]
prior = JointDistribution(variable_params,
*dists, **old_prior.kwargs)
kwargs['prior'] = prior
return dprior
self.reconstruct_phase = False
self.reconstruct_distance = False
self.reconstruct_vector = False
self.precalc_antenna_factors = False
# Handle any requested parameter vector / brute force marginalizations
self.marginalize_vector_params = {}
self.marginalized_vector_priors = {}
self.vsamples = int(marginalize_vector_samples)
self.marginalize_sky_initial_samples = \
int(float(marginalize_sky_initial_samples))
for param in str_to_tuple(marginalize_vector_params, str):
logging.info('Marginalizing over %s, %s points from prior',
param, self.vsamples)
self.marginalized_vector_priors[param] = pop_prior(param)
# Remove in the future, backwards compatibility
if 'polarization_samples' in kwargs:
warnings.warn("use marginalize_vector_samples rather "
"than 'polarization_samples'", DeprecationWarning)
pol_uniform = numpy.linspace(0, numpy.pi * 2.0, self.vsamples)
self.marginalize_vector_params['polarization'] = pol_uniform
self.vsamples = int(kwargs['polarization_samples'])
kwargs.pop('polarization_samples')
self.reset_vector_params()
self.marginalize_phase = str_to_bool(marginalize_phase)
self.distance_marginalization = False
self.distance_interpolator = None
marginalize_distance = str_to_bool(marginalize_distance)
self.marginalize_distance = marginalize_distance
if not marginalize_distance:
return variable_params, kwargs
if isinstance(marginalize_distance_snr_range, str):
marginalize_distance_snr_range = \
str_to_tuple(marginalize_distance_snr_range, float)
if isinstance(marginalize_distance_density, str):
marginalize_distance_density = \
str_to_tuple(marginalize_distance_density, int)
logging.info('Marginalizing over distance')
# Take distance out of the variable params since we'll handle it
# manually now
dprior = pop_prior(marginalize_distance_param)
if len(dprior.params) != 1 or not hasattr(dprior, 'bounds'):
raise ValueError('Distance Marginalization requires a '
'univariate and bounded prior')
# Set up distance prior vector and samples
# (1) prior is using distance
if dprior.params[0] == 'distance':
logging.info("Prior is directly on distance, setting up "
"%s grid weights", marginalize_distance_samples)
dmin, dmax = dprior.bounds['distance']
dist_locs = numpy.linspace(dmin, dmax,
int(marginalize_distance_samples))
dist_weights = [dprior.pdf(distance=l) for l in dist_locs]
dist_weights = numpy.array(dist_weights)
# (2) prior is univariate and can be converted to distance
elif marginalize_distance_param != 'distance':
waveform_transforms = kwargs['waveform_transforms']
pname = dprior.params[0]
logging.info("Settings up transform, prior is in terms of"
" %s", pname)
wtrans = [d for d in waveform_transforms
if 'distance' not in d.outputs]
if len(wtrans) == 0:
wtrans = None
kwargs['waveform_transforms'] = wtrans
dtrans = [d for d in waveform_transforms
if 'distance' in d.outputs][0]
v = dprior.rvs(int(1e8))
d = dtrans.transform({pname: v[pname]})['distance']
d.sort()
cdf = numpy.arange(1, len(d)+1) / len(d)
i = interp1d(d, cdf)
dmin, dmax = d.min(), d.max()
logging.info('Distance range %s-%s', dmin, dmax)
x = numpy.linspace(dmin, dmax,
int(marginalize_distance_samples) + 1)
xl, xr = x[:-1], x[1:]
dist_locs = 0.5 * (xr + xl)
dist_weights = i(xr) - i(xl)
else:
raise ValueError("No prior seems to determine the distance")
dist_weights /= dist_weights.sum()
dist_ref = 0.5 * (dmax + dmin)
self.dist_locs = dist_locs
self.distance_marginalization = dist_ref / dist_locs, dist_weights
self.distance_interpolator = None
if str_to_bool(marginalize_distance_interpolator):
setup_args = {}
if marginalize_distance_snr_range:
setup_args['snr_range'] = marginalize_distance_snr_range
if marginalize_distance_density:
setup_args['density'] = marginalize_distance_density
i = setup_distance_marg_interpolant(self.distance_marginalization,
phase=self.marginalize_phase,
**setup_args)
self.distance_interpolator = i
kwargs['static_params']['distance'] = dist_ref
return variable_params, kwargs
def reset_vector_params(self):
""" Redraw vector params from their priors
"""
for param in self.marginalized_vector_priors:
vprior = self.marginalized_vector_priors[param]
values = vprior.rvs(self.vsamples)[param]
self.marginalize_vector_params[param] = values
def marginalize_loglr(self, sh_total, hh_total,
skip_vector=False, return_peak=False):
""" Return the marginal likelihood
Parameters
-----------
sh_total: float or ndarray
The total <s|h> inner product summed over detectors
hh_total: float or ndarray
The total <h|h> inner product summed over detectors
skip_vector: bool, False
If true, and input is a vector, do not marginalize over that
vector, instead return the likelihood values as a vector.
"""
interpolator = self.distance_interpolator
return_complex = False
distance = self.distance_marginalization
if self.reconstruct_vector:
skip_vector = True
if self.reconstruct_distance:
interpolator = None
skip_vector = True
if self.reconstruct_phase:
interpolator = None
distance = False
skip_vector = True
return_complex = True
return marginalize_likelihood(sh_total, hh_total,
logw=self.marginalize_vector_weights,
phase=self.marginalize_phase,
interpolator=interpolator,
distance=distance,
skip_vector=skip_vector,
return_complex=return_complex,
return_peak=return_peak)
def premarg_draw(self):
""" Choose random samples from prechosen set"""
# Update the current proposed times and the marginalization values
logw = self.premarg['logw_partial']
choice = numpy.random.randint(0, len(logw), size=self.vsamples)
for k in self.snr_params:
self.marginalize_vector_params[k] = self.premarg[k][choice]
self._current_params.update(self.marginalize_vector_params)
self.sample_idx = self.premarg['sample_idx'][choice]
# Update the importance weights for each vector sample
logw = self.marginalize_vector_weights + logw[choice]
self.marginalize_vector_weights = logw - logsumexp(logw)
return self.marginalize_vector_params
def snr_draw(self, wfs=None, snrs=None, size=None):
""" Improve the monte-carlo vector marginalization using the SNR time
series of each detector
"""
try:
p = self.current_params
set_scalar = numpy.isscalar(p['tc'])
except:
set_scalar = False
if not set_scalar:
if hasattr(self, 'premarg'):
return self.premarg_draw()
if snrs is None:
snrs = self.get_snr(wfs)
if ('tc' in self.marginalized_vector_priors and
not ('ra' in self.marginalized_vector_priors
or 'dec' in self.marginalized_vector_priors)):
return self.draw_times(snrs, size=size)
elif ('tc' in self.marginalized_vector_priors and
'ra' in self.marginalized_vector_priors and
'dec' in self.marginalized_vector_priors):
return self.draw_sky_times(snrs, size=size)
else:
# OK, we couldn't do anything with the requested monte-carlo
# marginalizations.
self.precalc_antenna_factors = None
return None
def draw_times(self, snrs, size=None):
""" Draw times consistent with the incoherent network SNR
Parameters
----------
snrs: dict of TimeSeries
"""
if not hasattr(self, 'tinfo'):
# determine the rough time offsets for this sky location
tcmin, tcmax = self.marginalized_vector_priors['tc'].bounds['tc']
tcave = (tcmax + tcmin) / 2.0
ifos = list(snrs.keys())
if hasattr(self, 'keep_ifos'):
ifos = self.keep_ifos
d = {ifo: Detector(ifo, reference_time=tcave) for ifo in ifos}
self.tinfo = tcmin, tcmax, tcave, ifos, d
self.snr_params = ['tc']
tcmin, tcmax, tcave, ifos, d = self.tinfo
vsamples = size if size is not None else self.vsamples
# Determine the weights for the valid time range
ra = self._current_params['ra']
dec = self._current_params['dec']
# Determine the common valid time range
iref = ifos[0]
dref = d[iref]
dt = dref.time_delay_from_earth_center(ra, dec, tcave)
starts = []
ends = []
tmin, tmax = tcmin - dt, tcmax + dt
if hasattr(self, 'tstart'):
tmin = self.tstart[iref]
tmax = self.tend[iref]
starts.append(max(tmin, snrs[iref].start_time))
ends.append(min(tmax, snrs[iref].end_time))
idels = {}
for ifo in ifos[1:]:
dti = d[ifo].time_delay_from_detector(dref, ra, dec, tcave)
idel = round(dti / snrs[iref].delta_t) * snrs[iref].delta_t
idels[ifo] = idel
starts.append(snrs[ifo].start_time - idel)
ends.append(snrs[ifo].end_time - idel)
start = max(starts)
end = min(ends)
if end <= start:
return
# get the weights
snr = snrs[iref].time_slice(start, end, mode='nearest')
logweight = snr.squared_norm().numpy()
for ifo in ifos[1:]:
idel = idels[ifo]
snrv = snrs[ifo].time_slice(snr.start_time + idel,
snr.end_time + idel,
mode='nearest')
logweight += snrv.squared_norm().numpy()
logweight /= 2.0
# Draw proportional to the incoherent likelihood
# First, draw which time sample
tci = draw_sample(logweight, size=vsamples)
# Second, draw a sub-sample time offset so that all times are covered
tct = numpy.random.uniform(-snr.delta_t / 2.0,
snr.delta_t / 2.0,
size=vsamples)
tc = tct + tci * snr.delta_t + float(snr.start_time) - dt
# Update the current proposed times and the marginalization values
logw = - logweight[tci]
self.marginalize_vector_params['tc'] = tc
self.marginalize_vector_params['logw_partial'] = logw
if self._current_params is not None:
# Update the importance weights for each vector sample
logw = self.marginalize_vector_weights + logw
self._current_params.update(self.marginalize_vector_params)
self.marginalize_vector_weights = logw - logsumexp(logw)
return self.marginalize_vector_params
def draw_sky_times(self, snrs, size=None):
""" Draw ra, dec, and tc together using SNR timeseries to determine
monte-carlo weights.
"""
# First setup
# precalculate a dense sky grid and store the results in a dict/array
ifos = list(snrs.keys())
if hasattr(self, 'keep_ifos'):
ifos = self.keep_ifos
ikey = ''.join(ifos)
# No good SNR peaks, go with prior draw
if len(ifos) == 0:
return
def make_init():
self.snr_params = ['tc', 'ra', 'dec']
size = self.marginalize_sky_initial_samples
logging.info('drawing samples: %s', size)
ra = self.marginalized_vector_priors['ra'].rvs(size=size)['ra']
dec = self.marginalized_vector_priors['dec'].rvs(size=size)['dec']
tcmin, tcmax = self.marginalized_vector_priors['tc'].bounds['tc']
tcave = (tcmax + tcmin) / 2.0
d = {ifo: Detector(ifo, reference_time=tcave) for ifo in self.data}
# What data structure to hold times? Dict of offset -> list?
logging.info('sorting into time delay dict')
dts = []
for i in range(len(ifos) - 1):
dt = d[ifos[0]].time_delay_from_detector(d[ifos[i+1]],
ra, dec, tcave)
dt = numpy.rint(dt / snrs[ifos[0]].delta_t)
dts.append(dt)
fp, fc, dtc = {}, {}, {}
for ifo in self.data:
fp[ifo], fc[ifo] = d[ifo].antenna_pattern(ra, dec, 0.0, tcave)
dtc[ifo] = d[ifo].time_delay_from_earth_center(ra, dec, tcave)
dmap = {}
for i, t in enumerate(tqdm.tqdm(zip(*dts))):
if t not in dmap:
dmap[t] = []
dmap[t].append(i)
if len(ifos) == 1:
dmap[()] = numpy.arange(0, size, 1).astype(int)
return dmap, tcmin, tcmax, fp, fc, ra, dec, dtc
if not hasattr(self, 'tinfo'):
self.tinfo = {}
if ikey not in self.tinfo:
logging.info('pregenerating sky pointings')
self.tinfo[ikey] = make_init()
dmap, tcmin, tcmax, fp, fc, ra, dec, dtc = self.tinfo[ikey]
vsamples = size if size is not None else self.vsamples
# draw times from each snr time series
# Is it worth doing this if some detector has low SNR?
sref = None
iref = None
idx = []
dx = []
mcweight = None
for ifo in ifos:
snr = snrs[ifo]
tmin, tmax = tcmin - EARTH_RADIUS, tcmax + EARTH_RADIUS
if hasattr(self, 'tstart'):
tmin = self.tstart[ifo]
tmax = self.tend[ifo]
start = max(tmin, snrs[ifo].start_time)
end = min(tmax, snrs[ifo].end_time)
snr = snr.time_slice(start, end, mode='nearest')
w = snr.squared_norm().numpy() / 2.0
i = draw_sample(w, size=vsamples)
if sref is not None:
mcweight -= w[i]
delt = float(snr.start_time - sref.start_time)
i += round(delt / sref.delta_t)
dx.append(iref - i)
else:
sref = snr
iref = i
mcweight = -w[i]
idx.append(i)
# check if delay is in dict, if not, throw out
ti = []
ix = []
wi = []
rand = numpy.random.uniform(0, 1, size=vsamples)
for i in range(vsamples):
t = tuple(x[i] for x in dx)
if t in dmap:
randi = int(rand[i] * (len(dmap[t])))
ix.append(dmap[t][randi])
wi.append(len(dmap[t]))
ti.append(i)
# If we had really poor efficiency at finding a point, we should
# give up and just use the original random draws
if len(ra) < 0.05 * vsamples:
return
# fill back to fixed size with repeat samples
# sample order is random, so this should be OK statistically
ix = numpy.resize(numpy.array(ix, dtype=int), vsamples)
self.sample_idx = ix
self.precalc_antenna_factors = fp, fc, dtc
ra = ra[ix]
dec = dec[ix]
dtc = {ifo: dtc[ifo][ix] for ifo in dtc}
ti = numpy.resize(numpy.array(ti, dtype=int), vsamples)
wi = numpy.resize(numpy.array(wi), vsamples)
# Second, draw a sub-sample time offset so that all times are covered
tct = numpy.random.uniform(-snr.delta_t / 2.0,
snr.delta_t / 2.0,
size=len(ti))
tc = tct + iref[ti] * snr.delta_t + float(sref.start_time) - dtc[ifos[0]]
# Update the current proposed times and the marginalization values
logw_sky = mcweight[ti] + numpy.log(wi)
self.marginalize_vector_params['tc'] = tc
self.marginalize_vector_params['ra'] = ra
self.marginalize_vector_params['dec'] = dec
self.marginalize_vector_params['logw_partial'] = logw_sky
if self._current_params is not None:
# Update the importance weights for each vector sample
logw = self.marginalize_vector_weights + logw_sky
self._current_params.update(self.marginalize_vector_params)
self.marginalize_vector_weights = logw - logsumexp(logw)
return self.marginalize_vector_params
def get_precalc_antenna_factors(self, ifo):
""" Get the antenna factors for marginalized samples if they exist """
ix = self.sample_idx
fp, fc, dtc = self.precalc_antenna_factors
return fp[ifo][ix], fc[ifo][ix], dtc[ifo][ix]
def setup_peak_lock(self,
sample_rate=4096,
snrs=None,
peak_lock_snr=None,
peak_lock_ratio=1e4,
peak_lock_region=4,
**kwargs):
""" Determine where to constrain marginalization based on
the observed reference SNR peaks.
Parameters
----------
sample_rate : float
The SNR sample rate
snrs : Dict of SNR time series
Either provide this or the model needs a function
to get the reference SNRs.
peak_lock_snr: float
The minimum SNR to bother restricting from the prior range
peak_lock_ratio: float
The likelihood ratio (not log) relative to the peak that is used
as the threshold for the bounding region.
peak_lock_region: int
Number of samples to include beyond the strict region
determined by the relative likelihood
"""
if 'tc' not in self.marginalized_vector_priors:
return
tcmin, tcmax = self.marginalized_vector_priors['tc'].bounds['tc']
tstart = tcmin - EARTH_RADIUS
tmax = tcmax - tcmin + EARTH_RADIUS * 2.0
num_samples = int(tmax * sample_rate)
self.tstart = {ifo: tstart for ifo in self.data}
self.num_samples = {ifo: num_samples for ifo in self.data}
if snrs is None:
if not hasattr(self, 'ref_snr'):
raise ValueError("Model didn't have a reference SNR!")
snrs = self.ref_snr
# Restrict the time range for constructing SNR time series
# to identifiable peaks
if peak_lock_snr is not None:
peak_lock_snr = float(peak_lock_snr)
peak_lock_ratio = float(peak_lock_ratio)
peak_lock_region = int(peak_lock_region)
for ifo in snrs:
s = max(tstart, snrs[ifo].start_time)
e = min(tstart + tmax, snrs[ifo].end_time)
z = snrs[ifo].time_slice(s, e, mode='nearest')
peak_snr, imax = z.abs_max_loc()
times = z.sample_times
peak_time = times[imax]
logging.info('%s: Max Ref SNR Peak of %s at %s',
ifo, peak_snr, peak_time)
if peak_snr > peak_lock_snr:
target = peak_snr ** 2.0 / 2.0 - numpy.log(peak_lock_ratio)
target = (target * 2.0) ** 0.5
region = numpy.where(abs(z) > target)[0]
ts = times[region[0]] - peak_lock_region / sample_rate
te = times[region[-1]] + peak_lock_region / sample_rate
self.tstart[ifo] = ts
self.num_samples[ifo] = int((te - ts) * sample_rate)
# Check times are commensurate with each other
for ifo in snrs:
ts = self.tstart[ifo]
te = ts + self.num_samples[ifo] / sample_rate
for ifo2 in snrs:
if ifo == ifo2:
continue
ts2 = self.tstart[ifo2]
te2 = ts2 + self.num_samples[ifo2] / sample_rate
det = Detector(ifo)
dt = Detector(ifo2).light_travel_time_to_detector(det)
ts = max(ts, ts2 - dt)
te = min(te, te2 + dt)
self.tstart[ifo] = ts
self.num_samples[ifo] = int((te - ts) * sample_rate) + 1
logging.info('%s: use region %s-%s, %s points',
ifo, ts, te, self.num_samples[ifo])
self.tend = self.tstart.copy()
for ifo in snrs:
self.tend[ifo] += self.num_samples[ifo] / sample_rate
def draw_ifos(self, snrs, peak_snr_threshold=4.0, log=True,
precalculate_marginalization_points=False,
**kwargs):
""" Helper utility to determine which ifos we should use based on the
reference SNR time series.
"""
if 'tc' not in self.marginalized_vector_priors:
return
peak_snr_threshold = float(peak_snr_threshold)
tcmin, tcmax = self.marginalized_vector_priors['tc'].bounds['tc']
ifos = list(snrs.keys())
keep_ifos = []
psnrs = []
for ifo in ifos:
snr = snrs[ifo]
start = max(tcmin - EARTH_RADIUS, snr.start_time)
end = min(tcmax + EARTH_RADIUS, snr.end_time)
snr = snr.time_slice(start, end, mode='nearest')
psnr = abs(snr).max()
if psnr > peak_snr_threshold:
keep_ifos.append(ifo)
psnrs.append(psnr)
if log:
logging.info("Ifos used for SNR based draws:"
" %s, snrs: %s, peak_snr_threshold=%s",
keep_ifos, psnrs, peak_snr_threshold)
self.keep_ifos = keep_ifos
if precalculate_marginalization_points:
num_points = int(float(precalculate_marginalization_points))
self.premarg = self.snr_draw(size=num_points, snrs=snrs).copy()
self.premarg['sample_idx'] = self.sample_idx
return keep_ifos
@property
def current_params(self):
""" The current parameters
If a parameter has been vector marginalized, the likelihood should
expect an array for the given parameter. This allows transparent
vectorization for many models.
"""
params = self._current_params
for k in self.marginalize_vector_params:
if k not in params:
params[k] = self.marginalize_vector_params[k]
self.marginalize_vector_weights = - numpy.log(self.vsamples)
return params
def reconstruct(self, rec=None, seed=None):
""" Reconstruct the distance or vectored marginalized parameter
of this class.
"""
if seed:
numpy.random.seed(seed)
if rec is None:
rec = {}
def get_loglr():
p = self.current_params.copy()
p.update(rec)
self.update(**p)
return self.loglr
if self.marginalize_vector_params:
logging.debug('Reconstruct vector')
self.reconstruct_vector = True
self.reset_vector_params()
loglr = get_loglr()
xl = draw_sample(loglr + self.marginalize_vector_weights)
for k in self.marginalize_vector_params:
rec[k] = self.marginalize_vector_params[k][xl]
self.reconstruct_vector = False
if self.distance_marginalization:
logging.debug('Reconstruct distance')
# call likelihood to get vector output
self.reconstruct_distance = True
_, weights = self.distance_marginalization
loglr = get_loglr()
xl = draw_sample(loglr + numpy.log(weights))
rec['distance'] = self.dist_locs[xl]
self.reconstruct_distance = False
if self.marginalize_phase:
logging.debug('Reconstruct phase')
self.reconstruct_phase = True
s, h = get_loglr()
phasev = numpy.linspace(0, numpy.pi*2.0, int(1e4))
# This assumes that the template was conjugated in inner products
loglr = (numpy.exp(-2.0j * phasev) * s).real + h
xl = draw_sample(loglr)
rec['coa_phase'] = phasev[xl]
self.reconstruct_phase = False
rec['loglr'] = loglr[xl]
rec['loglikelihood'] = self.lognl + rec['loglr']
return rec
def setup_distance_marg_interpolant(dist_marg,
phase=False,
snr_range=(1, 50),
density=(1000, 1000)):
""" Create the interpolant for distance marginalization
Parameters
----------
dist_marg: tuple of two arrays
The (dist_loc, dist_weight) tuple which defines the grid
for integrating over distance
snr_range: tuple of (float, float)
Tuple of min, max SNR that the interpolant is expected to work
for.
density: tuple of (float, float)
The number of samples in either dimension of the 2d interpolant
Returns
-------
interp: function
Function which returns the precalculated likelihood for a given
inner product sh/hh.
"""
dist_rescale, _ = dist_marg
logging.info("Interpolator valid for SNRs in %s", snr_range)
logging.info("Interpolator using grid %s", density)
# approximate maximum shr and hhr values, assuming the true SNR is
# within the indicated range (and neglecting noise fluctuations)
snr_min, snr_max = snr_range
smax = dist_rescale.max()
smin = dist_rescale.min()
shr_max = snr_max ** 2.0 / smin
hhr_max = snr_max ** 2.0 / smin / smin
shr_min = snr_min ** 2.0 / smax
hhr_min = snr_min ** 2.0 / smax / smax
shr = numpy.geomspace(shr_min, shr_max, density[0])
hhr = numpy.geomspace(hhr_min, hhr_max, density[1])
lvals = numpy.zeros((len(shr), len(hhr)))
logging.info('Setting up likelihood interpolator')
for i, sh in enumerate(tqdm.tqdm(shr)):
for j, hh in enumerate(hhr):
lvals[i, j] = marginalize_likelihood(sh, hh,
distance=dist_marg,
phase=phase)
interp = RectBivariateSpline(shr, hhr, lvals)
def interp_wrapper(x, y, bounds_check=True):
k = None
if bounds_check:
if isinstance(x, float):
if x > shr_max or x < shr_min or y > hhr_max or y < hhr_min:
return -numpy.inf
else:
k = (x > shr_max) | (x < shr_min)
k = k | (y > hhr_max) | (y < hhr_min)
v = interp(x, y, grid=False)
if k is not None:
v[k] = -numpy.inf
return v
return interp_wrapper
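# Editorial example (added for illustration; not part of the original
# module): a hedged sketch of building a small distance-marginalization
# interpolant. The toy rescale grid and uniform weights below are invented
# (real grids are built by DistMarg.setup_marginalization), and a coarse
# density keeps the precomputation cheap.
def _example_distance_interpolant():  # pragma: no cover
    dist_rescale = numpy.linspace(0.5, 2.0, 100)   # d_ref / d samples
    dist_weights = numpy.ones_like(dist_rescale) / len(dist_rescale)
    interp = setup_distance_marg_interpolant(
        (dist_rescale, dist_weights), phase=True,
        snr_range=(4, 20), density=(50, 50))
    # evaluate at example (sh, hh) data products; out-of-range -> -inf
    return interp(64.0, 64.0)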
def marginalize_likelihood(sh, hh,
logw=None,
phase=False,
distance=False,
skip_vector=False,
interpolator=None,
return_peak=False,
return_complex=False,
):
""" Return the marginalized likelihood.
Apply various marginalizations to the data, including phase, distance,
and brute-force vector marginalizations. Several options relate
to how the distance marginalization is approximated and others allow for
special return products to aid in parameter reconstruction.
Parameters
----------
sh: complex float or numpy.ndarray
The data-template inner product
hh: complex float or numpy.ndarray
The template-template inner product
    logw: numpy.ndarray or None
        Log of the weighting factors if vector marginalization is used;
        if not given, each sample is assumed to be equally weighted.
    phase: bool, False
        Enable phase marginalization. Only use if the orbital phase can be
        related to just a single overall phase (e.g. not true for waveforms
        with sub-dominant modes)
skip_vector: bool, False
Don't apply marginalization of vector component of input (i.e. leave
as vector).
interpolator: function, None
If provided, internal calculation is skipped in favor of a
precalculated interpolating function which takes in sh/hh
and returns the likelihood.
    return_peak: bool, False
        If an array is passed as input, also return the peak likelihood and
        its index, in addition to the likelihood marginalized over the array.
return_complex: bool, False
Return the sh / hh data products before applying phase marginalization.
        This option is intended to aid in reconstructing phase marginalization
and is unlikely to be useful for other purposes.
Returns
-------
    loglr: float
        The marginalized log likelihood ratio
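
    Examples
    --------
    A minimal sketch of phase marginalization with scalar inner products;
    the numbers are purely illustrative placeholders:

    >>> from pycbc.inference.models.tools import marginalize_likelihood
    >>> sh = 20.0 + 5.0j   # hypothetical <d|h> inner product
    >>> hh = 25.0          # hypothetical <h|h> inner product
    >>> loglr = marginalize_likelihood(sh, hh, phase=True)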
"""
if distance and not interpolator and not numpy.isscalar(sh):
raise ValueError("Cannot do vector marginalization "
"and distance at the same time")
if logw is None:
if isinstance(hh, float):
logw = 0
else:
logw = -numpy.log(len(sh))
if return_complex:
pass
elif phase:
sh = abs(sh)
else:
sh = sh.real
if interpolator:
# pre-calculated result for this function
vloglr = interpolator(sh, hh)
if skip_vector:
return vloglr
else:
# explicit calculation
if distance:
# brute force distance path
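            # <d|h> scales linearly and <h|h> quadratically with the
            # amplitude rescaling, so a single waveform evaluation covers
            # the whole distance grid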
dist_rescale, dist_weights = distance
sh = sh * dist_rescale
hh = hh * dist_rescale ** 2.0
logw = numpy.log(dist_weights)
if return_complex:
return sh, -0.5 * hh
# Apply the phase marginalization
if phase:
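            # log I0(|<d|h>|) evaluated stably as log(i0e(x)) + x,
            # since i0e(x) = exp(-x) I0(x)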
sh = numpy.log(i0e(sh)) + sh
# Calculate loglikelihood ratio
vloglr = sh - 0.5 * hh
if return_peak:
maxv = vloglr.argmax()
maxl = vloglr[maxv]
# Do brute-force marginalization if loglr is a vector
if isinstance(vloglr, float):
vloglr = float(vloglr)
elif not skip_vector:
vloglr = float(logsumexp(vloglr, b=numpy.exp(logw)))
if return_peak:
return vloglr, maxv, maxl
return vloglr
| 36,070
| 37.291932
| 81
|
py
|
pycbc
|
pycbc-master/pycbc/inference/models/relbin.py
|
# Copyright (C) 2020 Daniel Finstad
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This module provides model classes and functions for implementing
a relative binning likelihood for parameter estimation.
"""
import logging
import numpy
import itertools
from scipy.interpolate import interp1d
from pycbc.waveform import (get_fd_waveform_sequence,
get_fd_det_waveform_sequence, fd_det_sequence)
from pycbc.detector import Detector
from pycbc.types import Array, TimeSeries
from .gaussian_noise import BaseGaussianNoise
from .relbin_cpu import (likelihood_parts, likelihood_parts_v,
likelihood_parts_multi, likelihood_parts_multi_v,
likelihood_parts_det, likelihood_parts_vector,
likelihood_parts_v_pol,
likelihood_parts_v_time,
likelihood_parts_v_pol_time,
likelihood_parts_vectorp, snr_predictor,
likelihood_parts_vectort,
snr_predictor_dom)
from .tools import DistMarg
def setup_bins(f_full, f_lo, f_hi, chi=1.0,
eps=0.1, gammas=None,
):
"""Construct frequency bins for use in a relative likelihood
    model. For details, see [Zackay, Dai & Venumadhav 2018].
Parameters
----------
f_full : array
The full resolution array of frequencies being used in the analysis.
f_lo : float
The starting frequency used in matched filtering. This will be the
left edge of the first frequency bin.
f_hi : float
The ending frequency used in matched filtering. This will be the right
edge of the last frequency bin.
chi : float, optional
        Tunable parameter, see [Zackay, Dai & Venumadhav 2018]
eps : float, optional
        Tunable parameter, see [Zackay, Dai & Venumadhav 2018]. Lower values
result in larger number of bins.
gammas : array, optional
Frequency powerlaw indices to be used in computing bins.
Returns
-------
fbin_ind : numpy.array of ints
Indices of bin edges in full frequency array.
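
    Examples
    --------
    A minimal sketch with an illustrative frequency grid; the values are
    placeholders rather than a tuned analysis configuration:

    >>> import numpy
    >>> from pycbc.inference.models.relbin import setup_bins
    >>> f_full = numpy.arange(0.0, 1024.25, 0.25)
    >>> fbin_ind = setup_bins(f_full, f_lo=20.0, f_hi=1024.0, eps=0.5)
    >>> fbin = f_full[fbin_ind]  # the bin edge frequencies themselves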
"""
f = numpy.linspace(f_lo, f_hi, 10000)
# f^ga power law index
ga = (
gammas
if gammas is not None
else numpy.array([-5.0 / 3, -2.0 / 3, 1.0, 5.0 / 3, 7.0 / 3])
)
logging.info("Using powerlaw indices: %s", ga)
dalp = chi * 2.0 * numpy.pi / numpy.absolute((f_lo ** ga) - (f_hi ** ga))
dphi = numpy.sum(
numpy.array([numpy.sign(g) * d * (f ** g) for g, d in zip(ga, dalp)]),
axis=0,
)
dphi_diff = dphi - dphi[0]
# now construct frequency bins
nbin = int(dphi_diff[-1] / eps)
dphi2f = interp1d(
dphi_diff, f, kind="slinear", bounds_error=False, fill_value=0.0
)
dphi_grid = numpy.linspace(dphi_diff[0], dphi_diff[-1], nbin + 1)
# frequency grid points
fbin = dphi2f(dphi_grid)
# indices of frequency grid points in the FFT array
fbin_ind = numpy.searchsorted(f_full, fbin)
for idx_fbin, idx_f_full in enumerate(fbin_ind):
if idx_f_full == 0:
curr_idx = 0
elif idx_f_full == len(f_full):
curr_idx = len(f_full) - 1
else:
abs1 = abs(f_full[idx_f_full] - fbin[idx_fbin])
abs2 = abs(f_full[idx_f_full-1] - fbin[idx_fbin])
if abs1 > abs2:
curr_idx = idx_f_full - 1
else:
curr_idx = idx_f_full
fbin_ind[idx_fbin] = curr_idx
fbin_ind = numpy.unique(fbin_ind)
return fbin_ind
class Relative(DistMarg, BaseGaussianNoise):
r"""Model that assumes the likelihood in a region around the peak
is slowly varying such that a linear approximation can be made, and
likelihoods can be calculated at a coarser frequency resolution. For
more details on the implementation, see https://arxiv.org/abs/1806.08792.
This model requires the use of a fiducial waveform whose parameters are
near the peak of the likelihood. The fiducial waveform and all template
waveforms used in likelihood calculation are currently generated using
the SPAtmplt approximant.
For more details on initialization parameters and definition of terms, see
:py:class:`BaseGaussianNoise`.
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied.
data : dict
A dictionary of data, in which the keys are the detector names and the
values are the data (assumed to be unwhitened). All data must have the
same frequency resolution.
low_frequency_cutoff : dict
A dictionary of starting frequencies, in which the keys are the
detector names and the values are the starting frequencies for the
respective detectors to be used for computing inner products.
    fiducial_params : dict
A dictionary of waveform parameters to be used for generating the
fiducial waveform. Keys must be parameter names in the form
'PARAM_ref' where PARAM is a recognized extrinsic parameter or
an intrinsic parameter compatible with the chosen approximant.
gammas : array of floats, optional
Frequency powerlaw indices to be used in computing frequency bins.
epsilon : float, optional
Tuning parameter used in calculating the frequency bins. Lower values
will result in higher resolution and more bins.
earth_rotation: boolean, optional
Default is False. If True, then vary the fp/fc polarization values
as a function of frequency bin, using a predetermined PN approximation
for the time offsets.
\**kwargs :
All other keyword arguments are passed to
:py:class:`BaseGaussianNoise`.
"""
name = "relative"
def __init__(
self,
variable_params,
data,
low_frequency_cutoff,
fiducial_params=None,
gammas=None,
epsilon=0.5,
earth_rotation=False,
earth_rotation_mode=2,
marginalize_phase=True,
**kwargs
):
variable_params, kwargs = self.setup_marginalization(
variable_params,
marginalize_phase=marginalize_phase,
**kwargs)
super(Relative, self).__init__(
variable_params, data, low_frequency_cutoff, **kwargs
)
        # If the waveform generator already includes the detector response
        # (e.g. detector-frame approximants), flag that the model should not
        # apply it again; for most ground-based analyses this stays False
        # and the response is applied by the model.
self.still_needs_det_response = False
if self.static_params['approximant'] in fd_det_sequence:
self.still_needs_det_response = True
# reference waveform and bin edges
self.f, self.df, self.end_time, self.det = {}, {}, {}, {}
self.h00, self.h00_sparse = {}, {}
self.fedges, self.edges = {}, {}
self.ta, self.antenna_time = {}, {}
# filtered summary data for linear approximation
self.sdat = {}
# store fiducial waveform params
self.fid_params = self.static_params.copy()
self.fid_params.update(fiducial_params)
for k in self.static_params:
if self.fid_params[k] == 'REPLACE':
self.fid_params.pop(k)
for ifo in data:
# store data and frequencies
d0 = self.data[ifo]
self.f[ifo] = numpy.array(d0.sample_frequencies)
self.df[ifo] = d0.delta_f
self.end_time[ifo] = float(d0.end_time)
# self.det[ifo] = Detector(ifo)
# generate fiducial waveform
f_lo = self.kmin[ifo] * self.df[ifo]
f_hi = self.kmax[ifo] * self.df[ifo]
logging.info(
"%s: Generating fiducial waveform from %s to %s Hz",
ifo, f_lo, f_hi,
)
# prune low frequency samples to avoid waveform errors
fpoints = Array(self.f[ifo].astype(numpy.float64))
fpoints = fpoints[self.kmin[ifo]:self.kmax[ifo]+1]
if self.still_needs_det_response:
wave = get_fd_det_waveform_sequence(ifos=ifo,
sample_points=fpoints,
**self.fid_params)
curr_wav = wave[ifo]
self.ta[ifo] = 0
else:
fid_hp, fid_hc = get_fd_waveform_sequence(sample_points=fpoints,
**self.fid_params)
# Apply detector response if not handled by
# the waveform generator
self.det[ifo] = Detector(ifo)
dt = self.det[ifo].time_delay_from_earth_center(
self.fid_params["ra"],
self.fid_params["dec"],
self.fid_params["tc"],
)
self.ta[ifo] = self.fid_params["tc"] + dt
fp, fc = self.det[ifo].antenna_pattern(
self.fid_params["ra"], self.fid_params["dec"],
self.fid_params["polarization"], self.fid_params["tc"])
curr_wav = (fid_hp * fp + fid_hc * fc)
# check for zeros at low and high frequencies
# make sure only nonzero samples are included in bins
numzeros_lo = list(curr_wav != 0j).index(True)
if numzeros_lo > 0:
new_kmin = self.kmin[ifo] + numzeros_lo
f_lo = new_kmin * self.df[ifo]
logging.info(
"WARNING! Fiducial waveform starts above "
"low-frequency-cutoff, initial bin frequency "
"will be %s Hz", f_lo)
numzeros_hi = list(curr_wav[::-1] != 0j).index(True)
if numzeros_hi > 0:
new_kmax = self.kmax[ifo] - numzeros_hi
f_hi = new_kmax * self.df[ifo]
logging.info(
"WARNING! Fiducial waveform terminates below "
"high-frequency-cutoff, final bin frequency "
"will be %s Hz", f_hi)
self.ta[ifo] -= self.end_time[ifo]
curr_wav.resize(len(self.f[ifo]))
curr_wav = numpy.roll(curr_wav, self.kmin[ifo])
# We'll apply this to the data, in lieu of the ref waveform
# This makes it easier to compare target signal to reference later
tshift = numpy.exp(-2.0j * numpy.pi * self.f[ifo] * self.ta[ifo])
self.h00[ifo] = numpy.array(curr_wav) # * tshift
data_shifted = self.data[ifo] * numpy.conjugate(tshift)
logging.info("Computing frequency bins")
fbin_ind = setup_bins(
f_full=self.f[ifo], f_lo=f_lo, f_hi=f_hi,
gammas=gammas, eps=float(epsilon),
)
logging.info("Using %s bins for this model", len(fbin_ind))
self.fedges[ifo] = self.f[ifo][fbin_ind]
self.edges[ifo] = fbin_ind
self.init_from_frequencies(data_shifted, self.h00, fbin_ind, ifo)
self.antenna_time[ifo] = self.setup_antenna(
earth_rotation,
int(earth_rotation_mode),
self.fedges[ifo])
self.combine_layout()
def init_from_frequencies(self, data, h00, fbin_ind, ifo):
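        """Compute and store the summary data for the given detector.
        This keeps a sparse copy of the fiducial waveform evaluated at the
        bin edges and the per-bin (a0, a1, b0, b1) coefficients used by the
        heterodyned likelihood.
        """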
bins = numpy.array(
[
(fbin_ind[i], fbin_ind[i + 1])
for i in range(len(fbin_ind) - 1)
]
)
# store low res copy of fiducial waveform
self.h00_sparse[ifo] = h00[ifo].copy().take(fbin_ind)
# compute summary data
logging.info(
"Calculating summary data at frequency resolution %s Hz",
self.df[ifo],
)
a0, a1 = self.summary_product(data, h00[ifo], bins, ifo)
b0, b1 = self.summary_product(h00[ifo], h00[ifo], bins, ifo)
self.sdat[ifo] = {"a0": a0, "a1": a1, "b0": abs(b0), "b1": abs(b1)}
def combine_layout(self):
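        """Group detectors that share identical frequency bin edges.
        Detectors mapped to the same layout reuse a single waveform
        evaluation in ``get_waveforms``.
        """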
# determine the unique ifo layouts
self.edge_unique = []
self.ifo_map = {}
for ifo in self.fedges:
if len(self.edge_unique) == 0:
self.ifo_map[ifo] = 0
self.edge_unique.append(Array(self.fedges[ifo]))
else:
for i, edge in enumerate(self.edge_unique):
if numpy.array_equal(edge, self.fedges[ifo]):
self.ifo_map[ifo] = i
break
else:
self.ifo_map[ifo] = len(self.edge_unique)
self.edge_unique.append(Array(self.fedges[ifo]))
logging.info("%s unique ifo layouts", len(self.edge_unique))
def setup_antenna(self, earth_rotation, mode, fedges):
# Calculate the times to evaluate fp/fc
self.earth_rotation = earth_rotation
if earth_rotation is not False:
logging.info("Enabling frequency-dependent earth rotation")
from pycbc.waveform.spa_tmplt import spa_length_in_time
times = spa_length_in_time(
phase_order=-1,
mass1=self.fid_params["mass1"],
mass2=self.fid_params["mass2"],
f_lower=numpy.array(fedges) / mode * 2.0,
)
atimes = self.fid_params["tc"] - times
self.lik = likelihood_parts_v
self.mlik = likelihood_parts_multi_v
else:
atimes = self.fid_params["tc"]
if self.still_needs_det_response:
self.lik = likelihood_parts_det
else:
self.lik = likelihood_parts
self.mlik = likelihood_parts_multi
return atimes
@property
def likelihood_function(self):
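        """Select the compiled likelihood kernel appropriate for the current
        combination of vector-marginalized parameters and earth-rotation
        settings.
        """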
self.lformat = None
if self.marginalize_vector_params:
p = self.current_params
vmarg = set(k for k in self.marginalize_vector_params
if not numpy.isscalar(p[k]))
if self.earth_rotation:
if set(['tc', 'polarization']).issubset(vmarg):
self.lformat = 'earth_time_pol'
return likelihood_parts_v_pol_time
elif set(['polarization']).issubset(vmarg):
self.lformat = 'earth_pol'
return likelihood_parts_v_pol
elif set(['tc']).issubset(vmarg):
self.lformat = 'earth_time'
return likelihood_parts_v_time
else:
if set(['ra', 'dec', 'tc']).issubset(vmarg):
return likelihood_parts_vector
elif set(['tc', 'polarization']).issubset(vmarg):
return likelihood_parts_vector
elif set(['tc']).issubset(vmarg):
return likelihood_parts_vectort
elif set(['polarization']).issubset(vmarg):
return likelihood_parts_vectorp
return self.lik
def summary_product(self, h1, h2, bins, ifo):
""" Calculate the summary values for the inner product <h1|h2>
"""
# calculate coefficients
h12 = numpy.conjugate(h1) * h2 / self.psds[ifo]
# constant terms
a0 = numpy.array([
4.0 * self.df[ifo] * h12[l:h].sum()
for l, h in bins
])
# linear terms
a1 = numpy.array([
4.0 / (h - l) *
(h12[l:h] * (self.f[ifo][l:h] - self.f[ifo][l])).sum()
for l, h in bins])
return a0, a1
def get_waveforms(self, params):
""" Get the waveform polarizations for each ifo
"""
if self.still_needs_det_response:
wfs = {}
for ifo in self.data:
wfs.update(get_fd_det_waveform_sequence(
ifos=ifo, sample_points=self.fedges[ifo], **params))
return wfs
wfs = []
for edge in self.edge_unique:
hp, hc = get_fd_waveform_sequence(sample_points=edge, **params)
hp = hp.numpy()
hc = hc.numpy()
wfs.append((hp, hc))
wf_ret = {ifo: wfs[self.ifo_map[ifo]] for ifo in self.data}
self.wf_ret = wf_ret
return wf_ret
@property
def multi_signal_support(self):
""" The list of classes that this model supports in a multi-signal
likelihood
"""
# Check if this model *can* be included in a multi-signal model.
# All marginalizations must currently be disabled to work!
if (self.marginalize_vector_params or
self.marginalize_distance or
self.marginalize_phase):
logging.info("Cannot use single template model inside of"
"multi_signal if marginalizations are enabled")
return [type(self)]
def calculate_hihjs(self, models):
""" Pre-calculate the hihj inner products on a grid
"""
self.hihj = {}
for m1, m2 in itertools.combinations(models, 2):
self.hihj[(m1, m2)] = {}
for ifo in self.data:
h1 = m1.h00[ifo]
h2 = m2.h00[ifo]
# Combine the grids
edge = numpy.unique([m1.edges[ifo], m2.edges[ifo]])
# Remove any points where either reference is zero
keep = numpy.where((h1[edge] != 0) | (h2[edge] != 0))[0]
edge = edge[keep]
fedge = m1.f[ifo][edge]
bins = numpy.array([
(edge[i], edge[i + 1])
for i in range(len(edge) - 1)
])
a0, a1 = self.summary_product(h1, h2, bins, ifo)
self.hihj[(m1, m2)][ifo] = a0, a1, fedge
def multi_loglikelihood(self, models):
""" Calculate a multi-model (signal) likelihood
"""
models = [self] + models
loglr = 0
# handle sum[<d|h_i> - 0.5 <h_i|h_i>]
for m in models:
loglr += m.loglr
if not hasattr(self, 'hihj'):
self.calculate_hihjs(models)
        # add the pairwise cross terms -Re<h_i|h_j>; the lognl from this
        # model is added back at the return
for m1, m2 in itertools.combinations(models, 2):
for det in self.data:
a0, a1, fedge = self.hihj[(m1, m2)][det]
fp, fc, dtc, hp, hc, h00 = m1._current_wf_parts[det]
fp2, fc2, dtc2, hp2, hc2, h002 = m2._current_wf_parts[det]
h1h2 = self.mlik(fedge,
fp, fc, dtc, hp, hc, h00,
fp2, fc2, dtc2, hp2, hc2, h002,
a0, a1)
loglr += - h1h2.real # This is -0.5 * re(<h1|h2> + <h2|h1>)
return loglr + self.lognl
def _loglr(self):
r"""Computes the log likelihood ratio,
.. math::
\log \mathcal{L}(\Theta) = \sum_i
\left<h_i(\Theta)|d_i\right> -
\frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,
at the current parameter values :math:`\Theta`.
Returns
-------
float
The value of the log likelihood ratio.
"""
# get model params
p = self.current_params
wfs = self.get_waveforms(p)
lik = self.likelihood_function
norm = 0.0
filt = 0j
self._current_wf_parts = {}
pol_phase = numpy.exp(-2.0j * p['polarization'])
for ifo in self.data:
freqs = self.fedges[ifo]
sdat = self.sdat[ifo]
h00 = self.h00_sparse[ifo]
end_time = self.end_time[ifo]
times = self.antenna_time[ifo]
# project waveform to detector frame if waveform does not deal
# with detector response. Otherwise, skip detector response.
if self.still_needs_det_response:
channel = wfs[ifo].numpy()
filter_i, norm_i = lik(freqs, 0.0, channel, h00,
sdat['a0'], sdat['a1'],
sdat['b0'], sdat['b1'])
else:
hp, hc = wfs[ifo]
det = self.det[ifo]
fp, fc = det.antenna_pattern(p["ra"], p["dec"],
0.0, times)
dt = det.time_delay_from_earth_center(p["ra"], p["dec"], times)
dtc = p["tc"] + dt - end_time - self.ta[ifo]
if self.lformat == 'earth_pol':
filter_i, norm_i = lik(freqs, fp, fc, dtc, pol_phase,
hp, hc, h00,
sdat['a0'], sdat['a1'],
sdat['b0'], sdat['b1'])
else:
f = (fp + 1.0j * fc) * pol_phase
fp = f.real.copy()
fc = f.imag.copy()
filter_i, norm_i = lik(freqs, fp, fc, dtc,
hp, hc, h00,
sdat['a0'], sdat['a1'],
sdat['b0'], sdat['b1'])
self._current_wf_parts[ifo] = (fp, fc, dtc, hp, hc, h00)
filt += filter_i
norm += norm_i
loglr = self.marginalize_loglr(filt, norm)
return loglr
def write_metadata(self, fp, group=None):
"""Adds writing the fiducial parameters and epsilon to file's attrs.
Parameters
----------
fp : pycbc.inference.io.BaseInferenceFile instance
The inference file to write to.
group : str, optional
If provided, the metadata will be written to the attrs specified
by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is
written to the top-level attrs (``fp.attrs``).
"""
super().write_metadata(fp, group=group)
if group is None:
attrs = fp.attrs
else:
attrs = fp[group].attrs
for p, v in self.fid_params.items():
attrs["{}_ref".format(p)] = v
def max_curvature_from_reference(self):
""" Return the maximum change in slope between frequency bins
relative to the reference waveform.
"""
dmax = 0
for ifo in self.data:
r = self.wf_ret[ifo][0] / self.h00_sparse[ifo]
d = abs(numpy.diff(r / abs(r).min(), n=2)).max()
dmax = d if dmax < d else dmax
return dmax
@staticmethod
def extra_args_from_config(cp, section, skip_args=None, dtypes=None):
"""Adds reading fiducial waveform parameters from config file."""
# add fiducial params to skip list
skip_args += [
option for option in cp.options(section) if option.endswith("_ref")
]
# get frequency power-law indices if specified
# NOTE these should be supplied in units of 1/3
gammas = None
if cp.has_option(section, "gammas"):
skip_args.append("gammas")
gammas = numpy.array(
[float(g) / 3.0 for g in cp.get(section, "gammas").split()]
)
args = super(Relative, Relative).extra_args_from_config(
cp, section, skip_args=skip_args, dtypes=dtypes
)
# get fiducial params from config
fid_params = {
p.replace("_ref", ""): float(cp.get("model", p))
for p in cp.options("model")
if p.endswith("_ref")
}
# add optional params with default values if not specified
opt_params = {
"ra": numpy.pi,
"dec": 0.0,
"inclination": 0.0,
"polarization": numpy.pi,
}
fid_params.update(
{p: opt_params[p] for p in opt_params if p not in fid_params}
)
args.update({"fiducial_params": fid_params, "gammas": gammas})
return args
class RelativeTime(Relative):
""" Heterodyne likelihood optimized for time marginalization. In addition
it supports phase (dominant-mode), sky location, and polarization
marginalization.
"""
name = "relative_time"
def __init__(self, *args,
sample_rate=4096,
**kwargs):
super(RelativeTime, self).__init__(*args, **kwargs)
self.sample_rate = float(sample_rate)
self.setup_peak_lock(sample_rate=self.sample_rate, **kwargs)
self.draw_ifos(self.ref_snr, **kwargs)
@property
def ref_snr(self):
if not hasattr(self, '_ref_snr'):
wfs = {ifo: (self.h00_sparse[ifo],
self.h00_sparse[ifo]) for ifo in self.h00_sparse}
self._ref_snr = self.get_snr(wfs)
return self._ref_snr
def get_snr(self, wfs):
""" Return hp/hc maximized SNR time series
"""
delta_t = 1.0 / self.sample_rate
snrs = {}
for ifo in wfs:
sdat = self.sdat[ifo]
dtc = self.tstart[ifo] - self.end_time[ifo] - self.ta[ifo]
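            # the time window is padded by two samples on each side; the
            # epoch of the returned series accounts for this padding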
snr = snr_predictor(self.fedges[ifo],
dtc - delta_t * 2.0, delta_t,
self.num_samples[ifo] + 4,
wfs[ifo][0], wfs[ifo][1],
self.h00_sparse[ifo],
sdat['a0'], sdat['a1'],
sdat['b0'], sdat['b1'])
snrs[ifo] = TimeSeries(snr, delta_t=delta_t,
epoch=self.tstart[ifo] - delta_t * 2.0)
return snrs
def _loglr(self):
r"""Computes the log likelihood ratio,
.. math::
\log \mathcal{L}(\Theta) = \sum_i
\left<h_i(\Theta)|d_i\right> -
\frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,
at the current parameter values :math:`\Theta`.
Returns
-------
float
The value of the log likelihood ratio.
"""
# get model params
p = self.current_params
wfs = self.get_waveforms(p)
lik = self.likelihood_function
norm = 0.0
filt = 0j
pol_phase = numpy.exp(-2.0j * p['polarization'])
self.snr_draw(wfs)
p = self.current_params
for ifo in self.data:
freqs = self.fedges[ifo]
sdat = self.sdat[ifo]
h00 = self.h00_sparse[ifo]
end_time = self.end_time[ifo]
times = self.antenna_time[ifo]
hp, hc = wfs[ifo]
det = self.det[ifo]
fp, fc = det.antenna_pattern(p["ra"], p["dec"],
0, times)
times = det.time_delay_from_earth_center(p["ra"], p["dec"], times)
dtc = p["tc"] - end_time - self.ta[ifo]
if self.lformat == 'earth_time_pol':
filter_i, norm_i = lik(
freqs, fp, fc, times, dtc, pol_phase,
hp, hc, h00,
sdat['a0'], sdat['a1'],
sdat['b0'], sdat['b1'])
else:
f = (fp + 1.0j * fc) * pol_phase
fp = f.real.copy()
fc = f.imag.copy()
if self.lformat == 'earth_time':
filter_i, norm_i = lik(
freqs, fp, fc, times, dtc,
hp, hc, h00,
sdat['a0'], sdat['a1'],
sdat['b0'], sdat['b1'])
else:
filter_i, norm_i = lik(freqs, fp, fc, times + dtc,
hp, hc, h00,
sdat['a0'], sdat['a1'],
sdat['b0'], sdat['b1'])
filt += filter_i
norm += norm_i
loglr = self.marginalize_loglr(filt, norm)
return loglr
class RelativeTimeDom(RelativeTime):
""" Heterodyne likelihood optimized for time marginalization and only
    dominant-mode waveforms. This enables inclination marginalization in
    addition to the other forms supported by RelativeTime.
"""
name = "relative_time_dom"
def get_snr(self, wfs):
""" Return hp/hc maximized SNR time series
"""
delta_t = 1.0 / self.sample_rate
snrs = {}
self.sh = {}
self.hh = {}
for ifo in wfs:
sdat = self.sdat[ifo]
dtc = self.tstart[ifo] - self.end_time[ifo] - self.ta[ifo]
sh, hh = snr_predictor_dom(self.fedges[ifo],
dtc - delta_t * 2.0, delta_t,
self.num_samples[ifo] + 4,
wfs[ifo][0],
self.h00_sparse[ifo],
sdat['a0'], sdat['a1'],
sdat['b0'], sdat['b1'])
snr = TimeSeries(abs(sh[2:-2]) / hh ** 0.5, delta_t=delta_t,
epoch=self.tstart[ifo])
self.sh[ifo] = TimeSeries(sh, delta_t=delta_t,
epoch=self.tstart[ifo] - delta_t * 2.0)
self.hh[ifo] = hh
snrs[ifo] = snr
return snrs
def _loglr(self):
r"""Computes the log likelihood ratio,
.. math::
\log \mathcal{L}(\Theta) = \sum_i
\left<h_i(\Theta)|d_i\right> -
\frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,
at the current parameter values :math:`\Theta`.
Returns
-------
float
The value of the log likelihood ratio.
"""
# calculate <d-h|d-h> = <h|h> - 2<h|d> + <d|d> up to a constant
p = self.current_params
p2 = p.copy()
p2.pop('inclination')
wfs = self.get_waveforms(p2)
sh_total = hh_total = 0
ic = numpy.cos(p['inclination'])
ip = 0.5 * (1.0 + ic * ic)
pol_phase = numpy.exp(-2.0j * p['polarization'])
snrs = self.get_snr(wfs)
self.snr_draw(snrs=snrs)
for ifo in self.sh:
if self.precalc_antenna_factors:
fp, fc, dt = self.get_precalc_antenna_factors(ifo)
else:
dt = self.det[ifo].time_delay_from_earth_center(p['ra'],
p['dec'],
p['tc'])
fp, fc = self.det[ifo].antenna_pattern(p['ra'], p['dec'],
0, p['tc'])
dts = p['tc'] + dt
f = (fp + 1.0j * fc) * pol_phase
# Note, this includes complex conjugation already
# as our stored inner products were hp* x data
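            # dominant-mode inclination weighting: the plus part is scaled
            # by (1 + cos^2 i)/2 and the cross part by cos(i)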
htf = (f.real * ip + 1.0j * f.imag * ic)
sh = self.sh[ifo].at_time(dts, interpolate='quadratic')
sh_total += sh * htf
hh_total += self.hh[ifo] * abs(htf) ** 2.0
loglr = self.marginalize_loglr(sh_total, hh_total)
return loglr
| 32,610
| 37.547281
| 80
|
py
|
pycbc
|
pycbc-master/pycbc/inference/models/base_data.py
|
# Copyright (C) 2018 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Base classes for mofdels with data.
"""
import numpy
from abc import (ABCMeta, abstractmethod)
from .base import BaseModel
class BaseDataModel(BaseModel, metaclass=ABCMeta):
r"""Base class for models that require data and a waveform generator.
    This adds properties for the log of the likelihood that the data contain
noise, ``lognl``, and the log likelihood ratio ``loglr``.
Classes that inherit from this class must define ``_loglr`` and ``_lognl``
functions, in addition to the ``_loglikelihood`` requirement inherited from
``BaseModel``.
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied.
data : dict
A dictionary of data, in which the keys are the detector names and the
values are the data.
recalibration : dict of pycbc.calibration.Recalibrate, optional
Dictionary of detectors -> recalibration class instances for
recalibrating data.
gates : dict of tuples, optional
Dictionary of detectors -> tuples of specifying gate times. The
sort of thing returned by `pycbc.gate.gates_from_cli`.
injection_file : str, optional
If an injection was added to the data, the name of the injection file
used. If provided, the injection parameters will be written to
file when ``write_metadata`` is called.
\**kwargs :
All other keyword arguments are passed to ``BaseModel``.
See ``BaseModel`` for additional attributes and properties.
"""
def __init__(self, variable_params, data, recalibration=None, gates=None,
injection_file=None, no_save_data=False, **kwargs):
self._data = None
self.data = data
self.recalibration = recalibration
self.no_save_data = no_save_data
self.gates = gates
self.injection_file = injection_file
super(BaseDataModel, self).__init__(variable_params, **kwargs)
@property
def data(self):
"""dict: Dictionary mapping detector names to data."""
return self._data
@data.setter
def data(self, data):
"""Store a copy of the data."""
self._data = {det: d.copy() for (det, d) in data.items()}
@property
def _extra_stats(self):
"""Adds ``loglr`` and ``lognl`` to the ``default_stats``."""
return ['loglr', 'lognl']
@property
def lognl(self):
"""The log likelihood of the model assuming the data is noise.
This will initially try to return the ``current_stats.lognl``.
        If that raises an ``AttributeError``, will call ``_lognl`` to
calculate it and store it to ``current_stats``.
"""
return self._trytoget('lognl', self._lognl)
@abstractmethod
def _lognl(self):
"""Low-level function that calculates the lognl."""
pass
@property
def loglr(self):
"""The log likelihood ratio at the current parameters.
This will initially try to return the ``current_stats.loglr``.
        If that raises an ``AttributeError``, will call ``_loglr`` to
calculate it and store it to ``current_stats``.
"""
return self._trytoget('loglr', self._loglr, apply_transforms=True)
@abstractmethod
def _loglr(self):
"""Low-level function that calculates the loglr."""
pass
@property
def logplr(self):
"""Returns the log of the prior-weighted likelihood ratio at the
current parameter values.
The logprior is calculated first. If the logprior returns ``-inf``
(possibly indicating a non-physical point), then ``loglr`` is not
called.
"""
logp = self.logprior
if logp == -numpy.inf:
return logp
else:
return logp + self.loglr
@property
def detectors(self):
"""list: Returns the detectors used."""
return list(self._data.keys())
def write_metadata(self, fp, group=None):
"""Adds data to the metadata that's written.
Parameters
----------
fp : pycbc.inference.io.BaseInferenceFile instance
The inference file to write to.
group : str, optional
If provided, the metadata will be written to the attrs specified
by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is
written to the top-level attrs (``fp.attrs``).
"""
super().write_metadata(fp, group=group)
if not self.no_save_data:
fp.write_stilde(self.data, group=group)
# save injection parameters
if self.injection_file is not None:
fp.write_injections(self.injection_file, group=group)
| 5,697
| 34.391304
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/models/gaussian_noise.py
|
# Copyright (C) 2018 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module provides model classes that assume the noise is Gaussian.
"""
import logging
import shlex
from abc import ABCMeta
import numpy
from pycbc import filter as pyfilter
from pycbc.waveform import (NoWaveformError, FailedWaveformError)
from pycbc.waveform import generator
from pycbc.types import FrequencySeries
from pycbc.strain import gates_from_cli
from pycbc.strain.calibration import Recalibrate
from pycbc.inject import InjectionSet
from pycbc.io import FieldArray
from pycbc.types.optparse import MultiDetOptionAction
from .base import ModelStats
from .base_data import BaseDataModel
from .data_utils import (data_opts_from_config, data_from_cli,
fd_data_from_strain_dict, gate_overwhitened_data)
class BaseGaussianNoise(BaseDataModel, metaclass=ABCMeta):
r"""Model for analyzing GW data with assuming a wide-sense stationary
Gaussian noise model.
This model will load gravitational wave data and calculate the log noise
likelihood ``_lognl`` and normalization. It also implements the
``_loglikelihood`` function as the sum of the log likelihood ratio and the
``lognl``. It does not implement a log likelihood ratio function
``_loglr``, however, since that can differ depending on the signal model.
Models that analyze GW data assuming it is stationary Gaussian should
therefore inherit from this class and implement their own ``_loglr``
function.
For more details on the inner product used, the log likelihood of the
noise, and the normalization factor, see :py:class:`GaussianNoise`.
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied.
data : dict
A dictionary of data, in which the keys are the detector names and the
values are the data (assumed to be unwhitened). All data must have the
same frequency resolution.
low_frequency_cutoff : dict
A dictionary of starting frequencies, in which the keys are the
detector names and the values are the starting frequencies for the
respective detectors to be used for computing inner products.
psds : dict, optional
A dictionary of FrequencySeries keyed by the detector names. The
dictionary must have a psd for each detector specified in the data
dictionary. If provided, the inner products in each detector will be
weighted by 1/psd of that detector.
high_frequency_cutoff : dict, optional
A dictionary of ending frequencies, in which the keys are the
detector names and the values are the ending frequencies for the
respective detectors to be used for computing inner products. If not
provided, the minimum of the largest frequency stored in the data
and a given waveform will be used.
normalize : bool, optional
        If True, the normalization factor :math:`\alpha` will be included in the
log likelihood. See :py:class:`GaussianNoise` for details. Default is
to not include it.
static_params : dict, optional
A dictionary of parameter names -> values to keep fixed.
ignore_failed_waveforms : bool, optional
If the waveform generator raises an error when it tries to generate,
treat the point as having zero likelihood. This allows the parameter
estimation to continue. Otherwise, an error will be raised, stopping
the run. Default is False.
\**kwargs :
All other keyword arguments are passed to ``BaseDataModel``.
Attributes
----------
ignore_failed_waveforms : bool
If True, points in parameter space that cause waveform generation to
fail (i.e., they raise a ``FailedWaveformError``) will be treated as
points with zero likelihood. Otherwise, such points will cause the
model to raise a ``FailedWaveformError``.
"""
def __init__(self, variable_params, data, low_frequency_cutoff, psds=None,
high_frequency_cutoff=None, normalize=False,
static_params=None, ignore_failed_waveforms=False,
no_save_data=False,
**kwargs):
# set up the boiler-plate attributes
super(BaseGaussianNoise, self).__init__(variable_params, data,
static_params=static_params,
no_save_data=no_save_data,
**kwargs)
self.ignore_failed_waveforms = ignore_failed_waveforms
self.no_save_data = no_save_data
# check if low frequency cutoff has been provided for every IFO with
# data
for ifo in self.data:
if low_frequency_cutoff[ifo] is None:
raise ValueError(
"A low-frequency-cutoff must be provided for every "
"detector for which data has been provided. If "
"loading the model settings from "
"a config file, please provide "
"`{DETECTOR}:low-frequency-cutoff` options for "
"every detector in the `[model]` section, where "
"`{DETECTOR} is the name of the detector,"
"or provide a single low-frequency-cutoff option"
"which will be used for all detectors")
# check that the data sets all have the same delta fs and delta ts
dts = numpy.array([d.delta_t for d in self.data.values()])
dfs = numpy.array([d.delta_f for d in self.data.values()])
if all(dts == dts[0]) and all(dfs == dfs[0]):
self.all_ifodata_same_rate_length = True
else:
self.all_ifodata_same_rate_length = False
logging.info(
"You are using different data segment lengths or "
"sampling rates for different IFOs")
# store the number of samples in the time domain
self._N = {}
for (det, d) in self._data.items():
self._N[det] = int(1./(d.delta_f*d.delta_t))
# set lower/upper frequency cutoff
if high_frequency_cutoff is None:
high_frequency_cutoff = {ifo: None for ifo in self.data}
self._f_upper = high_frequency_cutoff
self._f_lower = low_frequency_cutoff
# Set the cutoff indices
self._kmin = {}
self._kmax = {}
for (det, d) in self._data.items():
kmin, kmax = pyfilter.get_cutoff_indices(self._f_lower[det],
self._f_upper[det],
d.delta_f, self._N[det])
self._kmin[det] = kmin
self._kmax[det] = kmax
# store the psd segments
self._psd_segments = {}
if psds is not None:
self.set_psd_segments(psds)
# store the psds and calculate the inner product weight
self._psds = {}
self._invpsds = {}
self._weight = {}
self._lognorm = {}
self._det_lognls = {}
self._whitened_data = {}
# set the normalization state
self._normalize = False
self.normalize = normalize
# store the psds and whiten the data
self.psds = psds
# attribute for storing the current waveforms
self._current_wfs = None
@property
def high_frequency_cutoff(self):
"""The high frequency cutoff of the inner product."""
return self._f_upper
@property
def low_frequency_cutoff(self):
"""The low frequency cutoff of the inner product."""
return self._f_lower
@property
def kmin(self):
"""Dictionary of starting indices for the inner product.
This is determined from the lower frequency cutoff and the ``delta_f``
of the data using
:py:func:`pycbc.filter.matchedfilter.get_cutoff_indices`.
"""
return self._kmin
@property
def kmax(self):
"""Dictionary of ending indices for the inner product.
This is determined from the high frequency cutoff and the ``delta_f``
of the data using
:py:func:`pycbc.filter.matchedfilter.get_cutoff_indices`. If no high
        frequency cutoff was provided, this will be the index corresponding to
the Nyquist frequency.
"""
return self._kmax
@property
def psds(self):
"""Dictionary of detectors -> PSD frequency series.
If no PSD was provided for a detector, this will just be a frequency
series of ones.
"""
return self._psds
@psds.setter
def psds(self, psds):
"""Sets the psds, and calculates the weight and norm from them.
The data and the low and high frequency cutoffs must be set first.
"""
# check that the data has been set
if self._data is None:
raise ValueError("No data set")
if self._f_lower is None:
raise ValueError("low frequency cutoff not set")
if self._f_upper is None:
raise ValueError("high frequency cutoff not set")
# make sure the relevant caches are cleared
self._psds.clear()
self._invpsds.clear()
self._weight.clear()
self._lognorm.clear()
self._det_lognls.clear()
self._whitened_data.clear()
for det, d in self._data.items():
if psds is None:
# No psd means assume white PSD
p = FrequencySeries(numpy.ones(int(self._N[det]/2+1)),
delta_f=d.delta_f)
else:
# copy for storage
p = psds[det].copy()
self._psds[det] = p
# we'll store the weight to apply to the inner product
# only set weight in band we will analyze
kmin = self._kmin[det]
kmax = self._kmax[det]
invp = FrequencySeries(numpy.zeros(len(p)), delta_f=p.delta_f)
invp[kmin:kmax] = 1./p[kmin:kmax]
self._invpsds[det] = invp
self._weight[det] = numpy.sqrt(4 * invp.delta_f * invp)
self._whitened_data[det] = d.copy()
self._whitened_data[det] *= self._weight[det]
# set the lognl and lognorm; we'll get this by just calling lognl
_ = self.lognl
@property
def psd_segments(self):
"""Dictionary giving times used for PSD estimation for each detector.
If a detector's PSD was not estimated from data, or the segment wasn't
provided, that detector will not be in the dictionary.
"""
return self._psd_segments
def set_psd_segments(self, psds):
"""Sets the PSD segments from a dictionary of PSDs.
This attempts to get the PSD segment from a ``psd_segment`` attribute
of each detector's PSD frequency series. If that attribute isn't set,
then that detector is not added to the dictionary of PSD segments.
Parameters
----------
psds : dict
Dictionary of detector name -> PSD frequency series. The segment
used for each PSD will try to be retrieved from the PSD's
``.psd_segment`` attribute.
"""
for det, p in psds.items():
try:
self._psd_segments[det] = p.psd_segment
except AttributeError:
continue
@property
def weight(self):
r"""Dictionary of detectors -> frequency series of inner-product
weights.
The weights are :math:`\sqrt{4 \Delta f / S_n(f)}`. This is set when
the PSDs are set.
"""
return self._weight
@property
def whitened_data(self):
r"""Dictionary of detectors -> whitened data frequency series.
The whitened data is the data multiplied by the inner-product weight.
Note that this includes the :math:`\sqrt{4 \Delta f}` factor. This
is set when the PSDs are set.
"""
return self._whitened_data
def det_lognorm(self, det):
"""The log of the likelihood normalization in the given detector.
If ``self.normalize`` is False, will just return 0.
"""
if not self.normalize:
return 0.
try:
return self._lognorm[det]
except KeyError:
# hasn't been calculated yet
p = self._psds[det]
dt = self._whitened_data[det].delta_t
kmin = self._kmin[det]
kmax = self._kmax[det]
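            # log alpha_i = -[ (N/2) log(pi N dt) + sum_k log S_n[k] ],
            # i.e. the per-detector normalization defined in GaussianNoise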
lognorm = -float(self._N[det]*numpy.log(numpy.pi*self._N[det]*dt)/2.
+ numpy.log(p[kmin:kmax]).sum())
self._lognorm[det] = lognorm
return self._lognorm[det]
@property
def normalize(self):
"""Determines if the loglikelihood includes the normalization term.
"""
return self._normalize
@normalize.setter
def normalize(self, normalize):
"""Clears the current stats if the normalization state is changed.
"""
if normalize != self._normalize:
self._current_stats = ModelStats()
self._lognorm.clear()
self._det_lognls.clear()
self._normalize = normalize
@property
def lognorm(self):
"""The log of the normalization of the log likelihood."""
return sum(self.det_lognorm(det) for det in self._data)
def det_lognl(self, det):
r"""Returns the log likelihood of the noise in the given detector:
.. math::
\log p(d_i|n_i) = \log \alpha_i -
\frac{1}{2} \left<d_i | d_i\right>.
Parameters
----------
det : str
The name of the detector.
Returns
-------
float :
The log likelihood of the noise in the requested detector.
"""
try:
return self._det_lognls[det]
except KeyError:
# hasn't been calculated yet; calculate & store
kmin = self._kmin[det]
kmax = self._kmax[det]
d = self._whitened_data[det]
lognorm = self.det_lognorm(det)
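            # the data are already whitened by sqrt(4 df / S_n), so <d|d>
            # reduces to the plain sum of |d[k]|^2 over the analysis band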
lognl = lognorm - 0.5 * d[kmin:kmax].inner(d[kmin:kmax]).real
self._det_lognls[det] = lognl
return self._det_lognls[det]
def _lognl(self):
"""Computes the log likelihood assuming the data is noise.
Since this is a constant for Gaussian noise, this is only computed once
then stored.
"""
return sum(self.det_lognl(det) for det in self._data)
def update(self, **params):
# update
super().update(**params)
# reset current waveforms
self._current_wfs = None
def _loglikelihood(self):
r"""Computes the log likelihood of the paramaters,
.. math::
\log p(d|\Theta, h) = \log \alpha -\frac{1}{2}\sum_i
\left<d_i - h_i(\Theta) | d_i - h_i(\Theta)\right>,
at the current parameter values :math:`\Theta`.
Returns
-------
float
The value of the log likelihood evaluated at the given point.
"""
# since the loglr has fewer terms, we'll call that, then just add
# back the noise term that canceled in the log likelihood ratio
return self.loglr + self.lognl
def write_metadata(self, fp, group=None):
"""Adds writing the psds, analyzed detectors, and lognl.
The analyzed detectors, their analysis segments, and the segments
used for psd estimation are written as
``analyzed_detectors``, ``{{detector}}_analysis_segment``, and
``{{detector}}_psd_segment``, respectively. These are either written
to the specified ``group``'s attrs, or to the top level attrs if
``group`` is None.
The total and each detector's lognl is written to the sample group's
        ``attrs``. If a group is specified, the group name will be prepended
to the lognl labels with ``{group}__``, with any ``/`` in the group
path replaced with ``__``. For example, if group is ``/a/b``, the
``lognl`` will be written as ``a__b__lognl`` in the sample's group
attrs.
Parameters
----------
fp : pycbc.inference.io.BaseInferenceFile instance
The inference file to write to.
group : str, optional
If provided, the metadata will be written to the attrs specified
by group, i.e., to ``fp[group].attrs``. Otherwise, metadata is
written to the top-level attrs (``fp.attrs``).
"""
super().write_metadata(fp, group=group)
attrs = fp.getattrs(group=group)
# write the analyzed detectors and times
attrs['analyzed_detectors'] = self.detectors
for det, data in self.data.items():
key = '{}_analysis_segment'.format(det)
attrs[key] = [float(data.start_time), float(data.end_time)]
if self._psds is not None and not self.no_save_data:
fp.write_psd(self._psds, group=group)
# write the times used for psd estimation (if they were provided)
for det in self.psd_segments:
key = '{}_psd_segment'.format(det)
attrs[key] = list(map(float, self.psd_segments[det]))
# save the frequency cutoffs
for det in self.detectors:
attrs['{}_likelihood_low_freq'.format(det)] = self._f_lower[det]
if self._f_upper[det] is not None:
attrs['{}_likelihood_high_freq'.format(det)] = \
self._f_upper[det]
# write the lognl to the samples group attrs
sampattrs = fp.getattrs(group=fp.samples_group)
# if a group is specified, prepend the lognl names with it
if group is None or group == '/':
prefix = ''
else:
prefix = group.replace('/', '__')
if not prefix.endswith('__'):
prefix += '__'
sampattrs['{}lognl'.format(prefix)] = self.lognl
# also save the lognl in each detector
for det in self.detectors:
sampattrs['{}{}_lognl'.format(prefix, det)] = self.det_lognl(det)
@staticmethod
def _fd_data_from_strain_dict(opts, strain_dict, psd_strain_dict):
"""Wrapper around :py:func:`data_utils.fd_data_from_strain_dict`."""
return fd_data_from_strain_dict(opts, strain_dict, psd_strain_dict)
@classmethod
def from_config(cls, cp, data_section='data', data=None, psds=None,
**kwargs):
r"""Initializes an instance of this class from the given config file.
In addition to ``[model]``, a ``data_section`` (default ``[data]``)
must be in the configuration file. The data section specifies settings
for loading data and estimating PSDs. See the `online documentation
<http://pycbc.org/pycbc/latest/html/inference.html#setting-data>`_ for
more details.
The following options are read from the ``[model]`` section, in
addition to ``name`` (which must be set):
* ``{{DET}}-low-frequency-cutoff = FLOAT`` :
The low frequency cutoff to use for each detector {{DET}}. A cutoff
must be provided for every detector that may be analyzed (any
additional detectors are ignored).
* ``{{DET}}-high-frequency-cutoff = FLOAT`` :
(Optional) A high frequency cutoff for each detector. If not
provided, the Nyquist frequency is used.
* ``check-for-valid-times =`` :
(Optional) If provided, will check that there are no data quality
flags on during the analysis segment and the segment used for PSD
estimation in each detector. To check for flags,
:py:func:`pycbc.dq.query_flag` is used, with settings pulled from the
``dq-*`` options in the ``[data]`` section. If a detector has bad
data quality during either the analysis segment or PSD segment, it
will be removed from the analysis.
* ``shift-psd-times-to-valid =`` :
(Optional) If provided, the segment used for PSD estimation will
          automatically be shifted left or right until a continuous block of
data with no data quality issues can be found. If no block can be
found with a maximum shift of +/- the requested psd segment length,
the detector will not be analyzed.
* ``err-on-missing-detectors =`` :
Raises an error if any detector is removed from the analysis because
a valid time could not be found. Otherwise, a warning is printed
to screen and the detector is removed from the analysis.
* ``normalize =`` :
(Optional) Turn on the normalization factor.
* ``ignore-failed-waveforms =`` :
Sets the ``ignore_failed_waveforms`` attribute.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
data_section : str, optional
The name of the section to load data options from.
\**kwargs :
All additional keyword arguments are passed to the class. Any
provided keyword will override what is in the config file.
"""
# get the injection file, to replace any FROM_INJECTION settings
if 'injection-file' in cp.options('data'):
injection_file = cp.get('data', 'injection-file')
else:
injection_file = None
# update any values that are to be retrieved from the injection
# Note: this does nothing if there are FROM_INJECTION values
get_values_from_injection(cp, injection_file, update_cp=True)
args = cls._init_args_from_config(cp)
# add the injection file
args['injection_file'] = injection_file
# check if normalize is set
if cp.has_option('model', 'normalize'):
args['normalize'] = True
if cp.has_option('model', 'ignore-failed-waveforms'):
args['ignore_failed_waveforms'] = True
if cp.has_option('model', 'no-save-data'):
args['no_save_data'] = True
# get any other keyword arguments provided in the model section
ignore_args = ['name', 'normalize',
'ignore-failed-waveforms', 'no-save-data']
for option in cp.options("model"):
if option in ("low-frequency-cutoff", "high-frequency-cutoff"):
ignore_args.append(option)
name = option.replace('-', '_')
args[name] = cp.get_cli_option('model', name,
nargs='+', type=float,
action=MultiDetOptionAction)
if 'low_frequency_cutoff' not in args:
raise ValueError("low-frequency-cutoff must be provided in the"
" model section, but is not found!")
# data args
bool_args = ['check-for-valid-times', 'shift-psd-times-to-valid',
'err-on-missing-detectors']
data_args = {arg.replace('-', '_'): True for arg in bool_args
if cp.has_option('model', arg)}
ignore_args += bool_args
# load the data
opts = data_opts_from_config(cp, data_section,
args['low_frequency_cutoff'])
if data is None or psds is None:
strain_dict, psd_strain_dict = data_from_cli(opts, **data_args)
# convert to frequency domain and get psds
stilde_dict, psds = cls._fd_data_from_strain_dict(
opts, strain_dict, psd_strain_dict)
# save the psd data segments if the psd was estimated from data
if opts.psd_estimation:
_tdict = psd_strain_dict or strain_dict
for det in psds:
psds[det].psd_segment = (_tdict[det].start_time,
_tdict[det].end_time)
# gate overwhitened if desired
if opts.gate_overwhitened and opts.gate is not None:
stilde_dict = gate_overwhitened_data(
stilde_dict, psds, opts.gate)
data = stilde_dict
args.update({'data': data, 'psds': psds})
# any extra args
args.update(cls.extra_args_from_config(cp, "model",
skip_args=ignore_args))
# get ifo-specific instances of calibration model
if cp.has_section('calibration'):
logging.info("Initializing calibration model")
recalib = {
ifo: Recalibrate.from_config(cp, ifo, section='calibration')
for ifo in opts.instruments}
args['recalibration'] = recalib
# get gates for templates
gates = gates_from_cli(opts)
if gates:
args['gates'] = gates
args.update(kwargs)
return cls(**args)
class GaussianNoise(BaseGaussianNoise):
r"""Model that assumes data is stationary Gaussian noise.
With Gaussian noise the log likelihood functions for signal
:math:`\log p(d|\Theta, h)` and for noise :math:`\log p(d|n)` are given by:
.. math::
\log p(d|\Theta, h) &= \log\alpha -\frac{1}{2} \sum_i
\left< d_i - h_i(\Theta) | d_i - h_i(\Theta) \right> \\
\log p(d|n) &= \log\alpha -\frac{1}{2} \sum_i \left<d_i | d_i\right>
where the sum is over the number of detectors, :math:`d_i` is the data in
each detector, and :math:`h_i(\Theta)` is the model signal in each
detector. The (discrete) inner product is given by:
.. math::
\left<a_i | b_i\right> = 4\Re \Delta f
\sum_{k=k_{\mathrm{min}}}^{k_{\mathrm{max}}}
\frac{\tilde{a}_i^{*}[k] \tilde{b}_i[k]}{S^{(i)}_n[k]},
where :math:`\Delta f` is the frequency resolution (given by 1 / the
observation time :math:`T`), :math:`k` is an index over the discretely
sampled frequencies :math:`f = k \Delta_f`, and :math:`S^{(i)}_n[k]` is the
PSD in the given detector. The upper cutoff on the inner product
:math:`k_{\max}` is by default the Nyquist frequency
:math:`k_{\max} = N/2+1`, where :math:`N = \lfloor T/\Delta t \rfloor`
is the number of samples in the time domain, but this can be set manually
to a smaller value.
The normalization factor :math:`\alpha` is:
.. math::
\alpha = \prod_{i} \frac{1}{\left(\pi T\right)^{N/2}
\prod_{k=k_\mathrm{min}}^{k_{\mathrm{max}}} S^{(i)}_n[k]},
where the product is over the number of detectors. By default, the
normalization constant is not included in the log likelihood, but it can
be turned on using the ``normalize`` keyword argument.
Note that the log likelihood ratio has fewer terms than the log likelihood,
since the normalization and :math:`\left<d_i|d_i\right>` terms cancel:
.. math::
\log \mathcal{L}(\Theta) = \sum_i \left[
\left<h_i(\Theta)|d_i\right> -
\frac{1}{2} \left<h_i(\Theta)|h_i(\Theta)\right> \right]
Upon initialization, the data is whitened using the given PSDs. If no PSDs
are given the data and waveforms returned by the waveform generator are
assumed to be whitened.
For more details on initialization parameters and definition of terms, see
:py:class:`models.BaseDataModel`.
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied.
data : dict
A dictionary of data, in which the keys are the detector names and the
values are the data (assumed to be unwhitened). The list of keys must
match the waveform generator's detectors keys, and the epoch of every
data set must be the same as the waveform generator's epoch.
low_frequency_cutoff : dict
A dictionary of starting frequencies, in which the keys are the
detector names and the values are the starting frequencies for the
respective detectors to be used for computing inner products.
psds : dict, optional
A dictionary of FrequencySeries keyed by the detector names. The
dictionary must have a psd for each detector specified in the data
dictionary. If provided, the inner products in each detector will be
weighted by 1/psd of that detector.
high_frequency_cutoff : dict, optional
A dictionary of ending frequencies, in which the keys are the
detector names and the values are the ending frequencies for the
respective detectors to be used for computing inner products. If not
provided, the minimum of the largest frequency stored in the data
and a given waveform will be used.
normalize : bool, optional
        If True, the normalization factor :math:`\alpha` will be included in the
log likelihood. Default is to not include it.
static_params : dict, optional
A dictionary of parameter names -> values to keep fixed.
\**kwargs :
All other keyword arguments are passed to ``BaseDataModel``.
Examples
--------
Create a signal, and set up the model using that signal:
>>> from pycbc import psd as pypsd
>>> from pycbc.inference.models import GaussianNoise
>>> from pycbc.waveform.generator import (FDomainDetFrameGenerator,
... FDomainCBCGenerator)
>>> seglen = 4
>>> sample_rate = 2048
>>> N = seglen*sample_rate/2+1
>>> fmin = 30.
>>> static_params = {'approximant': 'IMRPhenomD', 'f_lower': fmin,
... 'mass1': 38.6, 'mass2': 29.3,
... 'spin1z': 0., 'spin2z': 0., 'ra': 1.37, 'dec': -1.26,
... 'polarization': 2.76, 'distance': 3*500.}
>>> variable_params = ['tc']
>>> tsig = 3.1
>>> generator = FDomainDetFrameGenerator(
... FDomainCBCGenerator, 0., detectors=['H1', 'L1'],
... variable_args=variable_params,
... delta_f=1./seglen, **static_params)
>>> signal = generator.generate(tc=tsig)
>>> psd = pypsd.aLIGOZeroDetHighPower(N, 1./seglen, 20.)
>>> psds = {'H1': psd, 'L1': psd}
>>> low_frequency_cutoff = {'H1': fmin, 'L1': fmin}
>>> model = GaussianNoise(variable_params, signal, low_frequency_cutoff,
psds=psds, static_params=static_params)
Set the current position to the coalescence time of the signal:
>>> model.update(tc=tsig)
Now compute the log likelihood ratio and prior-weighted likelihood ratio;
since we have not provided a prior, these should be equal to each other:
>>> print('{:.2f}'.format(model.loglr))
282.43
>>> print('{:.2f}'.format(model.logplr))
282.43
Print all of the default_stats:
>>> print(',\n'.join(['{}: {:.2f}'.format(s, v)
... for (s, v) in sorted(model.current_stats.items())]))
H1_cplx_loglr: 177.76+0.00j,
H1_optimal_snrsq: 355.52,
L1_cplx_loglr: 104.67+0.00j,
L1_optimal_snrsq: 209.35,
logjacobian: 0.00,
loglikelihood: 0.00,
loglr: 282.43,
logprior: 0.00
Compute the SNR; for this system and PSD, this should be approximately 24:
>>> from pycbc.conversions import snr_from_loglr
>>> x = snr_from_loglr(model.loglr)
>>> print('{:.2f}'.format(x))
23.77
Since there is no noise, the SNR should be the same as the quadrature sum
of the optimal SNRs in each detector:
>>> x = (model.det_optimal_snrsq('H1') +
... model.det_optimal_snrsq('L1'))**0.5
>>> print('{:.2f}'.format(x))
23.77
Toggle on the normalization constant:
>>> model.normalize = True
>>> model.loglikelihood
835397.8757405131
Using the same model, evaluate the log likelihood ratio at several points
in time and check that the max is at tsig:
>>> import numpy
>>> times = numpy.linspace(tsig-1, tsig+1, num=101)
>>> loglrs = numpy.zeros(len(times))
>>> for (ii, t) in enumerate(times):
... model.update(tc=t)
... loglrs[ii] = model.loglr
>>> print('tsig: {:.2f}, time of max loglr: {:.2f}'.format(
... tsig, times[loglrs.argmax()]))
tsig: 3.10, time of max loglr: 3.10
Create a prior and use it (see distributions module for more details):
>>> from pycbc import distributions
>>> uniform_prior = distributions.Uniform(tc=(tsig-0.2,tsig+0.2))
>>> prior = distributions.JointDistribution(variable_params, uniform_prior)
>>> model = GaussianNoise(variable_params,
... signal, low_frequency_cutoff, psds=psds, prior=prior,
... static_params=static_params)
>>> model.update(tc=tsig)
>>> print('{:.2f}'.format(model.logplr))
283.35
>>> print(',\n'.join(['{}: {:.2f}'.format(s, v)
... for (s, v) in sorted(model.current_stats.items())]))
H1_cplx_loglr: 177.76+0.00j,
H1_optimal_snrsq: 355.52,
L1_cplx_loglr: 104.67+0.00j,
L1_optimal_snrsq: 209.35,
logjacobian: 0.00,
loglikelihood: 0.00,
loglr: 282.43,
logprior: 0.92
"""
name = 'gaussian_noise'
def __init__(self, variable_params, data, low_frequency_cutoff, psds=None,
high_frequency_cutoff=None, normalize=False,
static_params=None, **kwargs):
# set up the boiler-plate attributes
super(GaussianNoise, self).__init__(
variable_params, data, low_frequency_cutoff, psds=psds,
high_frequency_cutoff=high_frequency_cutoff, normalize=normalize,
static_params=static_params, **kwargs)
# Determine if all data have the same sampling rate and segment length
if self.all_ifodata_same_rate_length:
# create a waveform generator for all ifos
self.waveform_generator = create_waveform_generator(
self.variable_params, self.data,
waveform_transforms=self.waveform_transforms,
recalibration=self.recalibration,
gates=self.gates, **self.static_params)
else:
            # create a separate waveform generator for each ifo
self.waveform_generator = {}
for det in self.data:
self.waveform_generator[det] = create_waveform_generator(
self.variable_params, {det: self.data[det]},
waveform_transforms=self.waveform_transforms,
recalibration=self.recalibration,
gates=self.gates, **self.static_params)
@property
def _extra_stats(self):
"""Adds ``loglr``, plus ``cplx_loglr`` and ``optimal_snrsq`` in each
detector."""
return ['loglr'] + \
['{}_cplx_loglr'.format(det) for det in self._data] + \
['{}_optimal_snrsq'.format(det) for det in self._data]
def _nowaveform_loglr(self):
"""Convenience function to set loglr values if no waveform generated.
"""
for det in self._data:
setattr(self._current_stats, 'loglikelihood', -numpy.inf)
setattr(self._current_stats, '{}_cplx_loglr'.format(det),
-numpy.inf)
# snr can't be < 0 by definition, so return 0
setattr(self._current_stats, '{}_optimal_snrsq'.format(det), 0.)
return -numpy.inf
@property
def multi_signal_support(self):
""" The list of classes that this model supports in a multi-signal
likelihood
"""
return [type(self)]
def multi_loglikelihood(self, models):
""" Calculate a multi-model (signal) likelihood
"""
# Generate the waveforms for each submodel
wfs = []
for m in models + [self]:
wfs.append(m.get_waveforms())
# combine into a single waveform
combine = {}
for det in self.data:
mlen = max([len(x[det]) for x in wfs])
[x[det].resize(mlen) for x in wfs]
combine[det] = sum([x[det] for x in wfs])
self._current_wfs = combine
loglr = self._loglr()
self._current_wfs = None
return loglr + self.lognl
def get_waveforms(self):
"""The waveforms generated using the current parameters.
If the waveforms haven't been generated yet, they will be generated.
Returns
-------
dict :
Dictionary of detector names -> FrequencySeries.
"""
if self._current_wfs is None:
params = self.current_params
if self.all_ifodata_same_rate_length:
wfs = self.waveform_generator.generate(**params)
else:
wfs = {}
for det in self.data:
wfs.update(self.waveform_generator[det].generate(**params))
self._current_wfs = wfs
return self._current_wfs
def _loglr(self):
r"""Computes the log likelihood ratio,
.. math::
\log \mathcal{L}(\Theta) = \sum_i
\left<h_i(\Theta)|d_i\right> -
\frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,
at the current parameter values :math:`\Theta`.
Returns
-------
float
The value of the log likelihood ratio.
"""
try:
wfs = self.get_waveforms()
except NoWaveformError:
return self._nowaveform_loglr()
except FailedWaveformError as e:
if self.ignore_failed_waveforms:
return self._nowaveform_loglr()
else:
raise e
lr = 0.
for det, h in wfs.items():
# the kmax of the waveforms may be different than internal kmax
kmax = min(len(h), self._kmax[det])
if self._kmin[det] >= kmax:
# if the waveform terminates before the filtering low frequency
# cutoff, then the loglr is just 0 for this detector
cplx_hd = 0j
hh = 0.
else:
slc = slice(self._kmin[det], kmax)
# whiten the waveform
h[self._kmin[det]:kmax] *= self._weight[det][slc]
# the inner products
cplx_hd = h[slc].inner(self._whitened_data[det][slc]) # <h, d>
hh = h[slc].inner(h[slc]).real # < h, h>
cplx_loglr = cplx_hd - 0.5 * hh
# store
setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh)
setattr(self._current_stats, '{}_cplx_loglr'.format(det),
cplx_loglr)
lr += cplx_loglr.real
# also store the loglikelihood, to ensure it is populated in the
# current stats even if loglikelihood is never called
self._current_stats.loglikelihood = lr + self.lognl
return float(lr)
def det_cplx_loglr(self, det):
"""Returns the complex log likelihood ratio in the given detector.
Parameters
----------
det : str
The name of the detector.
Returns
-------
complex float :
The complex log likelihood ratio.
"""
# try to get it from current stats
try:
return getattr(self._current_stats, '{}_cplx_loglr'.format(det))
except AttributeError:
# hasn't been calculated yet; call loglr to do so
self._loglr()
# now try returning again
return getattr(self._current_stats, '{}_cplx_loglr'.format(det))
def det_optimal_snrsq(self, det):
"""Returns the opitmal SNR squared in the given detector.
Parameters
----------
det : str
The name of the detector.
Returns
-------
float :
            The optimal SNR squared.
"""
# try to get it from current stats
try:
return getattr(self._current_stats, '{}_optimal_snrsq'.format(det))
except AttributeError:
# hasn't been calculated yet; call loglr to do so
self._loglr()
# now try returning again
return getattr(self._current_stats, '{}_optimal_snrsq'.format(det))
#
# =============================================================================
#
# Support functions
#
# =============================================================================
#
def get_values_from_injection(cp, injection_file, update_cp=True):
"""Replaces all FROM_INJECTION values in a config file with the
corresponding value from the injection.
This looks for any options that start with ``FROM_INJECTION[:ARG]`` in
a config file. It then replaces that value with the corresponding value
from the injection file. An argument may be optionally provided, in which
case the argument will be retrieved from the injection file. Functions of
parameters in the injection file may be used; the syntax and functions
available is the same as the ``--parameters`` argument in executables
such as ``pycbc_inference_extract_samples``. If no ``ARG`` is provided,
    then the option name itself will be used as the parameter to retrieve
    from the injection.
For example,
.. code-block:: ini
mass1 = FROM_INJECTION
will cause ``mass1`` to be retrieved from the injection file, while:
.. code-block:: ini
mass1 = FROM_INJECTION:'primary_mass(mass1, mass2)'
will cause the larger of mass1 and mass2 to be retrieved from the injection
file. Note that if spaces are in the argument, it must be encased in
single quotes.
The injection file may contain only one injection. Otherwise, a ValueError
will be raised.
Parameters
----------
cp : ConfigParser
The config file within which to replace values.
injection_file : str or None
The injection file to get values from. A ValueError will be raised
if there are any ``FROM_INJECTION`` values in the config file, and
injection file is None, or if there is more than one injection.
update_cp : bool, optional
Update the config parser with the replaced parameters. If False,
will just retrieve the parameter values to update, without updating
the config file. Default is True.
Returns
-------
list
The parameters that were replaced, as a tuple of section name, option,
value.
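    Examples
    --------
    An illustrative call (a sketch; ``config.ini`` and ``injections.hdf`` are
    placeholder names for a config file containing ``FROM_INJECTION`` options
    and a single-injection file)::

        from pycbc.workflow import WorkflowConfigParser

        cp = WorkflowConfigParser(['config.ini'])
        replaced = get_values_from_injection(cp, 'injections.hdf')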
"""
lookfor = 'FROM_INJECTION'
# figure out what parameters need to be set
replace_params = []
for sec in cp.sections():
for opt in cp.options(sec):
val = cp.get(sec, opt)
splitvals = shlex.split(val)
replace_this = []
for ii, subval in enumerate(splitvals):
if subval.startswith(lookfor):
# determine what we should retrieve from the injection
subval = subval.split(':', 1)
if len(subval) == 1:
subval = opt
else:
subval = subval[1]
replace_this.append((ii, subval))
if replace_this:
replace_params.append((sec, opt, splitvals, replace_this))
if replace_params:
# check that we have an injection file
if injection_file is None:
raise ValueError("One or values are set to {}, but no injection "
"file provided".format(lookfor))
# load the injection file
inj = InjectionSet(injection_file).table.view(type=FieldArray)
# make sure there's only one injection provided
if inj.size > 1:
raise ValueError("One or more values are set to {}, but more than "
"one injection exists in the injection file."
.format(lookfor))
# get the injection values to replace
for ii, (sec, opt, splitvals, replace_this) in enumerate(replace_params):
# replace the value in the shlex-splitted string with the value
# from the injection
for jj, arg in replace_this:
splitvals[jj] = str(inj[arg][0])
# now rejoin the string...
# shlex will strip quotes around arguments; this can be problematic
            # when rejoining if the argument had a space in it. In python 3.8
# there is a shlex.join function which properly rejoins things taking
# that into account. Since we need to continue to support earlier
# versions of python, the following kludge tries to account for that.
# If/when we drop support for all earlier versions of python, then the
# following can just be replaced by:
# replace_val = shlex.join(splitvals)
for jj, arg in enumerate(splitvals):
if ' ' in arg:
arg = "'" + arg + "'"
splitvals[jj] = arg
replace_val = ' '.join(splitvals)
replace_params[ii] = (sec, opt, replace_val)
# replace in the config file
if update_cp:
for (sec, opt, replace_val) in replace_params:
cp.set(sec, opt, replace_val)
return replace_params
def create_waveform_generator(
variable_params, data, waveform_transforms=None,
recalibration=None, gates=None,
generator_class=generator.FDomainDetFrameGenerator,
**static_params):
r"""Creates a waveform generator for use with a model.
Parameters
----------
variable_params : list of str
The names of the parameters varied.
data : dict
Dictionary mapping detector names to either a
        :py:class:`TimeSeries <pycbc.types.TimeSeries>` or
        :py:class:`FrequencySeries <pycbc.types.FrequencySeries>`.
waveform_transforms : list, optional
The list of transforms applied to convert variable parameters into
parameters that will be understood by the waveform generator.
recalibration : dict, optional
Dictionary mapping detector names to
        :py:class:`~pycbc.calibration.Recalibrate` instances for
recalibrating data.
gates : dict of tuples, optional
Dictionary of detectors -> tuples of specifying gate times. The
sort of thing returned by :py:func:`pycbc.gate.gates_from_cli`.
generator_class : detector-frame fdomain generator, optional
Class to use for generating waveforms. Default is
:py:class:`waveform.generator.FDomainDetFrameGenerator`.
\**static_params :
All other keyword arguments are passed as static parameters to the
waveform generator.
Returns
-------
pycbc.waveform.FDomainDetFrameGenerator
A waveform generator for frequency domain generation.
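    Examples
    --------
    A rough usage sketch (not a doctest; it assumes ``strain`` is a dictionary
    mapping detector names to :py:class:`~pycbc.types.FrequencySeries` data
    with a common ``delta_f``, ``delta_t``, and ``start_time``)::

        generator = create_waveform_generator(
            ['tc'], strain, approximant='IMRPhenomD', f_lower=20.,
            mass1=38.6, mass2=29.3, spin1z=0., spin2z=0.,
            ra=1.37, dec=-1.26, polarization=2.76, distance=1500.)
        wfs = generator.generate(tc=3.1)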
"""
# the waveform generator will get the variable_params + the output
# of the waveform transforms, so we'll add them to the list of
# parameters
if waveform_transforms is not None:
wfoutputs = set.union(*[t.outputs
for t in waveform_transforms])
else:
wfoutputs = set()
variable_params = list(variable_params) + list(wfoutputs)
# figure out what generator to use based on the approximant
try:
approximant = static_params['approximant']
except KeyError:
raise ValueError("no approximant provided in the static args")
generator_function = generator_class.select_rframe_generator(approximant)
# get data parameters; we'll just use one of the data to get the
# values, then check that all the others are the same
delta_f = None
for d in data.values():
if delta_f is None:
delta_f = d.delta_f
delta_t = d.delta_t
start_time = d.start_time
else:
if not all([d.delta_f == delta_f, d.delta_t == delta_t,
d.start_time == start_time]):
raise ValueError("data must all have the same delta_t, "
"delta_f, and start_time")
waveform_generator = generator_class(
generator_function, epoch=start_time,
variable_args=variable_params, detectors=list(data.keys()),
delta_f=delta_f, delta_t=delta_t,
recalib=recalibration, gates=gates,
**static_params)
return waveform_generator
| 49,252
| 39.941812
| 80
|
py
|
pycbc
|
pycbc-master/pycbc/inference/models/__init__.py
|
# Copyright (C) 2018 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This package provides classes and functions for evaluating Bayesian statistics
assuming various noise models.
"""
import logging
from pkg_resources import iter_entry_points as _iter_entry_points
from .base import BaseModel
from .base_data import BaseDataModel
from .analytic import (TestEggbox, TestNormal, TestRosenbrock, TestVolcano,
TestPrior, TestPosterior)
from .gaussian_noise import GaussianNoise
from .marginalized_gaussian_noise import MarginalizedPhaseGaussianNoise
from .marginalized_gaussian_noise import MarginalizedPolarization
from .marginalized_gaussian_noise import MarginalizedHMPolPhase
from .marginalized_gaussian_noise import MarginalizedTime
from .brute_marg import BruteParallelGaussianMarginalize
from .brute_marg import BruteLISASkyModesMarginalize
from .gated_gaussian_noise import (GatedGaussianNoise, GatedGaussianMargPol)
from .single_template import SingleTemplate
from .relbin import Relative, RelativeTime, RelativeTimeDom
from .hierarchical import HierarchicalModel, MultiSignalModel
# Used to manage a model instance across multiple cores or MPI
_global_instance = None
def _call_global_model(*args, **kwds):
"""Private function for global model (needed for parallelization)."""
return _global_instance(*args, **kwds) # pylint:disable=not-callable
def _call_global_model_logprior(*args, **kwds):
"""Private function for a calling global's logprior.
This is needed for samplers that use a separate function for the logprior,
like ``emcee_pt``.
"""
# pylint:disable=not-callable
return _global_instance(*args, callstat='logprior', **kwds)
class CallModel(object):
"""Wrapper class for calling models from a sampler.
This class can be called like a function, with the parameter values to
evaluate provided as a list in the same order as the model's
``variable_params``. In that case, the model is updated with the provided
parameters and then the ``callstat`` retrieved. If ``return_all_stats`` is
set to ``True``, then all of the stats specified by the model's
``default_stats`` will be returned as a tuple, in addition to the stat
value.
The model's attributes are promoted to this class's namespace, so that any
attribute and method of ``model`` may be called directly from this class.
    This class must be initialized prior to the creation of a ``Pool`` object.
Parameters
----------
model : Model instance
The model to call.
callstat : str
The statistic to call.
return_all_stats : bool, optional
Whether or not to return all of the other statistics along with the
``callstat`` value.
Examples
--------
Create a wrapper around an instance of the ``TestNormal`` model, with the
``callstat`` set to ``logposterior``:
>>> from pycbc.inference.models import TestNormal, CallModel
>>> model = TestNormal(['x', 'y'])
>>> call_model = CallModel(model, 'logposterior')
Now call on a set of parameter values:
>>> call_model([0.1, -0.2])
(-1.8628770664093453, (0.0, 0.0, -1.8628770664093453))
Note that a tuple of all of the model's ``default_stats`` were returned in
addition to the ``logposterior`` value. We can shut this off by toggling
``return_all_stats``:
>>> call_model.return_all_stats = False
>>> call_model([0.1, -0.2])
-1.8628770664093453
Attributes of the model can be called from the call model. For example:
>>> call_model.variable_params
('x', 'y')
"""
def __init__(self, model, callstat, return_all_stats=True):
self.model = model
self.callstat = callstat
self.return_all_stats = return_all_stats
def __getattr__(self, attr):
"""Adds the models attributes to self."""
return getattr(self.model, attr)
def __call__(self, param_values, callstat=None, return_all_stats=None):
"""Updates the model with the given parameter values, then calls the
call function.
Parameters
----------
param_values : list of float
The parameter values to test. Assumed to be in the same order as
``model.sampling_params``.
callstat : str, optional
Specify which statistic to call. Default is to call whatever self's
``callstat`` is set to.
return_all_stats : bool, optional
Whether or not to return all stats in addition to the ``callstat``
value. Default is to use self's ``return_all_stats``.
Returns
-------
stat : float
The statistic returned by the ``callfunction``.
all_stats : tuple, optional
The values of all of the model's ``default_stats`` at the given
param values. Any stat that has not be calculated is set to
``numpy.nan``. This is only returned if ``return_all_stats`` is
set to ``True``.
"""
if callstat is None:
callstat = self.callstat
if return_all_stats is None:
return_all_stats = self.return_all_stats
params = dict(zip(self.model.sampling_params, param_values))
self.model.update(**params)
val = getattr(self.model, callstat)
if return_all_stats:
return val, self.model.get_current_stats()
else:
return val
def read_from_config(cp, **kwargs):
"""Initializes a model from the given config file.
    The ``[model]`` section must have a ``name`` option, which gives the name
    of the model class to initialize.
Parameters
----------
cp : WorkflowConfigParser
Config file parser to read.
\**kwargs :
All other keyword arguments are passed to the ``from_config`` method
of the class specified by the name argument.
Returns
-------
cls
The initialized model.
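    Examples
    --------
    An illustrative (minimal) ``[model]`` section selecting the Gaussian noise
    model might look like:

    .. code-block:: ini

        [model]
        name = gaussian_noise
        low-frequency-cutoff = 20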
"""
# use the name to get the distribution
name = cp.get("model", "name")
return get_model(name).from_config(cp, **kwargs)
_models = {_cls.name: _cls for _cls in (
TestEggbox,
TestNormal,
TestRosenbrock,
TestVolcano,
TestPosterior,
TestPrior,
GaussianNoise,
MarginalizedPhaseGaussianNoise,
MarginalizedPolarization,
MarginalizedHMPolPhase,
MarginalizedTime,
BruteParallelGaussianMarginalize,
BruteLISASkyModesMarginalize,
GatedGaussianNoise,
GatedGaussianMargPol,
SingleTemplate,
Relative,
RelativeTime,
HierarchicalModel,
MultiSignalModel,
RelativeTimeDom,
)}
class _ModelManager(dict):
"""Sub-classes dictionary to manage the collection of available models.
The first time this is called, any plugin models that are available will be
added to the dictionary before returning.
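    For reference, a plugin package can expose a model through the
    ``pycbc.inference.models`` entry-point group; an illustrative ``setup.py``
    snippet (package and class names are hypothetical) might look like::

        from setuptools import setup

        setup(
            name='my-pycbc-plugin',
            entry_points={
                'pycbc.inference.models': [
                    'my_model = my_package.my_module:MyModel',
                ],
            },
        )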
"""
def __init__(self, *args, **kwargs):
self.retrieve_plugins = True
super().__init__(*args, **kwargs)
def add_model(self, model):
"""Adds a model to the dictionary.
If the given model has the same name as a model already in the
dictionary, the original model will be overridden. A warning will be
printed in that case.
"""
if super().__contains__(model.name):
logging.warning("Custom model %s will override a model of the "
"same name. If you don't want this, change the "
"model's name attribute and restart.", model.name)
self[model.name] = model
def add_plugins(self):
"""Adds any plugin models that are available.
This will only add the plugins if ``self.retrieve_plugins = True``.
After this runs, ``self.retrieve_plugins`` is set to ``False``, so that
        subsequent calls to this will not re-add models.
"""
if self.retrieve_plugins:
for plugin in _iter_entry_points('pycbc.inference.models'):
self.add_model(plugin.resolve())
self.retrieve_plugins = False
def __len__(self):
self.add_plugins()
        return super().__len__()
def __contains__(self, key):
self.add_plugins()
return super().__contains__(key)
def get(self, *args):
self.add_plugins()
return super().get(*args)
def popitem(self):
self.add_plugins()
return super().popitem()
def pop(self, *args):
try:
return super().pop(*args)
except KeyError:
self.add_plugins()
return super().pop(*args)
def keys(self):
self.add_plugins()
return super().keys()
def values(self):
self.add_plugins()
return super().values()
def items(self):
self.add_plugins()
return super().items()
def __iter__(self):
self.add_plugins()
return super().__iter__()
def __repr__(self):
self.add_plugins()
return super().__repr__()
def __getitem__(self, item):
try:
return super().__getitem__(item)
except KeyError:
self.add_plugins()
return super().__getitem__(item)
def __delitem__(self, *args, **kwargs):
try:
super().__delitem__(*args, **kwargs)
except KeyError:
self.add_plugins()
super().__delitem__(*args, **kwargs)
models = _ModelManager(_models)
def get_models():
"""Returns the dictionary of current models.
Ensures that plugins are added to the dictionary first.
"""
models.add_plugins()
return models
def get_model(model_name):
"""Retrieve the given model.
Parameters
----------
model_name : str
The name of the model to get.
Returns
-------
model :
The requested model.
"""
return get_models()[model_name]
def available_models():
"""List the currently available models."""
return list(get_models().keys())
def register_model(model):
"""Makes a custom model available to PyCBC.
The provided model will be added to the dictionary of models that PyCBC
knows about, using the model's ``name`` attribute. If the ``name`` is the
same as a model that already exists in PyCBC, a warning will be printed.
Parameters
----------
model : pycbc.inference.models.base.BaseModel
The model to use. The model should be a sub-class of
:py:class:`BaseModel <pycbc.inference.models.base.BaseModel>` to ensure
it has the correct API for use within ``pycbc_inference``.
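    Examples
    --------
    A minimal sketch of registering a (hypothetical) custom model built on an
    existing one::

        from pycbc.inference.models import register_model
        from pycbc.inference.models.analytic import TestNormal

        class MyTestNormal(TestNormal):
            name = 'my_test_normal'

        register_model(MyTestNormal)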
"""
get_models().add_model(model)
| 11,328
| 31.002825
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/models/analytic.py
|
# Copyright (C) 2018 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This modules provides models that have analytic solutions for the
log likelihood.
"""
import logging
import numpy
import numpy.random
from scipy import stats
from .base import BaseModel
class TestNormal(BaseModel):
r"""The test distribution is an multi-variate normal distribution.
The number of dimensions is set by the number of ``variable_params`` that
are passed. For details on the distribution used, see
``scipy.stats.multivariate_normal``.
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied.
mean : array-like, optional
        The mean values of the parameters. If None provided, will use 0 for all
parameters.
cov : array-like, optional
The covariance matrix of the parameters. If None provided, will use
unit variance for all parameters, with cross-terms set to 0.
**kwargs :
All other keyword arguments are passed to ``BaseModel``.
Examples
--------
Create a 2D model with zero mean and unit variance:
>>> m = TestNormal(['x', 'y'])
Set the current parameters and evaluate the log posterior:
>>> m.update(x=-0.2, y=0.1)
>>> m.logposterior
-1.8628770664093453
See the current stats that were evaluated:
>>> m.current_stats
{'logjacobian': 0.0, 'loglikelihood': -1.8628770664093453, 'logprior': 0.0}
"""
name = "test_normal"
def __init__(self, variable_params, mean=None, cov=None, **kwargs):
# set up base likelihood parameters
super(TestNormal, self).__init__(variable_params, **kwargs)
# store the pdf
if mean is None:
mean = [0.]*len(variable_params)
if cov is None:
cov = [1.]*len(variable_params)
self._dist = stats.multivariate_normal(mean=mean, cov=cov)
# check that the dimension is correct
if self._dist.dim != len(variable_params):
raise ValueError("dimension mis-match between variable_params and "
"mean and/or cov")
def _loglikelihood(self):
"""Returns the log pdf of the multivariate normal.
"""
return self._dist.logpdf([self.current_params[p]
for p in self.variable_params])
class TestEggbox(BaseModel):
r"""The test distribution is an 'eggbox' function:
.. math::
\log \mathcal{L}(\Theta) = \left[
2+\prod_{i=1}^{n}\cos\left(\frac{\theta_{i}}{2}\right)\right]^{5}
The number of dimensions is set by the number of ``variable_params`` that
are passed.
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied.
**kwargs :
All other keyword arguments are passed to ``BaseModel``.
"""
name = "test_eggbox"
def __init__(self, variable_params, **kwargs):
# set up base likelihood parameters
super(TestEggbox, self).__init__(variable_params, **kwargs)
def _loglikelihood(self):
"""Returns the log pdf of the eggbox function.
"""
return (2 + numpy.prod(numpy.cos([
self.current_params[p]/2. for p in self.variable_params]))) ** 5
class TestRosenbrock(BaseModel):
r"""The test distribution is the Rosenbrock function:
.. math::
\log \mathcal{L}(\Theta) = -\sum_{i=1}^{n-1}[
(1-\theta_{i})^{2}+100(\theta_{i+1} - \theta_{i}^{2})^{2}]
The number of dimensions is set by the number of ``variable_params`` that
are passed.
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied.
**kwargs :
All other keyword arguments are passed to ``BaseModel``.
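    Examples
    --------
    An illustrative check that the global maximum is at :math:`\theta_i = 1`,
    where the log likelihood is zero:

    >>> m = TestRosenbrock(['x', 'y'])
    >>> m.update(x=1., y=1.)
    >>> logl = m.loglikelihood  # 0.0 at the global maximum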
"""
name = "test_rosenbrock"
def __init__(self, variable_params, **kwargs):
# set up base likelihood parameters
super(TestRosenbrock, self).__init__(variable_params, **kwargs)
def _loglikelihood(self):
"""Returns the log pdf of the Rosenbrock function.
"""
logl = 0
p = [self.current_params[p] for p in self.variable_params]
for i in range(len(p) - 1):
logl -= ((1 - p[i])**2 + 100 * (p[i+1] - p[i]**2)**2)
return logl
class TestVolcano(BaseModel):
r"""The test distribution is a two-dimensional 'volcano' function:
.. math::
        \Theta = \sqrt{\theta_{1}^{2} + \theta_{2}^{2}} \\
        \log \mathcal{L}(\Theta) =
25\left(e^{\frac{-\Theta}{35}} +
\frac{1}{2\sqrt{2\pi}} e^{-\frac{(\Theta-5)^{2}}{8}}\right)
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied. Must have length 2.
**kwargs :
All other keyword arguments are passed to ``BaseModel``.
"""
name = "test_volcano"
def __init__(self, variable_params, **kwargs):
# set up base likelihood parameters
super(TestVolcano, self).__init__(variable_params, **kwargs)
# make sure there are exactly two variable args
if len(self.variable_params) != 2:
raise ValueError("TestVolcano distribution requires exactly "
"two variable args")
def _loglikelihood(self):
"""Returns the log pdf of the 2D volcano function.
"""
p = [self.current_params[p] for p in self.variable_params]
r = numpy.sqrt(p[0]**2 + p[1]**2)
mu, sigma = 5.0, 2.0
return 25 * (
numpy.exp(-r/35) + 1 / (sigma * numpy.sqrt(2 * numpy.pi)) *
numpy.exp(-0.5 * ((r - mu) / sigma) ** 2))
class TestPrior(BaseModel):
r"""Uses the prior as the test distribution.
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied. Must have length 2.
**kwargs :
All other keyword arguments are passed to ``BaseModel``.
"""
name = "test_prior"
def __init__(self, variable_params, **kwargs):
# set up base likelihood parameters
super(TestPrior, self).__init__(variable_params, **kwargs)
def _loglikelihood(self):
"""Returns zero.
"""
return 0.
class TestPosterior(BaseModel):
r"""Build a test posterior from a set of samples using a kde
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied.
posterior_file : hdf file
A compatible pycbc inference output file which posterior samples can
be read from.
nsamples : int
Number of samples to draw from posterior file to build KDE.
**kwargs :
All other keyword arguments are passed to ``BaseModel``.
"""
name = "test_posterior"
def __init__(self, variable_params, posterior_file, nsamples, **kwargs):
super(TestPosterior, self).__init__(variable_params, **kwargs)
from pycbc.inference.io import loadfile # avoid cyclic import
logging.info('loading test posterior model')
inf_file = loadfile(posterior_file)
logging.info('reading samples')
samples = inf_file.read_samples(variable_params)
samples = numpy.array([samples[v] for v in variable_params])
# choose only the requested amount of samples
idx = numpy.arange(0, samples.shape[-1])
idx = numpy.random.choice(idx, size=int(nsamples), replace=False)
samples = samples[:, idx]
logging.info('making kde with %s samples', samples.shape[-1])
self.kde = stats.gaussian_kde(samples)
logging.info('done initializing test posterior model')
def _loglikelihood(self):
"""Returns the log pdf of the test posterior kde
"""
p = numpy.array([self.current_params[p] for p in self.variable_params])
logpost = self.kde.logpdf(p)
return float(logpost[0])
| 8,703
| 32.221374
| 79
|
py
|
pycbc
|
pycbc-master/pycbc/inference/models/marginalized_gaussian_noise.py
|
# Copyright (C) 2018 Charlie Hoy, Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module provides model classes that assume the noise is Gaussian and
allows for the likelihood to be marginalized over phase and/or time and/or
distance.
"""
import itertools
import numpy
from scipy import special
from pycbc.waveform import generator
from pycbc.waveform import (NoWaveformError, FailedWaveformError)
from pycbc.detector import Detector
from .gaussian_noise import (BaseGaussianNoise,
create_waveform_generator,
GaussianNoise)
from .tools import marginalize_likelihood, DistMarg
class MarginalizedPhaseGaussianNoise(GaussianNoise):
r"""The likelihood is analytically marginalized over phase.
This class can be used with signal models that can be written as:
.. math::
\tilde{h}(f; \Theta, \phi) = A(f; \Theta)e^{i\Psi(f; \Theta) + i \phi},
where :math:`\phi` is an arbitrary phase constant. This phase constant
can be analytically marginalized over with a uniform prior as follows:
assuming the noise is stationary and Gaussian (see `GaussianNoise`
for details), the posterior is:
.. math::
p(\Theta,\phi|d)
&\propto p(\Theta)p(\phi)p(d|\Theta,\phi) \\
&\propto p(\Theta)\frac{1}{2\pi}\exp\left[
-\frac{1}{2}\sum_{i}^{N_D} \left<
h_i(\Theta,\phi) - d_i, h_i(\Theta,\phi) - d_i
\right>\right].
Here, the sum is over the number of detectors :math:`N_D`, :math:`d_i`
and :math:`h_i` are the data and signal in detector :math:`i`,
respectively, and we have assumed a uniform prior on :math:`\phi \in [0,
2\pi)`. With the form of the signal model given above, the inner product
in the exponent can be written as:
.. math::
-\frac{1}{2}\left<h_i - d_i, h_i- d_i\right>
&= \left<h_i, d_i\right> -
\frac{1}{2}\left<h_i, h_i\right> -
\frac{1}{2}\left<d_i, d_i\right> \\
&= \Re\left\{O(h^0_i, d_i)e^{-i\phi}\right\} -
\frac{1}{2}\left<h^0_i, h^0_i\right> -
\frac{1}{2}\left<d_i, d_i\right>,
where:
.. math::
h_i^0 &\equiv \tilde{h}_i(f; \Theta, \phi=0); \\
O(h^0_i, d_i) &\equiv 4 \int_0^\infty
\frac{\tilde{h}_i^*(f; \Theta,0)\tilde{d}_i(f)}{S_n(f)}\mathrm{d}f.
Gathering all of the terms that are not dependent on :math:`\phi` together:
.. math::
        \alpha(\Theta, d) \equiv \exp\left[-\frac{1}{2}\sum_i
            \left(\left<h^0_i, h^0_i\right> + \left<d_i, d_i\right>\right)\right],
we can marginalize the posterior over :math:`\phi`:
.. math::
p(\Theta|d)
&\propto p(\Theta)\alpha(\Theta,d)\frac{1}{2\pi}
\int_{0}^{2\pi}\exp\left[\Re \left\{
e^{-i\phi} \sum_i O(h^0_i, d_i)
\right\}\right]\mathrm{d}\phi \\
&\propto p(\Theta)\alpha(\Theta, d)\frac{1}{2\pi}
\int_{0}^{2\pi}\exp\left[
x(\Theta,d)\cos(\phi) + y(\Theta, d)\sin(\phi)
\right]\mathrm{d}\phi.
The integral in the last line is equal to :math:`2\pi I_0(\sqrt{x^2+y^2})`,
where :math:`I_0` is the modified Bessel function of the first kind. Thus
the marginalized posterior is:
.. math::
        p(\Theta|d) \propto
            p(\Theta)\, \alpha(\Theta, d)\,
            I_0\left(\left|\sum_i O(h^0_i, d_i)\right|\right).
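    As an illustrative (non-doctest) numerical check of the Bessel-function
    identity used above, the uniform phase average of the exponential can be
    compared against :math:`I_0(\sqrt{x^2+y^2})` directly::

        import numpy as np
        from scipy.special import i0

        x, y = 1.3, -0.7
        phi = np.linspace(0, 2 * np.pi, 100000, endpoint=False)
        # (1/2pi) * integral of exp(x*cos(phi) + y*sin(phi)) over [0, 2pi)
        lhs = np.exp(x * np.cos(phi) + y * np.sin(phi)).mean()
        rhs = i0(np.hypot(x, y))
        # lhs and rhs agree to numerical precision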
"""
name = 'marginalized_phase'
def __init__(self, variable_params, data, low_frequency_cutoff, psds=None,
high_frequency_cutoff=None, normalize=False,
static_params=None, **kwargs):
# set up the boiler-plate attributes
super(MarginalizedPhaseGaussianNoise, self).__init__(
variable_params, data, low_frequency_cutoff, psds=psds,
high_frequency_cutoff=high_frequency_cutoff, normalize=normalize,
static_params=static_params, **kwargs)
@property
def _extra_stats(self):
"""Adds ``loglr``, plus ``cplx_loglr`` and ``optimal_snrsq`` in each
detector."""
return ['loglr', 'maxl_phase'] + \
['{}_optimal_snrsq'.format(det) for det in self._data]
def _nowaveform_loglr(self):
"""Convenience function to set loglr values if no waveform generated.
"""
setattr(self._current_stats, 'loglikelihood', -numpy.inf)
# maxl phase doesn't exist, so set it to nan
setattr(self._current_stats, 'maxl_phase', numpy.nan)
for det in self._data:
# snr can't be < 0 by definition, so return 0
setattr(self._current_stats, '{}_optimal_snrsq'.format(det), 0.)
return -numpy.inf
def _loglr(self):
r"""Computes the log likelihood ratio,
.. math::
            \log \mathcal{L}(\Theta) =
                \log I_0 \left(\left|\sum_i O(h^0_i, d_i)\right|\right) -
                \frac{1}{2}\sum_i \left<h^0_i, h^0_i\right>,
at the current point in parameter space :math:`\Theta`.
Returns
-------
float
The value of the log likelihood ratio evaluated at the given point.
"""
params = self.current_params
try:
if self.all_ifodata_same_rate_length:
wfs = self.waveform_generator.generate(**params)
else:
wfs = {}
for det in self.data:
wfs.update(self.waveform_generator[det].generate(**params))
except NoWaveformError:
return self._nowaveform_loglr()
except FailedWaveformError as e:
if self.ignore_failed_waveforms:
return self._nowaveform_loglr()
else:
raise e
hh = 0.
hd = 0j
for det, h in wfs.items():
# the kmax of the waveforms may be different than internal kmax
kmax = min(len(h), self._kmax[det])
if self._kmin[det] >= kmax:
# if the waveform terminates before the filtering low frequency
# cutoff, then the loglr is just 0 for this detector
hh_i = 0.
hd_i = 0j
else:
# whiten the waveform
h[self._kmin[det]:kmax] *= \
self._weight[det][self._kmin[det]:kmax]
# calculate inner products
hh_i = h[self._kmin[det]:kmax].inner(
h[self._kmin[det]:kmax]).real
hd_i = h[self._kmin[det]:kmax].inner(
self._whitened_data[det][self._kmin[det]:kmax])
# store
setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh_i)
hh += hh_i
hd += hd_i
self._current_stats.maxl_phase = numpy.angle(hd)
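        # marginalize_likelihood with phase=True returns the phase-marginalized
        # log likelihood ratio derived in the class docstring, i.e.
        # log I0(|<h, d>|) - <h, h>/2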
return marginalize_likelihood(hd, hh, phase=True)
class MarginalizedTime(DistMarg, BaseGaussianNoise):
r""" This likelihood numerically marginalizes over time
This likelihood is optimized for marginalizing over time, but can also
handle marginalization over polarization, phase (where appropriate),
and sky location. The time series is interpolated using a
    quadratic approximation for sub-sample times.
"""
name = 'marginalized_time'
def __init__(self, variable_params,
data, low_frequency_cutoff, psds=None,
high_frequency_cutoff=None, normalize=False,
**kwargs):
self.kwargs = kwargs
variable_params, kwargs = self.setup_marginalization(
variable_params,
**kwargs)
# set up the boiler-plate attributes
super(MarginalizedTime, self).__init__(
variable_params, data, low_frequency_cutoff, psds=psds,
high_frequency_cutoff=high_frequency_cutoff, normalize=normalize,
**kwargs)
# Determine if all data have the same sampling rate and segment length
if self.all_ifodata_same_rate_length:
# create a waveform generator for all ifos
self.waveform_generator = create_waveform_generator(
self.variable_params, self.data,
waveform_transforms=self.waveform_transforms,
recalibration=self.recalibration,
generator_class=generator.FDomainDetFrameTwoPolNoRespGenerator,
gates=self.gates, **kwargs['static_params'])
else:
            # create a separate waveform generator for each ifo
self.waveform_generator = {}
for det in self.data:
self.waveform_generator[det] = create_waveform_generator(
self.variable_params, {det: self.data[det]},
waveform_transforms=self.waveform_transforms,
recalibration=self.recalibration,
generator_class=generator.FDomainDetFrameTwoPolNoRespGenerator,
gates=self.gates, **kwargs['static_params'])
self.dets = {}
def _nowaveform_loglr(self):
"""Convenience function to set loglr values if no waveform generated.
"""
return -numpy.inf
def _loglr(self):
r"""Computes the log likelihood ratio,
.. math::
\log \mathcal{L}(\Theta) = \sum_i
\left<h_i(\Theta)|d_i\right> -
\frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,
at the current parameter values :math:`\Theta`.
Returns
-------
float
The value of the log likelihood ratio.
"""
from pycbc.filter import matched_filter_core
params = self.current_params
try:
if self.all_ifodata_same_rate_length:
wfs = self.waveform_generator.generate(**params)
else:
wfs = {}
for det in self.data:
wfs.update(self.waveform_generator[det].generate(**params))
except NoWaveformError:
return self._nowaveform_loglr()
except FailedWaveformError as e:
if self.ignore_failed_waveforms:
return self._nowaveform_loglr()
else:
raise e
sh_total = hh_total = 0.
snr_estimate = {}
cplx_hpd = {}
cplx_hcd = {}
hphp = {}
hchc = {}
hphc = {}
for det, (hp, hc) in wfs.items():
# the kmax of the waveforms may be different than internal kmax
kmax = min(max(len(hp), len(hc)), self._kmax[det])
slc = slice(self._kmin[det], kmax)
# whiten both polarizations
hp[self._kmin[det]:kmax] *= self._weight[det][slc]
hc[self._kmin[det]:kmax] *= self._weight[det][slc]
hp.resize(len(self._whitened_data[det]))
hc.resize(len(self._whitened_data[det]))
cplx_hpd[det], _, _ = matched_filter_core(
hp,
self._whitened_data[det],
low_frequency_cutoff=self._f_lower[det],
high_frequency_cutoff=self._f_upper[det],
h_norm=1)
cplx_hcd[det], _, _ = matched_filter_core(
hc,
self._whitened_data[det],
low_frequency_cutoff=self._f_lower[det],
high_frequency_cutoff=self._f_upper[det],
h_norm=1)
hphp[det] = hp[slc].inner(hp[slc]).real
hchc[det] = hc[slc].inner(hc[slc]).real
hphc[det] = hp[slc].inner(hc[slc]).real
snr_proxy = ((cplx_hpd[det] / hphp[det] ** 0.5).squared_norm() +
(cplx_hcd[det] / hchc[det] ** 0.5).squared_norm())
snr_estimate[det] = (0.5 * snr_proxy) ** 0.5
self.draw_ifos(snr_estimate, log=False, **self.kwargs)
self.snr_draw(snrs=snr_estimate)
for det in wfs:
if det not in self.dets:
self.dets[det] = Detector(det)
fp, fc = self.dets[det].antenna_pattern(
params['ra'],
params['dec'],
params['polarization'],
params['tc'])
dt = self.dets[det].time_delay_from_earth_center(params['ra'],
params['dec'],
params['tc'])
dtc = params['tc'] + dt
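            # evaluate the whitened filter time series for each polarization
            # at the detector arrival time, using quadratic sub-sample
            # interpolation, and project onto the detector response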
cplx_hd = fp * cplx_hpd[det].at_time(dtc,
interpolate='quadratic')
cplx_hd += fc * cplx_hcd[det].at_time(dtc,
interpolate='quadratic')
hh = (fp * fp * hphp[det] +
fc * fc * hchc[det] +
2.0 * fp * fc * hphc[det])
sh_total += cplx_hd
hh_total += hh
loglr = self.marginalize_loglr(sh_total, hh_total)
return loglr
class MarginalizedPolarization(DistMarg, BaseGaussianNoise):
r""" This likelihood numerically marginalizes over polarization angle
This class implements the Gaussian likelihood with an explicit numerical
marginalization over polarization angle. This is accomplished using
    a fixed set of integration points distributed uniformly between
0 and 2pi. By default, 1000 integration points are used.
The 'polarization_samples' argument can be passed to set an alternate
number of integration points.
"""
name = 'marginalized_polarization'
def __init__(self, variable_params, data, low_frequency_cutoff, psds=None,
high_frequency_cutoff=None, normalize=False,
polarization_samples=1000,
**kwargs):
variable_params, kwargs = self.setup_marginalization(
variable_params,
polarization_samples=polarization_samples,
**kwargs)
# set up the boiler-plate attributes
super(MarginalizedPolarization, self).__init__(
variable_params, data, low_frequency_cutoff, psds=psds,
high_frequency_cutoff=high_frequency_cutoff, normalize=normalize,
**kwargs)
# Determine if all data have the same sampling rate and segment length
if self.all_ifodata_same_rate_length:
# create a waveform generator for all ifos
self.waveform_generator = create_waveform_generator(
self.variable_params, self.data,
waveform_transforms=self.waveform_transforms,
recalibration=self.recalibration,
generator_class=generator.FDomainDetFrameTwoPolGenerator,
gates=self.gates, **kwargs['static_params'])
else:
            # create a separate waveform generator for each ifo
self.waveform_generator = {}
for det in self.data:
self.waveform_generator[det] = create_waveform_generator(
self.variable_params, {det: self.data[det]},
waveform_transforms=self.waveform_transforms,
recalibration=self.recalibration,
generator_class=generator.FDomainDetFrameTwoPolGenerator,
gates=self.gates, **kwargs['static_params'])
self.dets = {}
@property
def _extra_stats(self):
"""Adds ``loglr``, ``maxl_polarization``, and the ``optimal_snrsq`` in
each detector.
"""
return ['loglr', 'maxl_polarization', 'maxl_loglr'] + \
['{}_optimal_snrsq'.format(det) for det in self._data]
def _nowaveform_loglr(self):
"""Convenience function to set loglr values if no waveform generated.
"""
setattr(self._current_stats, 'loglr', -numpy.inf)
# maxl phase doesn't exist, so set it to nan
setattr(self._current_stats, 'maxl_polarization', numpy.nan)
for det in self._data:
# snr can't be < 0 by definition, so return 0
setattr(self._current_stats, '{}_optimal_snrsq'.format(det), 0.)
return -numpy.inf
def _loglr(self):
r"""Computes the log likelihood ratio,
.. math::
\log \mathcal{L}(\Theta) = \sum_i
\left<h_i(\Theta)|d_i\right> -
\frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,
at the current parameter values :math:`\Theta`.
Returns
-------
float
The value of the log likelihood ratio.
"""
params = self.current_params
try:
if self.all_ifodata_same_rate_length:
wfs = self.waveform_generator.generate(**params)
else:
wfs = {}
for det in self.data:
wfs.update(self.waveform_generator[det].generate(**params))
except NoWaveformError:
return self._nowaveform_loglr()
except FailedWaveformError as e:
if self.ignore_failed_waveforms:
return self._nowaveform_loglr()
else:
raise e
lr = sh_total = hh_total = 0.
for det, (hp, hc) in wfs.items():
if det not in self.dets:
self.dets[det] = Detector(det)
fp, fc = self.dets[det].antenna_pattern(
params['ra'],
params['dec'],
params['polarization'],
params['tc'])
# the kmax of the waveforms may be different than internal kmax
kmax = min(max(len(hp), len(hc)), self._kmax[det])
slc = slice(self._kmin[det], kmax)
# whiten both polarizations
hp[self._kmin[det]:kmax] *= self._weight[det][slc]
hc[self._kmin[det]:kmax] *= self._weight[det][slc]
            # h = fp * hp + fc * hc
# <h, d> = fp * <hp,d> + fc * <hc,d>
# the inner products
cplx_hpd = hp[slc].inner(self._whitened_data[det][slc]) # <hp, d>
cplx_hcd = hc[slc].inner(self._whitened_data[det][slc]) # <hc, d>
cplx_hd = fp * cplx_hpd + fc * cplx_hcd
# <h, h> = <fp * hp + fc * hc, fp * hp + fc * hc>
# = Real(fpfp * <hp,hp> + fcfc * <hc,hc> + \
# fphc * (<hp, hc> + <hc, hp>))
hphp = hp[slc].inner(hp[slc]).real # < hp, hp>
hchc = hc[slc].inner(hc[slc]).real # <hc, hc>
            # The two cross terms below could be combined into a single inner
            # product, but they are kept separate to avoid any sign ambiguity
hphc = hp[slc].inner(hc[slc]).real # <hp, hc>
hchp = hc[slc].inner(hp[slc]).real # <hc, hp>
hh = fp * fp * hphp + fc * fc * hchc + fp * fc * (hphc + hchp)
# store
setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh)
sh_total += cplx_hd
hh_total += hh
lr, idx, maxl = self.marginalize_loglr(sh_total, hh_total,
return_peak=True)
# store the maxl polarization
setattr(self._current_stats,
'maxl_polarization',
params['polarization'])
setattr(self._current_stats, 'maxl_loglr', maxl)
# just store the maxl optimal snrsq
for det in wfs:
p = '{}_optimal_snrsq'.format(det)
setattr(self._current_stats, p,
getattr(self._current_stats, p)[idx])
return lr
class MarginalizedHMPolPhase(BaseGaussianNoise):
r"""Numerically marginalizes waveforms with higher modes over polarization
    *and* phase.
This class implements the Gaussian likelihood with an explicit numerical
marginalization over polarization angle and orbital phase. This is
accomplished using a fixed set of integration points distributed uniformly
    between 0 and :math:`2\pi` for both the polarization and phase. By default,
100 integration points are used for each parameter, giving :math:`10^4`
evaluation points in total. This can be modified using the
``polarization_samples`` and ``coa_phase_samples`` arguments.
This only works with waveforms that return separate spherical harmonic
modes for each waveform. For a list of currently supported approximants,
see :py:func:`pycbc.waveform.waveform_modes.fd_waveform_mode_approximants`
and :py:func:`pycbc.waveform.waveform_modes.td_waveform_mode_approximants`.
Parameters
----------
variable_params : (tuple of) string(s)
A tuple of parameter names that will be varied.
data : dict
A dictionary of data, in which the keys are the detector names and the
values are the data (assumed to be unwhitened). All data must have the
same frequency resolution.
low_frequency_cutoff : dict
A dictionary of starting frequencies, in which the keys are the
detector names and the values are the starting frequencies for the
respective detectors to be used for computing inner products.
psds : dict, optional
A dictionary of FrequencySeries keyed by the detector names. The
dictionary must have a psd for each detector specified in the data
dictionary. If provided, the inner products in each detector will be
weighted by 1/psd of that detector.
high_frequency_cutoff : dict, optional
A dictionary of ending frequencies, in which the keys are the
detector names and the values are the ending frequencies for the
respective detectors to be used for computing inner products. If not
provided, the minimum of the largest frequency stored in the data
and a given waveform will be used.
normalize : bool, optional
        If True, the normalization factor :math:`\alpha` will be included in the
log likelihood. See :py:class:`GaussianNoise` for details. Default is
to not include it.
polarization_samples : int, optional
How many points to use in polarization. Default is 100.
coa_phase_samples : int, optional
How many points to use in phase. Defaults is 100.
\**kwargs :
All other keyword arguments are passed to
:py:class:`BaseGaussianNoise
<pycbc.inference.models.gaussian_noise.BaseGaussianNoise>`.
"""
name = 'marginalized_hmpolphase'
def __init__(self, variable_params, data, low_frequency_cutoff, psds=None,
high_frequency_cutoff=None, normalize=False,
polarization_samples=100,
coa_phase_samples=100,
static_params=None, **kwargs):
# set up the boiler-plate attributes
super(MarginalizedHMPolPhase, self).__init__(
variable_params, data, low_frequency_cutoff, psds=psds,
high_frequency_cutoff=high_frequency_cutoff, normalize=normalize,
static_params=static_params, **kwargs)
# create the waveform generator
self.waveform_generator = create_waveform_generator(
self.variable_params, self.data,
waveform_transforms=self.waveform_transforms,
recalibration=self.recalibration,
generator_class=generator.FDomainDetFrameModesGenerator,
gates=self.gates, **self.static_params)
pol = numpy.linspace(0, 2*numpy.pi, polarization_samples)
phase = numpy.linspace(0, 2*numpy.pi, coa_phase_samples)
# remap to every combination of the parameters
        # this gets every combination by mapping them to an NxM grid
        # one needs to be transposed so that they run along opposite
# dimensions
n = coa_phase_samples * polarization_samples
self.nsamples = n
self.pol = numpy.resize(pol, n)
phase = numpy.resize(phase, n)
phase = phase.reshape(coa_phase_samples, polarization_samples)
self.phase = phase.T.flatten()
self._phase_fac = {}
self.dets = {}
def phase_fac(self, m):
r"""The phase :math:`\exp[i m \phi]`."""
try:
return self._phase_fac[m]
except KeyError:
# hasn't been computed yet, calculate it
self._phase_fac[m] = numpy.exp(1.0j * m * self.phase)
return self._phase_fac[m]
@property
def _extra_stats(self):
"""Adds ``maxl_polarization`` and the ``maxl_phase``
"""
return ['maxl_polarization', 'maxl_phase', ]
def _nowaveform_loglr(self):
"""Convenience function to set loglr values if no waveform generated.
"""
# maxl phase doesn't exist, so set it to nan
setattr(self._current_stats, 'maxl_polarization', numpy.nan)
setattr(self._current_stats, 'maxl_phase', numpy.nan)
return -numpy.inf
def _loglr(self, return_unmarginalized=False):
r"""Computes the log likelihood ratio,
.. math::
\log \mathcal{L}(\Theta) = \sum_i
\left<h_i(\Theta)|d_i\right> -
\frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,
at the current parameter values :math:`\Theta`.
Returns
-------
float
The value of the log likelihood ratio.
"""
params = self.current_params
try:
wfs = self.waveform_generator.generate(**params)
except NoWaveformError:
return self._nowaveform_loglr()
except FailedWaveformError as e:
if self.ignore_failed_waveforms:
return self._nowaveform_loglr()
else:
raise e
# ---------------------------------------------------------------------
# Some optimizations not yet taken:
# * higher m calculations could have a lot of redundancy
# * fp/fc need not be calculated except where polarization is different
# * may be possible to simplify this by making smarter use of real/imag
# ---------------------------------------------------------------------
lr = 0.
hds = {}
hhs = {}
for det, modes in wfs.items():
if det not in self.dets:
self.dets[det] = Detector(det)
fp, fc = self.dets[det].antenna_pattern(params['ra'],
params['dec'],
self.pol,
params['tc'])
# loop over modes and prepare the waveform modes
# we will sum up zetalm = glm <ulm, d> + i glm <vlm, d>
# over all common m so that we can apply the phase once
zetas = {}
rlms = {}
slms = {}
for mode in modes:
l, m = mode
ulm, vlm = modes[mode]
# whiten the waveforms
# the kmax of the waveforms may be different than internal kmax
kmax = min(max(len(ulm), len(vlm)), self._kmax[det])
slc = slice(self._kmin[det], kmax)
ulm[self._kmin[det]:kmax] *= self._weight[det][slc]
vlm[self._kmin[det]:kmax] *= self._weight[det][slc]
# the inner products
# <ulm, d>
ulmd = ulm[slc].inner(self._whitened_data[det][slc]).real
# <vlm, d>
vlmd = vlm[slc].inner(self._whitened_data[det][slc]).real
# add inclination, and pack into a complex number
import lal
glm = lal.SpinWeightedSphericalHarmonic(
params['inclination'], 0, -2, l, m).real
if m not in zetas:
zetas[m] = 0j
zetas[m] += glm * (ulmd + 1j*vlmd)
                # Get a condensed set of the parts of the waveform that differ
                # only by m; this is used next to help calculate <h, h>
r = glm * ulm
s = glm * vlm
if m not in rlms:
rlms[m] = r
slms[m] = s
else:
rlms[m] += r
slms[m] += s
# now compute all possible <hlm, hlm>
rr_m = {}
ss_m = {}
rs_m = {}
sr_m = {}
combos = itertools.combinations_with_replacement(rlms.keys(), 2)
for m, mprime in combos:
r = rlms[m]
s = slms[m]
rprime = rlms[mprime]
sprime = slms[mprime]
rr_m[mprime, m] = r[slc].inner(rprime[slc]).real
ss_m[mprime, m] = s[slc].inner(sprime[slc]).real
rs_m[mprime, m] = s[slc].inner(rprime[slc]).real
sr_m[mprime, m] = r[slc].inner(sprime[slc]).real
# store the conjugate for easy retrieval later
rr_m[m, mprime] = rr_m[mprime, m]
ss_m[m, mprime] = ss_m[mprime, m]
rs_m[m, mprime] = sr_m[mprime, m]
sr_m[m, mprime] = rs_m[mprime, m]
# now apply the phase to all the common ms
hpd = 0.
hcd = 0.
hphp = 0.
hchc = 0.
hphc = 0.
for m, zeta in zetas.items():
phase_coeff = self.phase_fac(m)
# <h+, d> = (exp[i m phi] * zeta).real()
# <hx, d> = -(exp[i m phi] * zeta).imag()
z = phase_coeff * zeta
hpd += z.real
hcd -= z.imag
# now calculate the contribution to <h, h>
cosm = phase_coeff.real
sinm = phase_coeff.imag
for mprime in zetas:
pcprime = self.phase_fac(mprime)
cosmprime = pcprime.real
sinmprime = pcprime.imag
# needed components
rr = rr_m[m, mprime]
ss = ss_m[m, mprime]
rs = rs_m[m, mprime]
sr = sr_m[m, mprime]
# <hp, hp>
hphp += rr * cosm * cosmprime \
+ ss * sinm * sinmprime \
- rs * cosm * sinmprime \
- sr * sinm * cosmprime
# <hc, hc>
hchc += rr * sinm * sinmprime \
+ ss * cosm * cosmprime \
+ rs * sinm * cosmprime \
+ sr * cosm * sinmprime
# <hp, hc>
hphc += -rr * cosm * sinmprime \
+ ss * sinm * cosmprime \
+ sr * sinm * sinmprime \
- rs * cosm * cosmprime
# Now apply the polarizations and calculate the loglr
# We have h = Fp * hp + Fc * hc
# loglr = <h, d> - <h, h>/2
# = Fp*<hp, d> + Fc*<hc, d>
# - (1/2)*(Fp*Fp*<hp, hp> + Fc*Fc*<hc, hc>
# + 2*Fp*Fc<hp, hc>)
# (in the last line we have made use of the time series being
# real, so that <a, b> = <b, a>).
hd = fp * hpd + fc * hcd
hh = fp * fp * hphp + fc * fc * hchc + 2 * fp * fc * hphc
hds[det] = hd
hhs[det] = hh
lr += hd - 0.5 * hh
if return_unmarginalized:
return self.pol, self.phase, lr, hds, hhs
lr_total = special.logsumexp(lr) - numpy.log(self.nsamples)
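        # logsumexp(lr) - log(nsamples) is the log of the mean of exp(lr) over
        # the (polarization, phase) grid, i.e. the likelihood ratio
        # marginalized over both angles with uniform priors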
# store the maxl values
idx = lr.argmax()
setattr(self._current_stats, 'maxl_polarization', self.pol[idx])
setattr(self._current_stats, 'maxl_phase', self.phase[idx])
return float(lr_total)
| 32,995
| 40.142145
| 83
|
py
|